- Timestamp: Oct 10, 2019, 1:42:04 PM
- Location: trunk
- Files: 19 edited
trunk/kernel/Makefile
r637 → r641

      build/syscalls/sys_fsync.o \
      build/syscalls/sys_get_best_core.o \
-     build/syscalls/sys_get_nb_cores.o
+     build/syscalls/sys_get_nb_cores.o \
+     build/syscalls/sys_get_thread_info.o

      VFS_OBJS = build/fs/vfs.o \
trunk/kernel/kern/do_syscall.c
r637 → r641

      sys_get_best_core,           // 53
      sys_get_nb_cores,            // 54
+     sys_get_thread_info,         // 55
  };
…
      case SYS_GET_BEST_CORE:    return "GET_BEST_CORE";    // 53
      case SYS_GET_NB_CORES:     return "GET_NB_CORES";     // 54
+     case SYS_GET_THREAD_INFO:  return "GET_THREAD_INFO";  // 55

      default: return "undefined";
trunk/kernel/kern/rpc.c
r640 → r641

      rpc_server[index]( desc_xp );

+     // update responses counter
+     responses = hal_remote_atomic_add( rsp_xp , -1 );
+
  #if DEBUG_RPC_SERVER_GENERIC
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_RPC_SERVER_GENERIC < cycle )
- printk("\n[%s] RPC thread[%x,%x] completes rpc %s / client_cxy %x / cycle %d\n",
- __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, rpc_str[index], desc_cxy, cycle );
+ printk("\n[%s] RPC thread[%x,%x] completes rpc %s / responses %d / cycle %d\n",
+ __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, rpc_str[index], responses, cycle );
  #endif
-     // decrement expected responses counter
-     responses = hal_remote_atomic_add( rsp_xp , -1 );
-
      // unblock client thread if last response
      if( responses == 1 )
…
  //////////////////////////////////////////////////////////
- void rpc_vmm_resize_vseg_client( cxy_t              cxy,
-                                  struct process_s * process,
-                                  struct vseg_s    * vseg,
-                                  intptr_t           new_base,
-                                  intptr_t           new_size )
+ void rpc_vmm_resize_vseg_client( cxy_t    cxy,
+                                  pid_t    pid,
+                                  intptr_t base,
+                                  intptr_t new_base,
+                                  intptr_t new_size )
  {
  #if DEBUG_RPC_VMM_RESIZE_VSEG
…
      // set input arguments in RPC descriptor
-     rpc.args[0] = (uint64_t)(intptr_t)process;
-     rpc.args[1] = (uint64_t)(intptr_t)vseg;
+     rpc.args[0] = (uint64_t)pid;
+     rpc.args[1] = (uint64_t)base;
      rpc.args[2] = (uint64_t)new_base;
      rpc.args[3] = (uint64_t)new_size;
…
  #endif

+     pid_t       pid;
      process_t * process;
+     intptr_t    base;
      vseg_t    * vseg;
      intptr_t    new_base;
…
      // get arguments from client RPC descriptor
-     process  = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
-     vseg     = (vseg_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
-     new_base = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
-     new_size = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
+     pid      = (pid_t)   hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
+     base     = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
+     new_base = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
+     new_size = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
+
+     // get local pointer on target process
+     process = cluster_get_local_process_from_pid( pid );
+
+     // get target vseg from vaddr
+     vmm_get_vseg( process , base , &vseg );

      // call relevant kernel function
…
  /////////////////////////////////////////////////
- void rpc_vmm_remove_vseg_client( cxy_t       cxy,
-                                  process_t * process,
-                                  vseg_t    * vseg )
+ void rpc_vmm_remove_vseg_client( cxy_t    cxy,
+                                  pid_t    pid,
+                                  intptr_t base )
  {
  #if DEBUG_RPC_VMM_REMOVE_VSEG
…
      // set input arguments in RPC descriptor
-     rpc.args[0] = (uint64_t)(intptr_t)process;
-     rpc.args[1] = (uint64_t)(intptr_t)vseg;
+     rpc.args[0] = (uint64_t)pid;
+     rpc.args[1] = (uint64_t)base;

      // register RPC request in remote RPC fifo
…
  #endif

+     pid_t       pid;
+     intptr_t    vaddr;
      process_t * process;
      vseg_t    * vseg;
…
      // get arguments from RPC descriptor
-     process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
-     vseg    = (vseg_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
+     pid     = (pid_t)   hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
+     vaddr   = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
+
+     // get local pointer on target process
+     process = cluster_get_local_process_from_pid( pid );
+
+     // get target vseg from vaddr
+     vmm_get_vseg( process , vaddr , &vseg );

      // call relevant kernel function
trunk/kernel/kern/rpc.h
r640 → r641

   * the caller. It exit with a Panic message if remote fifo is still full after
   * (CONFIG_RPC_PUT_MAX_ITERATIONS) retries.
-  * - When the RPC <blocking> field is true, this function blocks and deschedule.
-  *   It returns only when the server acknowledges the RPC by writing in the RPC
-  *   "response" field, and unblocks the client.
+  * - When the RPC <blocking> field is true, this function blocks and deschedule
+  *   the client thread. It returns only when the server completes the RPC and
+  *   unblocks the client thread.
   * - When the <blocking> field is false, this function returns as soon as the RPC
-  *   has been registered in the FIFO, and the server thread must directly signal
-  *   completion to the client thread.
+  *   has been registered in the FIFO, and the client thread must block itself when
+  *   all RPCS have been registered in all target clusters.
   ***********************************************************************************
   * @ cxy   : server cluster identifier
…
  /***********************************************************************************
-  * [25] The RPC_VMM_RESIZE_VSEG allows a client thread to request a remote vseg
-  * resize. Both the VSL and the GPT are updated in the remote cluster.
+  * [25] The RPC_VMM_RESIZE_VSEG allows a client thread to request a remote cluster
+  * to resize a vseg identified by the <base> argument in a process descriptor
+  * identified by the <pid> argument, as defined by the <new_base> and <new_size>
+  * arguments. Both the VSL and the GPT are updated in the remote cluster.
   ***********************************************************************************
   * @ cxy      : server cluster identifier.
-  * @ process  : [in] local pointer on remote process.
-  * @ vseg     : [in] local pointer on remote vseg.
-  * @ new_base : [in] new vseg base address.
+  * @ pid      : [in] process identifier.
+  * @ base     : [in] vseg base.
+  * @ new_base : [in] new vseg base.
   * @ new_size : [in] new vseg size.
   **********************************************************************************/
- void rpc_vmm_resize_vseg_client( cxy_t              cxy,
-                                  struct process_s * process,
-                                  struct vseg_s    * vseg,
+ void rpc_vmm_resize_vseg_client( cxy_t    cxy,
+                                  pid_t    pid,
+                                  intptr_t base,
                                   intptr_t new_base,
                                   intptr_t new_size );
…
  /***********************************************************************************
-  * [26] The RPC_VMM_REMOVE_VSEG allows a client thread to request a remote vseg
-  * delete. Bothe the VSL and the GPT are updated in the remote cluster.
-  ***********************************************************************************
-  * @ cxy     : server cluster identifier.
-  * @ process : [in] local pointer on remote process.
-  * @ vseg    : [in] local pointer on remote vseg.
-  **********************************************************************************/
- void rpc_vmm_remove_vseg_client( cxy_t       cxy,
-                                  struct process_s * process,
-                                  vseg_t    * vseg );
+  * [26] The RPC_VMM_REMOVE_VSEG allows a client thread to request a remote cluster
+  * to delete a vseg identified by the <vaddr> argument in a process descriptor
+  * identified by the <pid> argument.
+  * Both the VSL and the GPT are updated in the remote cluster.
+  ***********************************************************************************
+  * @ cxy     : server cluster identifier.
+  * @ pid     : [in] process identifier.
+  * @ base    : [in] vseg base.
+  **********************************************************************************/
+ void rpc_vmm_remove_vseg_client( cxy_t    cxy,
+                                  pid_t    pid,
+                                  intptr_t base );

  void rpc_vmm_remove_vseg_server( xptr_t xp );
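The comments above describe the two rpc_send() behaviours (blocking and non blocking) and the new pid/base based client functions. The sketch below condenses the client-side pattern that this changeset adopts in vmm_global_delete_vseg() and user_dir_destroy(): one shared descriptor, one non blocking rpc_send() per target cluster, and a single deschedule until the last server thread unblocks the client. The helper name and the explicit cluster array are illustrative only; the real code walks the process copies xlist instead.

    // Hypothetical helper (not part of the changeset): broadcast RPC_VMM_REMOVE_VSEG
    // to an explicit array of clusters, using the shared-descriptor pattern.
    static void remove_vseg_in_clusters( cxy_t  * cxy_tab,    // target clusters
                                         uint32_t nb_cxy,     // number of targets
                                         pid_t    pid,        // target process
                                         intptr_t base )      // vseg base address
    {
        uint32_t   responses;                 // shared responses counter
        rpc_desc_t rpc;                       // shared RPC descriptor
        reg_t      save_sr;
        thread_t * this = CURRENT_THREAD;

        // initialize the shared descriptor: one descriptor serves all servers,
        // because all servers take the same arguments and return nothing
        rpc.rsp      = &responses;
        rpc.blocking = false;                 // non blocking behaviour for rpc_send()
        rpc.index    = RPC_VMM_REMOVE_VSEG;
        rpc.thread   = this;
        rpc.lid      = this->core->lid;
        rpc.args[0]  = (uint64_t)pid;
        rpc.args[1]  = (uint64_t)base;

        // mask IRQs and block the client before sending the first request,
        // so that a fast server cannot try to unblock a thread that is not blocked yet
        hal_disable_irq( &save_sr );
        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

        responses = 0;

        // register one request per target cluster
        for( uint32_t i = 0 ; i < nb_cxy ; i++ )
        {
            hal_atomic_add( &responses , 1 );     // one more expected response
            rpc_send( cxy_tab[i] , &rpc );        // non blocking post in remote fifo
        }

        // deschedule; the last server thread decrements the counter to zero
        // and unblocks the client
        sched_yield( "blocked on parallel RPCs" );

        // restore IRQs
        hal_restore_irq( save_sr );
    }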
trunk/kernel/kern/scheduler.c
r640 → r641

  __FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, cycle );
  #endif
-
- #if CONFIG_INSTRUMENTATION_PGFAULTS
-     uint32_t local_nr    = thread->info.local_pgfault_nr;
-     uint32_t local_cost  = (local_nr  == 0) ? 0 : (thread->info.local_pgfault_cost  / local_nr);
-     uint32_t global_nr   = thread->info.global_pgfault_nr;
-     uint32_t global_cost = (global_nr == 0) ? 0 : (thread->info.global_pgfault_cost / global_nr);
-     uint32_t false_nr    = thread->info.false_pgfault_nr;
-     uint32_t false_cost  = (false_nr  == 0) ? 0 : (thread->info.false_pgfault_cost  / false_nr);
-     printk("\n***** page faults for thread[%x,%x]\n"
-            " - %d local  : %d cycles\n"
-            " - %d global : %d cycles\n"
-            " - %d false  : %d cycles\n",
-            process->pid, thread->trdid,
-            local_nr,  local_cost,
-            global_nr, global_cost,
-            false_nr,  false_cost );
- #endif
      // destroy process descriptor if last thread
      if( count == 1 )
trunk/kernel/kern/thread.c
r640 → r641

      dqdt_increment_threads();

+ #if CONFIG_INSTRUMENTATION_PGFAULTS
+     thread->info.false_pgfault_nr     = 0;
+     thread->info.false_pgfault_cost   = 0;
+     thread->info.false_pgfault_max    = 0;
+     thread->info.local_pgfault_nr     = 0;
+     thread->info.local_pgfault_cost   = 0;
+     thread->info.local_pgfault_max    = 0;
+     thread->info.global_pgfault_nr    = 0;
+     thread->info.global_pgfault_cost  = 0;
+     thread->info.global_pgfault_max   = 0;
+ #endif
+
  #if DEBUG_THREAD_INIT
      cycle = (uint32_t)hal_get_cycles();
…
      core_t * core = thread->core;

- #if DEBUG_THREAD_DESTROY || CONFIG_INSTRUMENTATION_PGFAULTS
+ #if DEBUG_THREAD_DESTROY
      uint32_t   cycle;
      thread_t * this = CURRENT_THREAD;
…
  #if CONFIG_INSTRUMENTATION_PGFAULTS
      process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
+     process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
      process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
+     process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
      process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
-     process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
-     process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
      process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
  #endif
…
  #if (CONFIG_INSTRUMENTATION_PGFAULTS & 1)
      uint32_t false_nr    = thread->info.false_pgfault_nr;
+     uint32_t false_cost  = thread->info.false_pgfault_cost;
+     uint32_t false_max   = thread->info.false_pgfault_max;
+     uint32_t false_one   = false_nr ? (false_cost / false_nr ) : 0;
+
      uint32_t local_nr    = thread->info.local_pgfault_nr;
+     uint32_t local_cost  = thread->info.local_pgfault_cost;
+     uint32_t local_max   = thread->info.local_pgfault_max;
+     uint32_t local_one   = local_nr ? (local_cost / local_nr ) : 0;
+
      uint32_t global_nr   = thread->info.global_pgfault_nr;
-     uint32_t false_cost  = thread->info.false_pgfault_cost;
-     uint32_t local_cost  = thread->info.local_pgfault_cost;
      uint32_t global_cost = thread->info.global_pgfault_cost;
-     printk("***** thread[%x,%x] page-faults\n"
-            " - false  %d ( %d cycles )\n"
-            " - local  %d ( %d cycles )\n"
-            " - global %d ( %d cycles )\n",
-            this->process->pid, this->trdid,
-            false_nr , false_cost / false_nr,
-            local_nr , local_cost / local_nr,
-            global_nr, global_cost / global_nr );
+     uint32_t global_max  = thread->info.global_pgfault_max;
+     uint32_t global_one  = global_nr ? (global_cost / global_nr) : 0;
+
+     printk("\n***** thread[%x,%x] page-faults\n"
+            " - false  : %d events / cost %d cycles / max %d cycles\n"
+            " - local  : %d events / cost %d cycles / max %d cycles\n"
+            " - global : %d events / cost %d cycles / max %d cycles\n",
+            thread->process->pid, thread->trdid,
+            false_nr , false_one , false_max,
+            local_nr , local_one , local_max,
+            global_nr, global_one, global_max );
  #endif
trunk/kernel/kern/thread.h
r635 → r641

  #include <hal_kernel_types.h>
  #include <shared_syscalls.h>
+ #include <shared_almos.h>
  #include <hal_special.h>
  #include <hal_kentry.h>
…
  /***************************************************************************************
-  * This structure defines thread instrumentation informations.
-  **************************************************************************************/
-
- typedef struct thread_info_s
- {
-     uint32_t false_pgfault_nr;       /*! number of local page fault            */
-     uint32_t local_pgfault_nr;       /*! number of local page fault            */
-     uint32_t global_pgfault_nr;      /*! number of global page fault           */
-     uint32_t false_pgfault_cost;     /*! cumulated cost                        */
-     uint32_t local_pgfault_cost;     /*! cumulated cost                        */
-     uint32_t global_pgfault_cost;    /*! cumulated cost                        */
-
-     cycle_t  last_cycle;             /*! last cycle counter value (date)       */
-     cycle_t  usr_cycles;             /*! user execution duration (cycles)      */
-     cycle_t  sys_cycles;             /*! system execution duration (cycles)    */
- }
- thread_info_t;
-
- /***************************************************************************************
   * This structure defines a thread descriptor.
   * It is used for both the user threads and the kernel threads.
…
   * - The TRDID 16 LSB bits contain the LTID (Local Thread Index).
   * - The TRDID 16 MSB bits contain the CXY of cluster containing the thread.
-  *   The main thread LTID value is always 0.
+  *   For the main thread the LTID value is always 0, in the owner cluster.
   *   The LTID is used to index the th_tbl[] array in the local process descriptor.
   *   This TRDID is computed by the process_register_thread() function, when the user
   *   thread is registered in the local copy of the process descriptor.
   *
-  * WARNING (1) Don't modify the first 4 fields order, as this order is used by the
+  * Implementation notes:
+  *
+  * (1) Don't modify the first 4 fields order, as this order is used by the
   *     hal_kentry assembly code for the TSAR architectures.
   *
-  * WARNING (2) Most of the thread state is private and accessed only by this thread,
+  * (2) Most of the thread state is private and accessed only by this thread,
   *     but some fields are shared, and can be modified by other threads.
   *     - the "blocked" bit_vector can be modified by another thread
   *       running in another cluster (using atomic instructions),
   *       to change this thread scheduling status.
   *     - the "flags" bit_vector can be modified by another thread
   *       running in another cluster (using atomic instructions),
   *       to register requests such as ACK or DELETE.
   *     - the "join_xp" field can be modified by the joining thread,
   *       and this rendez-vous is protected by the dedicated "join_lock".
   *
-  * WARNING (3) When this thread is blocked on a shared resource (queuelock, condvar,
+  * (3) When this thread is blocked on a shared resource (queuelock, condvar,
   *     or chdev), it registers in the associated waiting queue, using the
   *     "wait_list" (local list) or "wait_xlist" (trans-cluster list) fields.
+  *
+  * (4) The thread_info_t structure is defined in the shared_almos.h file in the
+  *     /kernel/syscall/shared_include directory.
   **************************************************************************************/
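As a reminder of the TRDID layout described in the comment above (16 MSB = CXY of the cluster containing the thread, 16 LSB = LTID indexing th_tbl[]), here is a minimal illustrative decomposition; the helper names below are not the kernel macros, they only restate the encoding.

    // illustrative helpers only: they restate the TRDID encoding described above,
    // the kernel defines its own macros for this
    static inline uint32_t trdid_cxy ( uint32_t trdid ) { return (trdid >> 16) & 0xFFFF; }
    static inline uint32_t trdid_ltid( uint32_t trdid ) { return  trdid        & 0xFFFF; }

    // example: trdid 0x00030002 names the thread registered in th_tbl[2] of the
    // process copy located in cluster 0x3; a main thread always has LTID 0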
trunk/kernel/kernel_config.h
r640 → r641

  #define DEBUG_HAL_GPT_CREATE              0
  #define DEBUG_HAL_GPT_DESTROY             0
- #define DEBUG_HAL_GPT_LOCK_PTE            2
+ #define DEBUG_HAL_GPT_LOCK_PTE            0
  #define DEBUG_HAL_GPT_SET_COW             0
  #define DEBUG_HAL_GPT_SET_PTE             0
…
  #define DEBUG_SYS_GET_CORE_ID             0
  #define DEBUG_SYS_GET_NB_CORES            0
+ #define DEBUG_SYS_GET_THREAD_INFO         0
  #define DEBUG_SYS_ISATTY                  0
  #define DEBUG_SYS_IS_FG                   0
…
  #define DEBUG_VMM_CREATE_VSEG             0
- #define DEBUG_VMM_DELETE_VSEG             0
  #define DEBUG_VMM_DESTROY                 0
  #define DEBUG_VMM_FORK_COPY               0
…
  #define CONFIG_INSTRUMENTATION_SYSCALLS   0
- #define CONFIG_INSTRUMENTATION_PGFAULTS   0
+ #define CONFIG_INSTRUMENTATION_PGFAULTS   1
  #define CONFIG_INSTRUMENTATION_FOOTPRINT  0
  #define CONFIG_INSTRUMENTATION_GPT        1
trunk/kernel/libk/user_dir.c
r640 → r641

  }

- // Build an initialize the dirent array as a list of pages.
+ // Build and initialize the dirent array as a list of pages.
  // For each iteration in this while loop:
  // - allocate one physical 4 Kbytes (64 dirent slots)
…
- // call the relevant FS specific function to copy up to 64 dirents in page
+ // call the relevant FS specific function to copy dirents in page
  error = vfs_fs_get_user_dir( inode,
                               base,
…
- // compute required vseg size for a 64 bytes dirent
+ // compute required vseg size
  vseg_size = total_dirents << 6;
…
  // check vseg size
  assert( (total_pages == hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_size ) ) ),
  "unconsistent vseg size for dirent array " );

  // build extended pointer on reference process GPT
…
  // delete the vseg
- if( ref_cxy == local_cxy) vmm_remove_vseg( ref_ptr, vseg );
- else                      rpc_vmm_remove_vseg_client( ref_cxy, ref_ptr, vseg );
+ intptr_t base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) );
+ rpc_vmm_remove_vseg_client( ref_cxy, ref_pid, base );

  // release the user_dir descriptor
…
  // to wait all RPC responses, and will be unblocked by the last RPC server thread.
  // It allocates a - shared - RPC descriptor in the stack, because all parallel
- // server threads use the same input arguments, and the same response field.
+ // server threads use the same input arguments, and there is no out argument.

  // get owner cluster identifier and process lpid
…
  // initialize a shared RPC descriptor
- // can be shared, because no out arguments
  rpc.rsp      = &responses;
- rpc.blocking = false;
+ rpc.blocking = false;              // non blocking behaviour for rpc_send()
  rpc.index    = RPC_VMM_REMOVE_VSEG;
  rpc.thread   = this;
…
  hal_atomic_add( &responses , 1 );

+ #if (DEBUG_USER_DIR & 1)
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( cycle > DEBUG_USER_DIR )
+ printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, process_cxy );
+ #endif
+
  // send RPC to target cluster
  rpc_send( process_cxy , &rpc );
trunk/kernel/mm/vmm.c
r640 → r641

   *
   * Authors  Ghassan Almaless (2008,2009,2010,2011, 2012)
-  *          Mohamed Lamine Karaoui (2015)
   *          Alain Greiner (2016,2017,2018,2019)
   *
…
  #include <hal_gpt.h>
  #include <hal_vmm.h>
+ #include <hal_irqmask.h>
  #include <hal_macros.h>
  #include <printk.h>
…
  ////////////////////////////////////////////////////////////////////////////////////////////
- // This static function is called by the vmm_remove_vseg() function, and implements
- // the VMM MMAP specific desallocator.
+ // This static function implements the VMM MMAP specific desallocator.
+ // It is called by the vmm_remove_vseg() function.
  ////////////////////////////////////////////////////////////////////////////////////////////
  // @ vmm  : [in] pointer on VMM.
…
                                   intptr_t    base )
  {
-     pid_t           pid;
      cxy_t           owner_cxy;
      lpid_t          owner_lpid;
-
-     xlist_entry_t * process_root_ptr;
+     reg_t           save_sr;
+
+     xptr_t          process_lock_xp;
      xptr_t          process_root_xp;
      xptr_t          process_iter_xp;
…
      xptr_t          vsl_iter_xp;

+     rpc_desc_t      rpc;          // shared rpc descriptor for parallel RPCs
+     uint32_t        responses;    // RPC responses counter
+
+     thread_t  * this    = CURRENT_THREAD;
+     pid_t       pid     = process->pid;
+     cluster_t * cluster = LOCAL_CLUSTER;
+
  #if DEBUG_VMM_GLOBAL_DELETE_VSEG
  uint32_t cycle = (uint32_t)hal_get_cycles();
- thread_t * this = CURRENT_THREAD;
  #endif

  #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
  if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] : process %x / base %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enters / process %x / base %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
  #endif

+     // initialize a shared RPC descriptor
+     rpc.rsp       = &responses;
+     rpc.blocking  = false;        // non blocking behaviour for rpc_send()
+     rpc.index     = RPC_VMM_REMOVE_VSEG;
+     rpc.thread    = this;
+     rpc.lid       = this->core->lid;
+     rpc.args[0]   = this->process->pid;
+     rpc.args[1]   = base;
+
      // get owner process cluster and local index
-     pid        = process->pid;
      owner_cxy  = CXY_FROM_PID( pid );
      owner_lpid = LPID_FROM_PID( pid );

-     // get extended pointer on root of process copies xlist in owner cluster
-     process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
-     process_root_xp  = XPTR( owner_cxy , process_root_ptr );
+     // get extended pointer on root and lock of process copies xlist in owner cluster
+     process_root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
+     process_lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );
+
+     // mask IRQs
+     hal_disable_irq( &save_sr );
+
+     // client thread blocks itself
+     thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
+
+     // take the lock protecting process copies
+     remote_queuelock_acquire( process_lock_xp );
+
+     // initialize responses counter
+     responses = 0;

      // loop on process copies
…
          if( vseg_base == base )    // found searched vseg
          {
-             if( remote_process_cxy == local_cxy )
-             {
-                 vmm_remove_vseg( process,
-                                  vseg_ptr );
-             }
-             else
-             {
-                 rpc_vmm_remove_vseg_client( remote_process_cxy,
-                                             remote_process_ptr,
-                                             vseg_ptr );
-             }
+             // atomically increment responses counter
+             hal_atomic_add( &responses , 1 );

  #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
  if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] deleted vseg %x for process %x in cluster %x\n",
- __FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy );
- #endif
-
+ printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
+ #endif
+             // send RPC to remote cluster
+             rpc_send( remote_process_cxy , &rpc );
+
+             // exit loop on vsegs
+             break;
          }
      }  // end of loop on vsegs

- #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
- if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
- hal_vmm_display( remote_process_xp , false );
- #endif
-
      // release lock on remote VSL
      remote_queuelock_release( vsl_lock_xp );

      }  // end of loop on process copies

+     // release the lock protecting process copies
+     remote_queuelock_release( process_lock_xp );
+
+ #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
+ if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
+ printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, process->pid, base );
+ #endif
+
+     // client thread deschedule
+     sched_yield("blocked on rpc_vmm_delete_vseg");
+
+     // restore IRQs
+     hal_restore_irq( save_sr );

  #if DEBUG_VMM_GLOBAL_DELETE_VSEG
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / process %x / base %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
  #endif
…
                                   intptr_t    new_size )
  {
-     pid_t           pid;
      cxy_t           owner_cxy;
      lpid_t          owner_lpid;
-
-     xlist_entry_t * process_root_ptr;
+     reg_t           save_sr;
+
+     xptr_t          process_lock_xp;
      xptr_t          process_root_xp;
      xptr_t          process_iter_xp;
…
      xptr_t          vsl_iter_xp;

+     rpc_desc_t      rpc;          // shared rpc descriptor for parallel RPCs
+     uint32_t        responses;    // RPC responses counter
+
+     thread_t  * this    = CURRENT_THREAD;
+     pid_t       pid     = process->pid;
+     cluster_t * cluster = LOCAL_CLUSTER;
+
  #if DEBUG_VMM_GLOBAL_RESIZE_VSEG
  uint32_t cycle = (uint32_t)hal_get_cycles();
- thread_t * this = CURRENT_THREAD;
  #endif
…
  #endif

-     // get owner process cluster and local index
-     pid        = process->pid;
+     // initialize a shared RPC descriptor
+     rpc.rsp       = &responses;
+     rpc.blocking  = false;        // non blocking behaviour for rpc_send()
+     rpc.index     = RPC_VMM_REMOVE_VSEG;
+     rpc.thread    = this;
+     rpc.lid       = this->core->lid;
+     rpc.args[0]   = this->process->pid;
+     rpc.args[1]   = base;
+     rpc.args[2]   = new_base;
+     rpc.args[3]   = new_size;
+
+     // get owner process cluster and local index
      owner_cxy  = CXY_FROM_PID( pid );
      owner_lpid = LPID_FROM_PID( pid );

-     // get extended pointer on root of process copies xlist in owner cluster
-     process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
-     process_root_xp  = XPTR( owner_cxy , process_root_ptr );
+     // get extended pointer on root and lock of process copies xlist in owner cluster
+     process_root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] );
+     process_lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] );
+
+     // mask IRQs
+     hal_disable_irq( &save_sr );
+
+     // client thread blocks itself
+     thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
+
+     // take the lock protecting process copies
+     remote_queuelock_acquire( process_lock_xp );
+
+     // initialize responses counter
+     responses = 0;

      // loop on process copies
…
          if( vseg_base == base )    // found searched vseg
          {
-             if( remote_process_cxy == local_cxy )
-             {
-                 vmm_resize_vseg( remote_process_ptr,
-                                  vseg_ptr,
-                                  new_base,
-                                  new_size );
-             }
-             else
-             {
-                 rpc_vmm_resize_vseg_client( remote_process_cxy,
-                                             remote_process_ptr,
-                                             vseg_ptr,
-                                             new_base,
-                                             new_size );
-             }
+             // atomically increment responses counter
+             hal_atomic_add( &responses , 1 );

  #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
  if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] resized vseg %x for process %x in cluster %x\n",
- __FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy );
- #endif
-
+ printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, remote_process_cxy );
+ #endif
+             // send RPC to remote cluster
+             rpc_send( remote_process_cxy , & rpc );
+
+             // exit loop on vsegs
+             break;
          }
+
      }  // end of loop on vsegs
…
      // release lock on remote VSL
      remote_queuelock_release( vsl_lock_xp );
+
      }  // end of loop on process copies
+
+     // release the lock protecting process copies
+     remote_queuelock_release( process_lock_xp );
+
+ #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
+ if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
+ printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, process->pid, base );
+ #endif
+
+     // client thread deschedule
+     sched_yield("blocked on rpc_vmm_delete_vseg");
+
+     // restore IRQs
+     hal_restore_irq( save_sr );

  #if DEBUG_VMM_GLOBAL_RESIZE_VSEG
…
  #if (DEBUG_VMM_REMOVE_VSEG & 1 )
  if( DEBUG_VMM_REMOVE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enters / process %x / type %s / base %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid,
  process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
…
  #if( DEBUG_VMM_REMOVE_VSEG & 1 )
  if( DEBUG_VMM_REMOVE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
+ printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / type %s\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
  #endif
…
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_REMOVE_VSEG < cycle )
- printk(" [%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / process %x / type %s / base %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid,
  process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
…
  {

- #if( DEBUG_VMM_REMOVE_VSEG & 1 )
+ #if( DEBUG_VMM_RESIZE_VSEG & 1 )
  if( DEBUG_VMM_RESIZE_VSEG < cycle )
  printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s",
…
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
  uint32_t end_cycle = (uint32_t)hal_get_cycles();
+ uint32_t cost      = end_cycle - start_cycle;
  #endif
…
  #if CONFIG_INSTRUMENTATION_PGFAULTS
  this->info.local_pgfault_nr++;
- this->info.local_pgfault_cost += (end_cycle - start_cycle);
+ this->info.local_pgfault_cost += cost;
+ if( cost > this->info.local_pgfault_max ) this->info.local_pgfault_max = cost;
  #endif
  return EXCP_NON_FATAL;
…
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
  uint32_t end_cycle = (uint32_t)hal_get_cycles();
+ uint32_t cost      = end_cycle - start_cycle;
  #endif
…
  #if CONFIG_INSTRUMENTATION_PGFAULTS
  this->info.false_pgfault_nr++;
- this->info.false_pgfault_cost += (end_cycle - start_cycle);
+ this->info.false_pgfault_cost += cost;
+ if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
  #endif
  return EXCP_NON_FATAL;
…
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
  uint32_t end_cycle = (uint32_t)hal_get_cycles();
+ uint32_t cost      = end_cycle - start_cycle;
  #endif
…
  #if CONFIG_INSTRUMENTATION_PGFAULTS
  this->info.global_pgfault_nr++;
- this->info.global_pgfault_cost += (end_cycle - start_cycle);
+ this->info.global_pgfault_cost += cost;
+ if( cost > this->info.global_pgfault_max ) this->info.global_pgfault_max = cost;
  #endif
  return EXCP_NON_FATAL;
…
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
  uint32_t end_cycle = (uint32_t)hal_get_cycles();
+ uint32_t cost      = end_cycle - start_cycle;
  #endif
…
  #if CONFIG_INSTRUMENTATION_PGFAULTS
  this->info.false_pgfault_nr++;
- this->info.false_pgfault_cost += (end_cycle - start_cycle);
+ this->info.false_pgfault_cost += cost;
+ if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
  #endif
  return EXCP_NON_FATAL;
trunk/kernel/syscalls/shared_include/shared_almos.h
r626 → r641

  display_type_t;

+ /*******************************************************************************************
+  * This structure defines the - user accessible - information stored in a thread.
+  ******************************************************************************************/
+
+ typedef struct thread_info_s
+ {
+     unsigned long  false_pgfault_nr;      /*! number of local page fault          */
+     unsigned long  false_pgfault_cost;    /*! cumulated cost                      */
+     unsigned long  false_pgfault_max;     /*! max cost of a local page fault      */
+
+     unsigned long  local_pgfault_nr;      /*! number of local page fault          */
+     unsigned long  local_pgfault_cost;    /*! cumulated cost                      */
+     unsigned long  local_pgfault_max;     /*! max cost of a false page fault      */
+
+     unsigned long  global_pgfault_nr;     /*! number of global page fault         */
+     unsigned long  global_pgfault_cost;   /*! cumulated cost                      */
+     unsigned long  global_pgfault_max;    /*! max cost of a global page fault     */
+
+     unsigned long long  last_cycle;       /*! last cycle counter value (date)     */
+     unsigned long long  usr_cycles;       /*! user execution duration (cycles)    */
+     unsigned long long  sys_cycles;       /*! system execution duration (cycles)  */
+ }
+ thread_info_t;

  #endif /* _SHARED_ALMOS_H_ */
trunk/kernel/syscalls/shared_include/syscalls_numbers.h
r637 → r641

  typedef enum
  {
      SYS_THREAD_EXIT       = 0,
      SYS_THREAD_YIELD      = 1,
      SYS_THREAD_CREATE     = 2,
      SYS_THREAD_JOIN       = 3,
      SYS_THREAD_DETACH     = 4,
      SYS_THREAD_CANCEL     = 5,
      SYS_SEM               = 6,
      SYS_CONDVAR           = 7,
      SYS_BARRIER           = 8,
      SYS_MUTEX             = 9,

      SYS_RENAME            = 10,
      SYS_MUNMAP            = 11,
      SYS_OPEN              = 12,
      SYS_MMAP              = 13,
      SYS_READ              = 14,
      SYS_WRITE             = 15,
      SYS_LSEEK             = 16,
      SYS_CLOSE             = 17,
      SYS_UNLINK            = 18,
      SYS_PIPE              = 19,

      SYS_CHDIR             = 20,
      SYS_MKDIR             = 21,
      SYS_MKFIFO            = 22,
      SYS_OPENDIR           = 23,
      SYS_READDIR           = 24,
      SYS_CLOSEDIR          = 25,
      SYS_GETCWD            = 26,
      SYS_ISATTY            = 27,
      SYS_ALARM             = 28,
      SYS_RMDIR             = 29,

      SYS_UTLS              = 30,
      SYS_CHMOD             = 31,
      SYS_SIGNAL            = 32,
      SYS_TIMEOFDAY         = 33,
      SYS_KILL              = 34,
      SYS_GETPID            = 35,
      SYS_FORK              = 36,
      SYS_EXEC              = 37,
      SYS_STAT              = 38,
      SYS_WAIT              = 39,

      SYS_GET_CONFIG        = 40,
      SYS_GET_CORE_ID       = 41,
      SYS_GET_CYCLE         = 42,
      SYS_DISPLAY           = 43,
      SYS_PLACE_FORK        = 44,
      SYS_THREAD_SLEEP      = 45,
      SYS_THREAD_WAKEUP     = 46,
      SYS_TRACE             = 47,
      SYS_FG                = 48,
      SYS_IS_FG             = 49,

      SYS_EXIT              = 50,
      SYS_SYNC              = 51,
      SYS_FSYNC             = 52,
      SYS_GET_BEST_CORE     = 53,
      SYS_GET_NB_CORES      = 54,
+     SYS_GET_THREAD_INFO   = 55,

-     SYSCALLS_NR           = 55,
+     SYSCALLS_NR           = 56,

  } syscalls_t;
trunk/kernel/syscalls/sys_munmap.c
r640 → r641

  #if DEBUG_SYS_MUNMAP
  if( DEBUG_SYS_MUNMAP < tm_start )
- printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
- __FUNCTION__ , this, process->pid, (uint32_t)tm_start );
+ printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
+ __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start );
  #endif
…
          return -1;
      }
-     else if( (vseg_min == addr_min) && (vseg_min == vseg_max) )
+     else if( (vseg_min == addr_min) && (vseg_max == addr_max) )
      {

  #if( DEBUG_SYS_MUNMAP & 1 )
- if( DEBUG_SYS_MUNMAP < cycle )
- printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg deleted\n",
- __FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
+ if( DEBUG_SYS_MUNMAP < tm_start )
+ printk("\n[%s] thread[%x,%x] unmapped region[%x->%x[ / vseg[%x->%x[ => delete vseg\n",
+ __FUNCTION__, process->pid, this->trdid, addr_min, addr_max, vseg_min, vseg_max );
  #endif
          // delete existing vseg
…
          vseg_min );
      }
-     else if( (vseg_min == addr_min) || (vseg_min == vseg_max) )
+     else if( (vseg_min == addr_min) || (vseg_max == addr_max) )
      {

  #if( DEBUG_SYS_MUNMAP & 1 )
- if( DEBUG_SYS_MUNMAP < cycle )
- printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg resized\n",
- __FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
+ if( DEBUG_SYS_MUNMAP < tm_start )
+ printk("\n[%s] thread[%x,%x] unmapped region[%x->%x[ / vseg[%x->%x[ => resize vseg\n",
+ __FUNCTION__, process->pid, this->trdid, addr_min, addr_max, vseg_min, vseg_max );
  #endif
          // resize existing vseg
…
          addr_max - addr_min );
      }
-     else    // vseg_min < addr_min) && (addr_max < vseg_max)
+     else    // addr_min > vseg_min) && (addr_max < vseg_max)
      {

  #if( DEBUG_SYS_MUNMAP & 1 )
- if( DEBUG_SYS_MUNMAP < cycle )
- printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg resized & new vseg created\n",
- __FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
+ if( DEBUG_SYS_MUNMAP < tm_start )
+ printk("\n[%s] thread[%x,%x] unmapped region[%x->%x[ / vseg[%x->%x[ => create new vseg\n",
+ __FUNCTION__, process->pid, this->trdid, addr_min, addr_max, vseg_min, vseg_max );
  #endif
          // resize existing vseg
…
  #if DEBUG_SYS_MUNMAP
  if( DEBUG_SYS_MUNMAP < tm_start )
- printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n",
- __FUNCTION__ , this, process->pid, (uint32_t)tm_end );
+ printk("\n[%s] thread [%x,%x] exit / cycle %d\n",
+ __FUNCTION__ , process->pid, this->trdid, (uint32_t)tm_end );
  #endif
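The corrected tests above distinguish three overlap cases between the unmapped region [addr_min,addr_max[ and the containing vseg [vseg_min,vseg_max[. The standalone sketch below restates that classification outside the kernel sources; the enum and function names are illustrative only, they are not part of the changeset.

    #include <stdint.h>

    typedef enum { UNMAP_DELETE , UNMAP_RESIZE , UNMAP_SPLIT } unmap_case_t;

    // classify an unmap request against the vseg that contains it, mirroring
    // the corrected conditions in sys_munmap(); the caller is assumed to have
    // already checked that [addr_min,addr_max[ lies inside [vseg_min,vseg_max[
    static unmap_case_t classify_unmap( intptr_t vseg_min , intptr_t vseg_max ,
                                        intptr_t addr_min , intptr_t addr_max )
    {
        if( (vseg_min == addr_min) && (vseg_max == addr_max) )
            return UNMAP_DELETE;   // whole vseg unmapped => delete vseg
        if( (vseg_min == addr_min) || (vseg_max == addr_max) )
            return UNMAP_RESIZE;   // prefix or suffix unmapped => resize vseg
        return UNMAP_SPLIT;        // hole in the middle => resize + create new vseg
    }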
trunk/kernel/syscalls/syscalls.h
r640 → r641

                              uint32_t * ncores );

+ /******************************************************************************************
+  * [55] This function implements the non-standard "get_thread_info" syscall.
+  * It copies in the user structure defined by the <info> argument the values registered
+  * in the calling thread "thread_info_t" kernel structure.
+  ******************************************************************************************
+  * @ info   : [out] pointer on thread_info_t structure in user space.
+  * @ return 0 if success / return -1 if illegal argument.
+  *****************************************************************************************/
+ int sys_get_thread_info( thread_info_t * info );
+
  #endif // _SYSCALLS_H_
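The comment above defines the contract of the new syscall. A minimal kernel-side sketch follows; the argument check and the kernel-to-user copy helper are assumptions (the actual sys_get_thread_info.c implementation is not shown in this changeset), only the prototype and the thread->info source are taken from it.

    // minimal sketch, not the committed implementation: copy_to_user_sketch()
    // stands for whatever user-space copy / access-check helper the kernel uses
    int sys_get_thread_info( thread_info_t * info )
    {
        thread_t * this = CURRENT_THREAD;

        // check user buffer
        if( info == NULL ) return -1;

        // copy the per-thread instrumentation structure to user space
        copy_to_user_sketch( info , &this->info , sizeof(thread_info_t) );

        return 0;
    }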
trunk/libs/libalmosmkh/almosmkh.c
r640 → r641

  /////////////// non standard debug functions ///////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////////////

+ ///////////////////////////////////////////
+ int get_thread_info( thread_info_t * info )
+ {
+     return hal_user_syscall( SYS_GET_THREAD_INFO,
+                              (reg_t)info, 0, 0, 0 );
+ }

  ////////////////////////////////////
trunk/libs/libalmosmkh/almosmkh.h
r640 → r641

  /***************** Non standard (ALMOS-MKH specific) debug functions ******************/

+ /***************************************************************************************
+  * This syscall copies in the user structure defined by the <info> argument the values
+  * registered in the calling thread "thread_info_t" kernel structure.
+  ***************************************************************************************
+  * @ info   : [out] pointer on thread_info_t structure in user space.
+  * @ return 0 if success / return -1 if illegal argument.
+  **************************************************************************************/
+ int get_thread_info( thread_info_t * info );

  /***************************************************************************************
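The declaration above is the user-side entry point for the new syscall; the fft.c changes later in this changeset use it to fill per-thread counters. A small self-contained usage example, assuming the application includes <almosmkh.h> as the other user programs in this tree do:

    #include <stdio.h>
    #include <almosmkh.h>

    // print the page-fault profile of the calling thread, guarding the
    // average against a zero event count
    void print_pgfault_profile( void )
    {
        thread_info_t info;

        if( get_thread_info( &info ) )
        {
            printf("cannot get thread info\n");
            return;
        }

        unsigned long nr   = info.false_pgfault_nr   + info.local_pgfault_nr   + info.global_pgfault_nr;
        unsigned long cost = info.false_pgfault_cost + info.local_pgfault_cost + info.global_pgfault_cost;

        printf("page faults : %lu / average cost %lu cycles\n", nr , nr ? (cost / nr) : 0 );
    }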
trunk/params-hard.mk
r640 → r641

  ARCH      = /Users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
- X_SIZE    = 1
+ X_SIZE    = 4
  Y_SIZE    = 2
- NB_PROCS  = 1
+ NB_PROCS  = 4
  NB_TTYS   = 2
  IOC_TYPE  = IOC_BDV
trunk/user/fft/fft.c
r640 → r641

  // parameters

- #define DEFAULT_M              14              // 16384 data points
+ #define DEFAULT_M              16              // 16384 data points
  #define USE_DQT_BARRIER        1               // use DDT barrier if non zero
  #define MODE                   COSIN           // DATA array initialisation mode
…
  // instrumentation counters
+ unsigned int pgfault_nr[THREADS_MAX];     // total number of page faults (per thread)
+ unsigned int pgfault_cost[THREADS_MAX];   // total page faults cost (per thread)
+ unsigned int pgfault_max[THREADS_MAX];    // max page faults cost (per thread)
  unsigned int parallel_time[THREADS_MAX];  // total computation time (per thread)
  unsigned int sync_time[THREADS_MAX];      // cumulated waiting time in barriers (per thread)
…
- // get instrumentation results for each thread
+ // initializes global (all threads) instrumentation values
+ unsigned int time_para     = parallel_time[0];
+ unsigned int time_sync     = sync_time[0];
+ unsigned int pgfaults_nr   = 0;
+ unsigned int pgfaults_cost = 0;
+ unsigned int pgfaults_max  = pgfault_max[0];
+
+ // loop on threads to compute global instrumentation results
  for (tid = 0 ; tid < nthreads ; tid++)
  {
-     snprintf( string , 256 , "- tid %d : Sequencial %d / Parallel %d / Barrier %d\n",
-               tid, init_time, parallel_time[tid], sync_time[tid] );
+     snprintf( string , 256 ,
+     "- tid %d : Seq %d / Para %d / Sync %d / Pgfaults %d ( cost %d / max %d )\n",
+     tid, init_time, parallel_time[tid], sync_time[tid],
+     pgfault_nr[tid], (pgfault_cost[tid] / pgfault_nr[tid]) , pgfault_max[tid] );

      // save to instrumentation file
…
      if( ret < 0 )
      {
-         printf("\n[fft error] cannot write thread %d to file <%s>\n", tid, path );
+         printf("\n[fft error] cannot save thread %d results to file <%s>\n", tid, path );
          printf("%s", string );
          exit(0);
      }
- }
-
- // compute min/max values
- unsigned int min_para = parallel_time[0];
- unsigned int max_para = parallel_time[0];
- unsigned int min_sync = sync_time[0];
- unsigned int max_sync = sync_time[0];
-
- for (tid = 0 ; tid < nthreads ; tid++)
- {
-     if (parallel_time[tid] > max_para) max_para = parallel_time[tid];
-     if (parallel_time[tid] < min_para) min_para = parallel_time[tid];
-     if (sync_time[tid] > max_sync)     max_sync = sync_time[tid];
-     if (sync_time[tid] < min_sync)     min_sync = sync_time[tid];
- }
-
- // display MIN/MAX values on terminal and save to file
- snprintf( string , 256 , "\n      Sequencial  Parallel  Barrier\n"
-           "MIN : %d\t | %d\t | %d\t (cycles)\n"
-           "MAX : %d\t | %d\t | %d\t (cycles)\n",
-           (int)init_time, (int)min_para, (int)min_sync,
-           (int)init_time, (int)max_para, (int)max_sync );
+
+     // compute global values
+     if (parallel_time[tid] > time_para) time_para = parallel_time[tid];
+     if (sync_time[tid] > time_sync)     time_sync = sync_time[tid];
+     pgfaults_nr   += pgfault_nr[tid];
+     pgfaults_cost += pgfault_cost[tid];
+     if (pgfault_max[tid] > pgfaults_max) pgfaults_max = pgfault_max[tid];
+ }
+
+ // display global values on terminal and save to file
+ snprintf( string , 256 ,
+ "\nSeq %d / Para %d / Sync %d / Pgfaults %d ( cost %d / max %d )\n",
+ init_time, time_para, time_sync, pgfaults_nr, (pgfaults_cost / pgfaults_nr), pgfaults_max );
+
  printf("%s", string );
+
+ // save global values to file
  ret = fprintf( f , "%s", string );
+
  if( ret < 0 )
  {
-     printf("\n[fft error] cannot write MIN/MAX to file <%s>\n", path );
+     printf("\n[fft error] cannot save global results to file <%s>\n", path );
+     exit(0);
+ }
+
+ // close instrumentation file
+ ret = fclose( f );
+
+ if( ret < 0 )
+ {
+     printf("\n[fft error] cannot close file <%s>\n", path );
      exit(0);
  }

  #if DEBUG_MAIN
  get_cycle( &debug_cycle );
- printf("\n[fft] main close file <%s> at cycle %d\n",
+ printf("\n[fft] main exit <%s> at cycle %d\n",
  path, (unsigned int)debug_cycle );
  #endif
…
  get_cycle( &parallel_stop );

- // register parallel time
+ // register parallel time in instrumentation counters
  parallel_time[tid] = (unsigned int)(parallel_stop - parallel_start);

+ // get work thread info for page faults
+ thread_info_t info;
+ get_thread_info( &info );
+
+ // register page faults in instrumentation counters
+ pgfault_nr[tid]   = info.false_pgfault_nr +
+                     info.local_pgfault_nr +
+                     info.global_pgfault_nr;
+ pgfault_cost[tid] = info.false_pgfault_cost +
+                     info.local_pgfault_cost +
+                     info.global_pgfault_cost;
+ pgfault_max[tid]  = info.false_pgfault_max +
+                     info.local_pgfault_max +
+                     info.global_pgfault_max;
  #if DEBUG_WORK
  printf("\n[fft] %s : thread %d completes fft / p_start %d / p_stop %d\n",
trunk/user/ksh/ksh.c
r640 → r641

  #if DEBUG_CMD_CAT
- snprintf( string , 128 , "[ksh] enter %s" , __FUNCTION__);
+ snprintf( string , 128 , "[ksh] %s enters" , __FUNCTION__);
  display_string( string );
  #endif
…
  #if DEBUG_CMD_CAT
- snprintf( string , 128 , "[ksh] in %s : after strcpy" , __FUNCTION__ );
+ snprintf( string , 128 , "[ksh] %s : after strcpy" , __FUNCTION__ );
  display_string( string );
  #endif
…
  #if DEBUG_CMD_CAT
- snprintf( string , 128 , "[ksh] %s : size = %d", __FUNCTION__, size );
+ snprintf( string , 128 , "[ksh] %s : size = %d",
+           __FUNCTION__, size );
  display_string( string );
  #endif
…
  #if DEBUG_CMD_CAT
- snprintf( string , 128 , "[ksh] %s : maped file %d to buffer %x", __FUNCTION__, fd , buf );
+ snprintf( string , 128 , "[ksh] %s : mapped file %d to buffer %x",
+           __FUNCTION__, fd , buf );
  display_string( string );
  #endif
…
      write( 1 , buf , size );

-     // unmap te file
+     // unmap the file
      if( munmap( buf , size ) )
      {
…
  #if DEBUG_CMD_CAT
- snprintf( string , 128 , "[ksh] %s : unmaped file %d from buffer %x", __FUNCTION__, fd , buf );
+ snprintf( string , 128 , "[ksh] %s : unmapped file %d from buffer %x",
+           __FUNCTION__, fd , buf );
  display_string( string );
  #endif
…
  #if DEBUG_CMD_LS
- snprintf( string , 128 , "[ksh] %s : directory <%s> open / DIR %x \n",
+ snprintf( string , 128 , "[ksh] %s : directory <%s> open / DIR %x",
  __FUNCTION__, pathname , dir );
  display_string( string );
…
  #if DEBUG_CMD_LS
- snprintf( string , 128 , "[ksh] %s : directory <%s> closed \n",
+ snprintf( string , 128 , "[ksh] %s : directory <%s> closed",
  __FUNCTION__, pathname );
  display_string( string );
…
      sem_post( &semaphore );

- } // end_cmd_rm()
+ } // end cmd_rm()
+
+ ///////////////////////////////////////////////
+ static void cmd_stat( int argc , char **argv )
+ {
+     struct stat  st;
+     unsigned int size;
+
+     if (argc != 2)
+     {
+         printf("  usage: %s pathname\n", argv[0]);
+     }
+     else
+     {
+         strcpy( pathname , argv[1] );
+
+         if ( stat( pathname , &st ) )
+         {
+             printf("  error: cannot stat <%s>\n", argv[2] );
+         }
+         else
+         {
+             // get file size
+             size = st.st_size;
+
+             // print file stat info
+             printf("  <%s> : %d bytes\n", pathname, size );
+         }
+     }
+
+     // release semaphore to get next command
+     sem_post( &semaphore );
+
+ } // end cmd_stat()
…
      { "rm",      "remove a file from file system",       cmd_rm      },
      { "rmdir",   "remove a directory from file system",  cmd_rmdir   },
+     { "stat",    "print infos on a given file",          cmd_stat    },
      { "trace",   "activate trace for a given core",      cmd_trace   },
      { "untrace", "desactivate trace for a given core",   cmd_untrace },
…
  #if DEBUG_EXECUTE
- snprintf( string , 128 , "\n[ksh] in %s : argc = %d / arg0 = %s / arg1 = %s \n",
+ snprintf( string , 128 , "\n[ksh] in %s : argc = %d / arg0 = %s / arg1 = %s",
  __FUNCTION__ , argc , argv[0], argv[1] );
  #endif
…
      char cmd[CMD_MAX_SIZE];       // buffer for one command

- /*  1. first direct command
+     // 1. first direct command
      if( sem_wait( &semaphore ) )
      {
…
      else
      {
-         printf("\n[ksh] load bin/user/sort.elf\n");
+         strcpy( cmd , "load bin/user/fft.elf" );
+         printf("[ksh] %s\n", cmd );
+         execute( cmd );
      }
-
-     strcpy( cmd , "load bin/user/sort.elf" );
-     execute( cmd );
- */
-
-     // 2. second direct command
+     //
+
+ /*  2. second direct command
      if( sem_wait( &semaphore ) )
      {
…
      else
      {
-         printf("\n[ksh] load bin/user/fft.elf\n");
+         strcpy( cmd , "cat home/p_fft_dqt_16384_1_2" );
+         printf("[ksh] %s\n", cmd );
+         execute( cmd );
      }
-
-     strcpy( cmd , "load bin/user/fft.elf" );
-     execute( cmd );
-     //
+ */
…
      {
          // initialize command buffer
-         // memset( cmd , 0x20 , sizeof(cmd) );   // TODO useful ?
          count = 0;
          state = NORMAL;
…
  #if DEBUG_MAIN
- snprintf( string , 128 , "\n[ksh] main thread started on core[%x,%d] \n", cxy , lid );
+ snprintf( string , 128 , "\n[ksh] main thread started on core[%x,%d]", cxy , lid );
  display_string( string );
  #endif
…
  #if DEBUG_MAIN
- snprintf( string , 128 , "\n[ksh] main initialized semaphore \n" );
+ snprintf( string , 128 , "\n[ksh] main initialized semaphore" );
  display_string( string );
  #endif
…
                              NULL );
  #if DEBUG_MAIN
- snprintf( string , 128 , "\n[ksh] main thread launched interactive thread %x \n", trdid );
+ snprintf( string , 128 , "\n[ksh] main thread launched interactive thread %x", trdid );
  display_string( string );
  #endif