Changeset 440 for trunk/kernel/kern
- Timestamp: May 3, 2018, 5:51:22 PM (7 years ago)
- Location: trunk/kernel/kern
- Files: 11 edited
trunk/kernel/kern/chdev.c
r438 r440 124 124 { 125 125 thread_t * server_ptr; // local pointer on server thread associated to chdev 126 xptr_t server_xp; // extended pointer on server thread 126 127 core_t * core_ptr; // local pointer on core running the server thread 127 128 uint32_t lid; // core running the server thread local index … … 140 141 thread_t * this = CURRENT_THREAD; 141 142 142 // get device descriptorcluster and local pointer143 // get chdev cluster and local pointer 143 144 cxy_t chdev_cxy = GET_CXY( chdev_xp ); 144 chdev_t * chdev_ptr = (chdev_t *)GET_PTR( chdev_xp ); 145 chdev_t * chdev_ptr = GET_PTR( chdev_xp ); 146 147 // get local and extended pointers on server thread 148 server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) ); 149 server_xp = XPTR( chdev_cxy , server_ptr ); 150 151 // get local pointer on core running the server thread 152 core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) ); 153 154 // get server core local index 155 lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) ); 145 156 146 157 #if (DEBUG_CHDEV_CMD_RX || DEBUG_CHDEV_CMD_TX) … … 162 173 #endif 163 174 164 // build extended pointers on client thread xlist and device root 165 xptr_t list_xp = XPTR( local_cxy , &this->wait_list ); 166 xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root ); 167 168 // get local pointer on server thread 169 server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) ); 170 171 // build extended pointer on chdev lock protecting queue 175 // build extended pointer on client thread xlist 176 xptr_t list_xp = XPTR( local_cxy , &this->wait_list ); 177 178 // build extended pointer on chdev waiting queue root 179 xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root ); 180 181 // build extended pointer on server thread blocked state 182 xptr_t blocked_xp = XPTR( chdev_cxy , &server_ptr->blocked ); 183 184 // build extended pointer on lock protecting chdev waiting queue 172 185 lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock ); 173 186 174 // get local pointer on core running the server thread 175 core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) ); 176 177 // get core local index 178 lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) ); 179 180 // compute server core != thread core 181 different = (lid != this->core->lid) || (local_cxy != chdev_cxy); 182 183 // enter critical section to make atomic : 184 // (1) client blocking 185 // (2) client registration in server queue 186 // (3) IPI to force server scheduling 187 // (4) descheduling 187 // critical section for the following sequence: 188 // (1) take the lock protecting waiting queue 189 // (2) block the client thread 190 // (3) unblock the server thread if required 191 // (4) register client thread in server queue 192 // (5) send IPI to force server scheduling 193 // (6) release the lock protecting waiting queue 194 // (7) deschedule 188 195 // ... 
in this order 196 197 // enter critical section 189 198 hal_disable_irq( &save_sr ); 199 200 // take the lock 201 remote_spinlock_lock( lock_xp ); 190 202 191 203 // block current thread 192 204 thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO ); 193 205 206 if( hal_remote_lw( blocked_xp ) & THREAD_BLOCKED_IDLE ) 207 thread_unblock( server_xp , THREAD_BLOCKED_IDLE ); 208 194 209 // register client thread in waiting queue 195 remote_spinlock_lock( lock_xp );196 210 xlist_add_last( root_xp , list_xp ); 197 remote_spinlock_unlock( lock_xp ); 198 199 // send IPI to core running the server thread if required211 212 // send IPI to core running the server thread when server != client 213 different = (lid != this->core->lid) || (local_cxy != chdev_cxy); 200 214 if( different ) dev_pic_send_ipi( chdev_cxy , lid ); 201 215 216 // release lock 217 remote_spinlock_unlock( lock_xp ); 218 202 219 // deschedule 203 220 assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" ); … … 260 277 remote_spinlock_unlock( lock_xp ); 261 278 279 // block 280 thread_block( XPTR( local_cxy , server ) , THREAD_BLOCKED_IDLE ); 281 262 282 // deschedule 283 assert( thread_can_yield( server ) , __FUNCTION__ , "illegal sched_yield\n" ); 263 284 sched_yield("I/O queue empty"); 264 285 } 265 286 else // waiting queue not empty 266 287 { 288 // get extended pointer on first client thread 289 client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list ); 290 291 // get client thread cluster and local pointer 292 client_cxy = GET_CXY( client_xp ); 293 client_ptr = GET_PTR( client_xp ); 294 295 // remove this first client thread from waiting queue 296 xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) ); 297 267 298 // release lock 268 299 remote_spinlock_unlock( lock_xp ); 269 270 // get extended pointer on first client thread271 client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );272 273 // get client thread cluster, local pointer, and identifier274 client_cxy = GET_CXY( client_xp );275 client_ptr = (thread_t *)GET_PTR( client_xp );276 300 277 301 #if DEBUG_CHDEV_SERVER_RX … … 300 324 chdev->cmd( client_xp ); 301 325 302 // remove the client thread from waiting queue303 remote_spinlock_lock( lock_xp );304 xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );305 remote_spinlock_unlock( lock_xp );306 307 326 // unblock client thread 308 327 thread_unblock( client_xp , THREAD_BLOCKED_IO ); … … 343 362 chdev_t * chdev_ptr; 344 363 364 assert( (file_xp != XPTR_NULL) , __FUNCTION__, 365 "file_xp == XPTR_NULL\n" ); 366 345 367 // get cluster and local pointer on remote file descriptor 346 368 // associated inode and chdev are stored in same cluster as the file desc. … … 353 375 354 376 assert( (inode_type == INODE_TYPE_DEV) , __FUNCTION__ , 355 "inode type %d is not INODE_TYPE_DEV ", inode_type );377 "inode type %d is not INODE_TYPE_DEV\n", inode_type ); 356 378 357 379 // get chdev local pointer from inode extension -
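The reordered sequence in chdev_register_command() is the heart of this change: the client must block itself, wake an idle server, and register its request while holding the queue lock, so the server can never miss a pending command. Below is a minimal user-space analogue of that handshake, only to illustrate the ordering. All names are hypothetical stand-ins: a pthread mutex models the remote spinlock, plain booleans model the THREAD_BLOCKED_IO / THREAD_BLOCKED_IDLE bits, and the IPI is a no-op; this is not the kernel API.

```c
/* Sketch of the client side of the reordered chdev_register_command(). */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct request { struct request *next; };

struct chdev_model {
    pthread_mutex_t  lock;        /* models chdev->wait_lock              */
    struct request  *head, *tail; /* models the chdev waiting queue       */
    bool server_idle;             /* models server THREAD_BLOCKED_IDLE    */
    bool client_blocked;          /* models client THREAD_BLOCKED_IO      */
};

static void send_ipi(void) { /* dev_pic_send_ipi() stand-in (no-op) */ }

void register_command(struct chdev_model *d, struct request *req)
{
    pthread_mutex_lock(&d->lock);               /* (1) take queue lock          */
    d->client_blocked = true;                   /* (2) block client             */
    if (d->server_idle) d->server_idle = false; /* (3) unblock idle server      */
    req->next = NULL;                           /* (4) append to waiting queue  */
    if (d->tail) d->tail->next = req; else d->head = req;
    d->tail = req;
    send_ipi();                                 /* (5) kick the server core     */
    pthread_mutex_unlock(&d->lock);             /* (6) release queue lock       */
    /* (7) the real code now deschedules; the server clears client_blocked
     *     when the I/O command completes                                       */
}
```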
trunk/kernel/kern/chdev.h
r428 r440 42 42 * independant) Channel Device descriptor (in brief "chdev"). 43 43 * ALMOS-MKH supports multi-channels peripherals, and defines one separated chdev 44 * descriptor for each channel (and for each RX/TX direction for the NIC device).44 * descriptor for each channel (and for each RX/TX direction for the NIC and TXT devices). 45 45 * Each chdev contains a waiting queue, registering the "client threads" requests, 46 46 * and an associated "server thread", handling these requests. -
trunk/kernel/kern/cluster.c
r438 r440 153 153 #endif 154 154 155 // initialises RPC fifo 156 local_fifo_init( &cluster->rpc_fifo ); 157 cluster->rpc_threads = 0; 155 // initialises RPC FIFOs 156 for( lid = 0 ; lid < cluster->cores_nr; lid++ ) 157 { 158 local_fifo_init( &cluster->rpc_fifo[lid] ); 159 cluster->rpc_threads[lid] = 0; 160 } 158 161 159 162 #if( DEBUG_CLUSTER_INIT & 1 ) … … 221 224 lid_t cluster_select_local_core() 222 225 { 223 uint32_t min = 100; 224 lid_t sel = 0; 225 lid_t lid; 226 uint32_t min = 1000; 227 lid_t sel = 0; 228 uint32_t nthreads; 229 lid_t lid; 230 scheduler_t * sched; 226 231 227 232 cluster_t * cluster = LOCAL_CLUSTER; … … 229 234 for( lid = 0 ; lid < cluster->cores_nr ; lid++ ) 230 235 { 231 if( cluster->core_tbl[lid].usage < min ) 236 sched = &cluster->core_tbl[lid].scheduler; 237 nthreads = sched->u_threads_nr + sched->k_threads_nr; 238 239 if( nthreads < min ) 232 240 { 233 min = cluster->core_tbl[lid].usage;241 min = nthreads; 234 242 sel = lid; 235 243 } … … 323 331 bool_t found; 324 332 333 #if DEBUG_CLUSTER_PID_ALLOC 334 uint32_t cycle = (uint32_t)hal_get_cycles(); 335 if( DEBUG_CLUSTER_PID_ALLOC < cycle ) 336 printk("\n[DBG] %s : thread %x enters in cluster %x / cycle %d\n", 337 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle ); 338 #endif 339 325 340 pmgr_t * pm = &LOCAL_CLUSTER->pmgr; 326 341 … … 361 376 } 362 377 378 #if DEBUG_CLUSTER_PID_ALLOC 379 cycle = (uint32_t)hal_get_cycles(); 380 if( DEBUG_CLUSTER_PID_ALLOC < cycle ) 381 printk("\n[DBG] %s : thread %x exit in cluster %x / pid %x / cycle %d\n", 382 __FUNCTION__ , CURRENT_THREAD , local_cxy , *pid , cycle ); 383 #endif 384 363 385 } // end cluster_pid_alloc() 364 386 … … 366 388 void cluster_pid_release( pid_t pid ) 367 389 { 390 391 #if DEBUG_CLUSTER_PID_RELEASE 392 uint32_t cycle = (uint32_t)hal_get_cycles(); 393 if( DEBUG_CLUSTER_PID_RELEASE < cycle ) 394 printk("\n[DBG] %s : thread %x enters in cluster %x / pid %x / cycle %d\n", 395 __FUNCTION__ , CURRENT_THREAD , local_cxy , pid , cycle ); 396 #endif 397 368 398 cxy_t owner_cxy = CXY_FROM_PID( pid ); 369 399 lpid_t lpid = LPID_FROM_PID( pid ); … … 371 401 pmgr_t * pm = &LOCAL_CLUSTER->pmgr; 372 402 373 // check pid argument 374 assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) , 375 __FUNCTION__ , "illegal PID" ); 403 // check lpid 404 assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER), __FUNCTION__ , 405 "illegal LPID = %d" , lpid ); 406 407 // check owner cluster 408 assert( (owner_cxy == local_cxy) , __FUNCTION__ , 409 "local_cluster %x != owner_cluster %x" , local_cxy , owner_cxy ); 376 410 377 411 // get the process manager lock … … 384 418 // release the processs_manager lock 385 419 spinlock_unlock( &pm->pref_lock ); 420 421 #if DEBUG_CLUSTER_PID_RELEASE 422 cycle = (uint32_t)hal_get_cycles(); 423 if( DEBUG_CLUSTER_PID_RELEASE < cycle ) 424 printk("\n[DBG] %s : thread %x exit in cluster %x / cycle %d\n", 425 __FUNCTION__ , CURRENT_THREAD , local_cxy , cycle ); 426 #endif 386 427 387 428 } // end cluster_pid_release() -
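The new cluster_select_local_core() no longer relies on the per-core usage counter; it picks the core whose scheduler currently holds the fewest threads (user plus kernel). A minimal standalone version of that selection loop, with simplified types and hypothetical field names, is shown below.

```c
#include <stdint.h>

struct sched_model { uint32_t u_threads_nr; uint32_t k_threads_nr; };

/* Return the local index of the least loaded core, as the patched
 * cluster_select_local_core() does with the real scheduler_t. */
uint32_t select_local_core(struct sched_model sched[], uint32_t cores_nr)
{
    uint32_t min = 1000;   /* same sentinel value as the patched code */
    uint32_t sel = 0;
    for (uint32_t lid = 0; lid < cores_nr; lid++) {
        uint32_t nthreads = sched[lid].u_threads_nr + sched[lid].k_threads_nr;
        if (nthreads < min) { min = nthreads; sel = lid; }
    }
    return sel;
}
```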
trunk/kernel/kern/cluster.h
r438 r440 96 96 typedef struct cluster_s 97 97 { 98 spinlock_t kcm_lock; /*! local, protect creation of KCM allocators*/98 spinlock_t kcm_lock; /*! local, protect creation of KCM allocators */ 99 99 100 100 // global parameters 101 uint32_t paddr_width; /*! numer of bits in physical address*/102 uint32_t x_width; /*! number of bits to code x_size (can be 0)*/103 uint32_t y_width; /*! number of bits to code y_size (can be 0)*/104 uint32_t x_size; /*! number of clusters in a row (can be 1)*/105 uint32_t y_size; /*! number of clusters in a column (can be 1)*/106 cxy_t io_cxy; /*! io cluster identifier*/107 uint32_t dqdt_root_level; /*! index of root node in dqdt_tbl[]*/108 uint32_t nb_txt_channels; /*! number of TXT channels*/109 uint32_t nb_nic_channels; /*! number of NIC channels*/110 uint32_t nb_ioc_channels; /*! number of IOC channels*/111 uint32_t nb_fbf_channels; /*! number of FBF channels*/101 uint32_t paddr_width; /*! numer of bits in physical address */ 102 uint32_t x_width; /*! number of bits to code x_size (can be 0) */ 103 uint32_t y_width; /*! number of bits to code y_size (can be 0) */ 104 uint32_t x_size; /*! number of clusters in a row (can be 1) */ 105 uint32_t y_size; /*! number of clusters in a column (can be 1) */ 106 cxy_t io_cxy; /*! io cluster identifier */ 107 uint32_t dqdt_root_level; /*! index of root node in dqdt_tbl[] */ 108 uint32_t nb_txt_channels; /*! number of TXT channels */ 109 uint32_t nb_nic_channels; /*! number of NIC channels */ 110 uint32_t nb_ioc_channels; /*! number of IOC channels */ 111 uint32_t nb_fbf_channels; /*! number of FBF channels */ 112 112 113 113 // local parameters 114 uint32_t cores_nr; /*! actual number of cores in cluster*/115 uint32_t ram_size; /*! physical memory size*/116 uint32_t ram_base; /*! physical memory base (local address)*/117 118 core_t core_tbl[CONFIG_MAX_LOCAL_CORES]; /*! embedded cores*/119 120 list_entry_t dev_root; /*! root of list of devices in cluster*/114 uint32_t cores_nr; /*! actual number of cores in cluster */ 115 uint32_t ram_size; /*! physical memory size */ 116 uint32_t ram_base; /*! physical memory base (local address) */ 117 118 core_t core_tbl[CONFIG_MAX_LOCAL_CORES]; /*! embedded cores */ 119 120 list_entry_t dev_root; /*! root of list of devices in cluster */ 121 121 122 122 // memory allocators 123 ppm_t ppm; /*! embedded kernel page manager*/124 khm_t khm; /*! embedded kernel heap manager*/125 kcm_t kcm; /*! embedded kernel cache manager (for KCMs)*/126 127 kcm_t * kcm_tbl[KMEM_TYPES_NR]; /*! pointers on allocated KCMs*/123 ppm_t ppm; /*! embedded kernel page manager */ 124 khm_t khm; /*! embedded kernel heap manager */ 125 kcm_t kcm; /*! embedded kernel KCMs manager */ 126 127 kcm_t * kcm_tbl[KMEM_TYPES_NR]; /*! pointers on allocated KCMs */ 128 128 129 129 // RPC 130 remote_fifo_t rpc_fifo; /*! RPC fifo (one per cluster)*/131 uint32_t rpc_threads; /*! current number of RPC threads in cluster*/130 remote_fifo_t rpc_fifo[CONFIG_MAX_LOCAL_CORES]; /*! one RPC FIFO per core */ 131 uint32_t rpc_threads[CONFIG_MAX_LOCAL_CORES]; /*! RPC threads per core */ 132 132 133 133 // DQDT 134 dqdt_node_t dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes in cluster*/134 dqdt_node_t dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes */ 135 135 136 136 // Local process manager 137 pmgr_t pmgr;/*! embedded process manager */138 139 void * pic_extend;/*! PIC implementation specific extension */137 pmgr_t pmgr; /*! embedded process manager */ 138 139 void * pic_extend; /*! 
PIC implementation specific extension */ 140 140 } 141 141 cluster_t; -
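The single per-cluster RPC FIFO becomes one FIFO (and one RPC-thread counter) per core, indexed by the core local index lid. The reduced sketch below shows the new layout and the initialization loop added to cluster_init(); the types and names are simplified stand-ins, not the real remote_fifo_t.

```c
#include <stdint.h>

#define MAX_LOCAL_CORES 4          /* stands in for CONFIG_MAX_LOCAL_CORES */

struct fifo_model { uint32_t wr; uint32_t rd; uint32_t owner; };

struct cluster_model {
    uint32_t          cores_nr;
    struct fifo_model rpc_fifo[MAX_LOCAL_CORES];    /* one RPC FIFO per core */
    uint32_t          rpc_threads[MAX_LOCAL_CORES]; /* RPC threads per core  */
};

static void fifo_init(struct fifo_model *f) { f->wr = f->rd = f->owner = 0; }

/* Mirrors the loop added to cluster_init(): every core gets its own
 * empty FIFO and a zeroed RPC-thread counter. */
void cluster_rpc_init(struct cluster_model *c)
{
    for (uint32_t lid = 0; lid < c->cores_nr; lid++) {
        fifo_init(&c->rpc_fifo[lid]);
        c->rpc_threads[lid] = 0;
    }
}
```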
trunk/kernel/kern/kernel_init.c
r438 r440 1238 1238 dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD ); 1239 1239 1240 #if DEBUG_KERNEL_INIT 1241 printk("\n[DBG] %s : thread %x on core[%x,%d] jumps to thread_idle_func() / cycle %d\n", 1242 __FUNCTION__ , CURRENT_THREAD , local_cxy , core_lid , (uint32_t)hal_get_cycles() ); 1243 #endif 1244 1240 1245 // each core jump to thread_idle_func 1241 1246 thread_idle_func(); -
trunk/kernel/kern/process.c
r438 r440 106 106 char rx_path[40]; 107 107 char tx_path[40]; 108 xptr_t file_xp; 108 109 xptr_t chdev_xp; 109 110 chdev_t * chdev_ptr; … … 179 180 assert( (stdin_id == 0) , __FUNCTION__ , "stdin index must be 0" ); 180 181 182 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 183 cycle = (uint32_t)hal_get_cycles(); 184 if( DEBUG_PROCESS_REFERENCE_INIT ) 185 printk("\n[DBG] %s : thread %x / stdin open for process %x / cycle %d\n", 186 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 187 #endif 188 181 189 // create stdout pseudo file 182 190 error = vfs_open( process, … … 190 198 assert( (stdout_id == 1) , __FUNCTION__ , "stdout index must be 1" ); 191 199 200 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 201 cycle = (uint32_t)hal_get_cycles(); 202 if( DEBUG_PROCESS_REFERENCE_INIT ) 203 printk("\n[DBG] %s : thread %x / stdout open for process %x / cycle %d\n", 204 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 205 #endif 206 192 207 // create stderr pseudo file 193 208 error = vfs_open( process, … … 201 216 assert( (stderr_id == 2) , __FUNCTION__ , "stderr index must be 2" ); 202 217 218 #if (DEBUG_PROCESS_REFERENCE_INIT & 1) 219 cycle = (uint32_t)hal_get_cycles(); 220 if( DEBUG_PROCESS_REFERENCE_INIT ) 221 printk("\n[DBG] %s : thread %x / stderr open for process %x / cycle %d\n", 222 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 223 #endif 224 203 225 } 204 226 else // normal user process 205 227 { 228 // get extended pointer on stdin pseudo file in model process 229 file_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy , &model_ptr->fd_array.array[0] ) ); 230 206 231 // get extended pointer on model process TXT chdev 207 chdev_xp = chdev_from_file( model_ptr->fd_array.array[0]);232 chdev_xp = chdev_from_file( file_xp ); 208 233 209 234 // get cluster and local pointer on chdev … … 374 399 uint32_t cycle = (uint32_t)hal_get_cycles(); 375 400 if( DEBUG_PROCESS_DESTROY ) 376 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x)/ cycle %d\n",377 __FUNCTION__ , CURRENT_THREAD , p rocess, pid, cycle );401 printk("\n[DBG] %s : thread %x enter in cluster %x / pid %x / process %x / cycle %d\n", 402 __FUNCTION__ , CURRENT_THREAD , pid , process , cycle ); 378 403 #endif 379 404 … … 401 426 } 402 427 403 // release the process PID to cluster manager 404 cluster_pid_release( pid );428 // release the process PID to cluster manager if owner cluster 429 if( CXY_FROM_PID( pid ) == local_cxy ) cluster_pid_release( pid ); 405 430 406 431 // FIXME close all open files and update dirty [AG] … … 507 532 XLIST_FOREACH( root_xp , iter_xp ) 508 533 { 534 // atomically increment responses counter 535 hal_atomic_add( (void *)&rpc.responses , 1 ); 536 537 process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list ); 538 process_cxy = GET_CXY( process_xp ); 509 539 510 540 #if DEBUG_PROCESS_SIGACTION … … 513 543 __FUNCTION__ , process_action_str( action_type ) , pid , process_cxy ); 514 544 #endif 515 // atomically increment responses counter516 hal_atomic_add( (void *)&rpc.responses , 1 );517 518 process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list );519 process_cxy = GET_CXY( process_xp );520 521 545 // call RPC in target cluster 522 546 rpc_process_sigaction_client( process_cxy , &rpc ); … … 529 553 hal_restore_irq( save_sr); 530 554 531 // client deschedule : will be unblocked by the last RPC server thread555 // client thread deschedule : will be unblocked by the last RPC server thread 532 556 sched_yield("blocked on rpc_process_sigaction"); 533 557 … … 542 566 543 567 
///////////////////////////////////////////////// 544 void process_block_threads( process_t * process ) 568 void process_block_threads( process_t * process, 569 xptr_t client_xp ) 545 570 { 546 571 thread_t * target; // pointer on target thread … … 567 592 spinlock_lock( &process->th_lock ); 568 593 569 // loop to block all threads but the main thread594 // loop on target process local threads 570 595 // we use both "ltid" and "count" because it can exist "holes" in th_tbl 571 596 for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ ) … … 577 602 count++; 578 603 579 // main thread should not be deleted 580 if( (ltid != 0) || (owner_cxy != local_cxy) ) 604 // main thread and client thread should not be blocked 605 if( ((ltid != 0) || (owner_cxy != local_cxy)) && // not main thread 606 (client_xp) != XPTR( local_cxy , target ) ) // not client thread 581 607 { 582 608 // set the global blocked bit in target thread descriptor. … … 626 652 } // end process_block_threads() 627 653 628 ///////////////////////////////////////////////// //629 void process_ unblock_threads( process_t * process )630 { 631 thread_t * target; // pointer on target thead 654 ///////////////////////////////////////////////// 655 void process_delete_threads( process_t * process, 656 xptr_t client_xp ) 657 { 632 658 thread_t * this; // pointer on calling thread 659 thread_t * target; // local pointer on target thread 660 xptr_t target_xp; // extended pointer on target thread 661 cxy_t owner_cxy; // owner process cluster 633 662 uint32_t ltid; // index in process th_tbl 634 uint32_t count; // requests counter663 uint32_t count; // threads counter 635 664 636 665 // get calling thread pointer 637 666 this = CURRENT_THREAD; 667 668 // get target process owner cluster 669 owner_cxy = CXY_FROM_PID( process->pid ); 638 670 639 671 #if DEBUG_PROCESS_SIGACTION … … 647 679 spinlock_lock( &process->th_lock ); 648 680 649 // loop on process threads to unblock all threads681 // loop on target process local threads 650 682 // we use both "ltid" and "count" because it can exist "holes" in th_tbl 651 for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )683 for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ ) 652 684 { 653 685 target = process->th_tbl[ltid]; 654 686 655 if( target != NULL ) // thread found687 if( target != NULL ) // valid thread 656 688 { 657 689 count++; 658 659 // reset the global blocked bit in target thread descriptor. 
660 thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL ); 690 target_xp = XPTR( local_cxy , target ); 691 692 // main thread and client thread should not be blocked 693 if( ((ltid != 0) || (owner_cxy != local_cxy)) && // not main thread 694 (client_xp) != target_xp ) // not client thread 695 { 696 // mark target thread for delete and block it 697 thread_delete( target_xp , process->pid , false ); // not forced 698 } 661 699 } 662 700 } … … 672 710 #endif 673 711 674 } // end process_unblock_threads() 675 676 ////////////////////////////////////////////////// 677 void process_delete_threads( process_t * process ) 678 { 679 thread_t * target; // pointer on target thread 712 } // end process_delete_threads() 713 714 /////////////////////////////////////////////////// 715 void process_unblock_threads( process_t * process ) 716 { 717 thread_t * target; // pointer on target thead 718 thread_t * this; // pointer on calling thread 680 719 uint32_t ltid; // index in process th_tbl 681 uint32_t count; // threads counter 720 uint32_t count; // requests counter 721 722 // get calling thread pointer 723 this = CURRENT_THREAD; 682 724 683 725 #if DEBUG_PROCESS_SIGACTION … … 685 727 if( DEBUG_PROCESS_SIGACTION < cycle ) 686 728 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 687 __FUNCTION__ , CURRENT_THREAD, process->pid , local_cxy , cycle );729 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 688 730 #endif 689 731 … … 691 733 spinlock_lock( &process->th_lock ); 692 734 693 // loop to set the REQ_DELETE flag on all threads but the main735 // loop on process threads to unblock all threads 694 736 // we use both "ltid" and "count" because it can exist "holes" in th_tbl 695 for( ltid = 0 , count = 0 737 for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ ) 696 738 { 697 739 target = process->th_tbl[ltid]; 698 740 699 if( target != NULL ) 741 if( target != NULL ) // thread found 700 742 { 701 743 count++; 702 703 thread_kill( XPTR( local_cxy , target ), 704 false, // is_exit 705 true ); // is_forced 744 745 // reset the global blocked bit in target thread descriptor. 
746 thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL ); 706 747 } 707 748 } … … 714 755 if( DEBUG_PROCESS_SIGACTION < cycle ) 715 756 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 716 __FUNCTION__ , CURRENT_THREAD, process->pid , local_cxy , cycle );717 #endif 718 719 } // end process_ delete_threads()757 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 758 #endif 759 760 } // end process_unblock_threads() 720 761 721 762 /////////////////////////////////////////////// … … 749 790 750 791 // allocate memory for a new local process descriptor 751 // and initialise it from reference cluster if required792 // and initialise it from reference cluster if not found 752 793 if( !found ) 753 794 { … … 765 806 if( error ) return NULL; 766 807 } 808 809 #if DEBUG_PROCESS_GET_LOCAL_COPY 810 uint32_t cycle = (uint32_t)hal_get_cycles(); 811 if( DEBUG_PROCESS_GET_LOCAL_COPY < cycle ) 812 printk("\n[DBG] %s : enter in cluster %x / pid %x / process %x / cycle %d\n", 813 __FUNCTION__ , local_cxy , pid , process_ptr , cycle ); 814 #endif 767 815 768 816 return process_ptr; … … 1032 1080 // check parent process is the reference process 1033 1081 ref_xp = hal_remote_lwd( XPTR( parent_process_cxy , &parent_process_ptr->ref_xp ) ); 1034 1035 printk("\n@@@ %s : parent_cxy = %x / parent_ptr = %x / ref_cxy = %x / ref_ptr = %x\n",1036 __FUNCTION__, parent_process_cxy, parent_process_ptr, GET_CXY( ref_xp ), GET_PTR( ref_xp ) );1037 1082 1038 1083 assert( (parent_process_xp == ref_xp ) , __FUNCTION__ , -
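Both process_block_threads() and the new process_delete_threads() share the same scan: walk th_tbl[] with both an index and a live-thread count (the table can contain holes), and skip the main thread (local index 0 in the owner cluster) as well as the client thread that requested the action. A condensed standalone version of that filter, with simplified types and hypothetical names, follows.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define MAX_THREADS 8

struct thread_model  { int id; };
struct process_model {
    struct thread_model *th_tbl[MAX_THREADS]; /* may contain NULL holes   */
    uint32_t             th_nr;               /* number of live threads   */
    bool                 is_owner_cluster;    /* local_cxy == owner_cxy ? */
};

/* Apply 'action' to every local thread except the main thread
 * (slot 0 in the owner cluster) and the client thread itself. */
void for_each_blockable(struct process_model *p,
                        struct thread_model  *client,
                        void (*action)(struct thread_model *))
{
    uint32_t count = 0;
    for (uint32_t ltid = 0; count < p->th_nr && ltid < MAX_THREADS; ltid++) {
        struct thread_model *t = p->th_tbl[ltid];
        if (t == NULL) continue;                      /* hole in th_tbl   */
        count++;
        bool is_main   = (ltid == 0) && p->is_owner_cluster;
        bool is_client = (t == client);
        if (!is_main && !is_client) action(t);
    }
}
```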
trunk/kernel/kern/process.h
r436 r440 101 101 * 4) The <sem_root>, <mutex_root>, <barrier_root>, <condvar_root>, and the associated 102 102 * <sync_lock>, that are dynamically allocated, are only defined in the reference cluster. 103 * 5) The <children_root>, <children_nr>, < brothers_list>, and <txt_list> fields are only103 * 5) The <children_root>, <children_nr>, <children_list>, and <txt_list> fields are only 104 104 * defined in the reference cluster, and are undefined in other clusters. 105 105 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields 106 106 * are defined in all process descriptors copies. 107 107 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster. 108 * The term state format is defined in the shared_syscalls.h file. 108 109 ********************************************************************************************/ 109 110 … … 282 283 * all threads of a process identified by the <pid> argument, depending on the 283 284 * <action_type> argument. 284 * WARNING : the DELETE a ction isNOT executed on the target process main thread285 * (thread 0 in process owner cluster) .285 * WARNING : the DELETE and BLOCK actions are NOT executed on the target process main thread 286 * (thread 0 in process owner cluster), and not executed on the calling thread itself. 286 287 * It uses the multicast, non blocking rpc_process_sigaction_client() function to send 287 * parallel requests to all remote clusters containing a process copy.288 * parallel requests to all remote clusters containing process copies. 288 289 * Then it blocks and deschedule to wait completion of these parallel requests. 289 290 * … … 305 306 306 307 /********************************************************************************************* 307 * This function blocks all threads - but the main thread - for a given <process> 308 * in a given cluster. It sets the THREAD_BLOCKED_GLOBAL bit in the thread descriptor, 309 * and request the relevant schedulers to acknowledge the blocking, using IPI if required. 308 * This function blocks all threads for a given <process> in the local cluster. 309 * It scan the list of local thread, and sets the THREAD_BLOCKED_GLOBAL bit for all 310 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread 311 * identified by the <client_xp> argument. It request the relevant schedulers to acknowledge 312 * the blocking, using IPI if required, and returns only when all blockable threads 313 * in cluster are actually blocked. 310 314 * The threads are not detached from the scheduler, and not detached from the local process. 311 * This function returns only when all blockable threads in cluster are actually blocked.312 315 ********************************************************************************************* 313 316 * @ process : pointer on the target process descriptor. 314 ********************************************************************************************/ 315 void process_block_threads( process_t * process ); 317 * @ client_xp : extended pointer on the client thread that should not be blocked. 318 ********************************************************************************************/ 319 void process_block_threads( process_t * process, 320 xptr_t client_xp ); 321 322 /********************************************************************************************* 323 * This function marks for deletion all threads for a given <process> in the local cluster. 
324 * It scan the list of local thread, and sets the THREAD_FLAG_REQ_DELETE bit for all 325 * threads, BUT the main thread (thread 0 in owner cluster), and the client thread 326 * identified by the <client_xp> argument. 327 * The actual deletion will be done by the scheduler at the next scheduling point. 328 ********************************************************************************************* 329 * @ process : pointer on the process descriptor. 330 * @ client_xp : extended pointer on the client thread that should not be marked. 331 ********************************************************************************************/ 332 void process_delete_threads( process_t * process, 333 xptr_t client_xp ); 316 334 317 335 /********************************************************************************************* … … 321 339 ********************************************************************************************/ 322 340 void process_unblock_threads( process_t * process ); 323 324 /*********************************************************************************************325 * This function marks for deletion all threads - but the main thread - for a given <process>326 * in a given cluster. It sets the THREAD_FLAG_REQ_DELETE bit. For each marked thread,327 * the following actions will be done by the scheduler at the next scheduling point:328 * - the thread will be detached from the scheduler.329 * - the thread will be detached from the local process descriptor.330 * - the thread will be detached from parent if required.331 * - the memory allocated to the thread descriptor is released.332 * - the memory allocated to the process descriptor is released, if it is the last thread.333 *********************************************************************************************334 * @ process : pointer on the process descriptor.335 ********************************************************************************************/336 void process_delete_threads( process_t * process );337 341 338 342 /********************************************************************************************* … … 398 402 struct thread_s ** child_thread_ptr ); 399 403 400 401 404 /******************** File Management Operations ****************************************/ 402 405 -
trunk/kernel/kern/rpc.c
r438 r440 114 114 client_core_lid = this->core->lid; 115 115 116 // select a server_core index: 117 // use client core index if possible / core 0 otherwise 116 // select a server_core : use client core index if possible / core 0 otherwise 118 117 if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) ) 119 118 { … … 133 132 134 133 // get local pointer on rpc_fifo in remote cluster, 135 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo ;134 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 136 135 137 136 // post RPC in remote fifo / deschedule and retry if fifo full … … 231 230 core_t * core = this->core; 232 231 scheduler_t * sched = &core->scheduler; 233 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo ;232 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid]; 234 233 235 234 #if DEBUG_RPC_SERVER_GENERIC … … 243 242 hal_disable_irq( &sr_save ); 244 243 245 // activate (or create) RPC thread if RPC FIFO not empty 244 // activate (or create) RPC thread if RPC FIFO not empty and no acive RPC thread 246 245 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 247 246 { … … 254 253 #endif 255 254 256 // search one IDLE RPC thread 255 // search one IDLE RPC thread associated to the selected core 257 256 list_entry_t * iter; 258 257 LIST_FOREACH( &sched->k_root , iter ) … … 270 269 } 271 270 272 // create new RPC thread if not found271 // create new RPC thread for the selected core if not found 273 272 if( found == false ) 274 273 { … … 277 276 &rpc_thread_func, 278 277 NULL, 279 this->core->lid ); 280 if( error ) 281 { 282 assert( false , __FUNCTION__ , 283 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 284 } 278 core->lid ); 279 280 assert( (error == 0), __FUNCTION__ , 281 "no memory to allocate a new RPC thread in cluster %x", local_cxy ); 285 282 286 283 // unblock created RPC thread 287 284 thread->blocked = 0; 288 285 289 // update core descriptorcounter290 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );286 // update RRPC threads counter 287 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 ); 291 288 292 289 #if DEBUG_RPC_SERVER_GENERIC … … 325 322 void rpc_thread_func() 326 323 { 327 uint32_t count; // handled RPC requests counter 328 error_t empty; // local RPC fifo state 329 xptr_t desc_xp; // extended pointer on RPC request 330 cxy_t desc_cxy; // RPC request cluster (client) 331 rpc_desc_t * desc_ptr; // RPC request local pointer 332 uint32_t index; // RPC request index 333 thread_t * thread_ptr; // local pointer on client thread 334 lid_t core_lid; // local index of client core 335 bool_t blocking; // blocking RPC when true 324 error_t empty; // local RPC fifo state 325 xptr_t desc_xp; // extended pointer on RPC request 326 cxy_t desc_cxy; // RPC request cluster (client) 327 rpc_desc_t * desc_ptr; // RPC request local pointer 328 uint32_t index; // RPC request index 329 thread_t * client_ptr; // local pointer on client thread 330 thread_t * server_ptr; // local pointer on server thread 331 xptr_t server_xp; // extended pointer on server thread 332 lid_t client_core_lid; // local index of client core 333 lid_t server_core_lid; // local index of server core 334 bool_t blocking; // blocking RPC when true 335 remote_fifo_t * rpc_fifo; // local pointer on RPC fifo 336 336 337 337 // makes RPC thread not preemptable 338 338 hal_disable_irq( NULL ); 339 339 340 thread_t * this = CURRENT_THREAD; 341 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 340 server_ptr = 
CURRENT_THREAD; 341 server_xp = XPTR( local_cxy , server_ptr ); 342 server_core_lid = server_ptr->core->lid; 343 rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid]; 342 344 343 345 // two embedded loops: 344 346 // - external loop : "infinite" RPC thread 345 // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests347 // - internal loop : handle one RPC request per iteration 346 348 347 349 while(1) // infinite loop 348 350 { 349 351 // try to take RPC_FIFO ownership 350 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )352 if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) ) 351 353 { 352 354 … … 355 357 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 356 358 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n", 357 __FUNCTION__, this, local_cxy, cycle ); 358 #endif 359 // initializes RPC requests counter 360 count = 0; 361 362 // exit internal loop in three cases: 363 // - RPC fifo is empty 364 // - ownership has been lost (because descheduling) 365 // - max number of RPCs is reached 366 while( 1 ) // internal loop 359 __FUNCTION__, server_ptr, local_cxy, cycle ); 360 #endif 361 while( 1 ) // one RPC request per iteration 367 362 { 368 363 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 369 364 370 if ( empty == 0 ) // one RPC request found 365 // exit when FIFO empty or FIFO ownership lost (in case of descheduling) 366 if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) ) 371 367 { 372 368 // get client cluster and pointer on RPC descriptor … … 381 377 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 382 378 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n", 383 __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );379 __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr ); 384 380 #endif 385 381 // call the relevant server function … … 390 386 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 391 387 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n", 392 __FUNCTION__, this, local_cxy, index, desc_ptr, cycle ); 393 #endif 394 // increment handled RPCs counter 395 count++; 396 388 __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle ); 389 #endif 397 390 // decrement response counter in RPC descriptor if blocking 398 391 if( blocking ) … … 402 395 403 396 // get client thread pointer and client core lid from RPC descriptor 404 thread_ptr= hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );405 c ore_lid= hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );397 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); 398 client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) ); 406 399 407 400 // unblock client thread 408 thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );401 thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC ); 409 402 410 403 hal_fence(); … … 414 407 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 415 408 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n", 416 __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );409 __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle ); 417 410 #endif 418 411 // send IPI to client core 419 dev_pic_send_ipi( desc_cxy , c ore_lid );412 dev_pic_send_ipi( desc_cxy , client_core_lid ); 420 413 } 421 414 } 422 423 // chek exit condition 424 if( local_fifo_is_empty( rpc_fifo ) || 425 (rpc_fifo->owner != this->trdid) || 426 (count >= CONFIG_RPC_PENDING_MAX) ) break; 
415 else 416 { 417 break; 418 } 427 419 } // end internal loop 428 420 429 421 // release rpc_fifo ownership if not lost 430 if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;422 if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0; 431 423 432 424 } // end if RPC fifo 433 425 434 // sucide if too many RPC threads in cluster 435 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) 426 // RPC thread blocks on IDLE 427 thread_block( server_xp , THREAD_BLOCKED_IDLE ); 428 429 // sucide if too many RPC threads / simply deschedule otherwise 430 if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX ) 436 431 { 437 432 … … 440 435 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 441 436 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n", 442 __FUNCTION__, this, local_cxy, cycle );437 __FUNCTION__, server_ptr, local_cxy, cycle ); 443 438 #endif 444 439 // update RPC threads counter 445 440 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 ); 446 441 447 // suicide 448 thread_kill( XPTR( local_cxy , this ), 449 true, // is_exit 450 true ); // is forced 442 // RPC thread blocks on GLOBAL 443 thread_block( server_xp , THREAD_BLOCKED_GLOBAL ); 444 445 // RPC thread set the REQ_DELETE flag to suicide 446 hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE ); 451 447 } 448 else 449 { 452 450 453 451 #if DEBUG_RPC_SERVER_GENERIC 454 452 uint32_t cycle = (uint32_t)hal_get_cycles(); 455 453 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 456 printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n", 457 __FUNCTION__, this, local_cxy, cycle ); 458 #endif 459 460 // Block and deschedule 461 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE ); 462 sched_yield("RPC fifo empty or too much work"); 463 464 #if DEBUG_RPC_SERVER_GENERIC 465 cycle = (uint32_t)hal_get_cycles(); 466 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 467 printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n", 468 __FUNCTION__, this, local_cxy, cycle ); 469 #endif 454 printk("\n[DBG] %s : RPC thread %x in cluster %x block & deschedules / cycle %d\n", 455 __FUNCTION__, server_ptr, local_cxy, cycle ); 456 #endif 457 458 // RPC thread deschedules 459 assert( thread_can_yield( server_ptr ) , __FUNCTION__, "illegal sched_yield\n" ); 460 sched_yield("RPC fifo empty"); 461 } 470 462 471 463 } // end infinite loop … … 646 638 647 639 // set input arguments in RPC descriptor 648 rpc.args[0] = (uint64_t) (intptr_t)ref_process_xp;649 rpc.args[1] = (uint64_t) (intptr_t)parent_thread_xp;640 rpc.args[0] = (uint64_t)ref_process_xp; 641 rpc.args[1] = (uint64_t)parent_thread_xp; 650 642 651 643 // register RPC request in remote RPC fifo … … 903 895 void rpc_process_sigaction_server( xptr_t xp ) 904 896 { 905 pid_t pid; // target process identifier 906 process_t * process; // pointer on local target process descriptor 907 uint32_t action; // sigaction index 908 thread_t * client_thread; // pointer on client thread in client cluster 909 cxy_t client_cxy; // client cluster identifier 910 rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster 911 xptr_t count_xp; // extended pointer on response counter 912 lid_t client_lid; // client core local index 897 pid_t pid; // target process identifier 898 process_t * process; // pointer on local target process descriptor 899 uint32_t action; // sigaction index 900 thread_t * client_ptr; // pointer on client thread in client cluster 901 xptr_t client_xp; // extended pointer client thread 902 cxy_t client_cxy; // client cluster 
identifier 903 rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster 904 xptr_t count_xp; // extended pointer on responses counter 905 uint32_t count_value; // responses counter value 906 lid_t client_lid; // client core local index 913 907 914 908 // get client cluster identifier and pointer on RPC descriptor … … 927 921 #endif 928 922 923 // get client thread pointers 924 client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) ); 925 client_xp = XPTR( client_cxy , client_ptr ); 926 929 927 // get local process descriptor 930 928 process = cluster_get_local_process_from_pid( pid ); 931 929 932 930 // call relevant kernel function 933 if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process );934 else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process );931 if ( action == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp ); 932 else if ( action == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp ); 935 933 else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 936 934 … … 939 937 940 938 // decrement the responses counter in RPC descriptor, 939 count_value = hal_remote_atomic_add( count_xp , -1 ); 940 941 941 // unblock the client thread only if it is the last response. 942 if( hal_remote_atomic_add( count_xp , -1 ) == 1 )942 if( count_value == 1 ) 943 943 { 944 // get client thread pointer and client core lid 945 client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) ); 944 // get client core lid 946 945 client_lid = (lid_t) hal_remote_lw ( XPTR( client_cxy , &rpc->lid ) ); 947 946 948 thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC ); 947 // unblock client thread 948 thread_unblock( client_xp , THREAD_BLOCKED_RPC ); 949 950 // send an IPI to client core 949 951 dev_pic_send_ipi( client_cxy , client_lid ); 950 952 } … … 1192 1194 vfs_dentry_t * dentry ) 1193 1195 { 1196 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1197 uint32_t cycle = (uint32_t)hal_get_cycles(); 1198 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1199 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1200 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1201 #endif 1202 1194 1203 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1195 1204 … … 1206 1215 rpc_send( cxy , &rpc ); 1207 1216 1217 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1218 cycle = (uint32_t)hal_get_cycles(); 1219 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1220 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1221 __FUNCTION__ , CURRENT_THREAD , cycle ); 1222 #endif 1208 1223 } 1209 1224 … … 1211 1226 void rpc_vfs_dentry_destroy_server( xptr_t xp ) 1212 1227 { 1228 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1229 uint32_t cycle = (uint32_t)hal_get_cycles(); 1230 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1231 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1232 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1233 #endif 1234 1213 1235 vfs_dentry_t * dentry; 1214 1236 … … 1223 1245 vfs_dentry_destroy( dentry ); 1224 1246 1247 #if DEBUG_RPC_VFS_DENTRY_DESTROY 1248 cycle = (uint32_t)hal_get_cycles(); 1249 if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY ) 1250 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1251 __FUNCTION__ , CURRENT_THREAD , cycle ); 1252 #endif 1225 1253 } 1226 1254 … … 1319 1347 vfs_file_t * file ) 1320 1348 { 1349 #if DEBUG_RPC_VFS_FILE_DESTROY 1350 uint32_t cycle = (uint32_t)hal_get_cycles(); 
1351 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1352 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1353 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1354 #endif 1355 1321 1356 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1322 1357 … … 1333 1368 rpc_send( cxy , &rpc ); 1334 1369 1370 #if DEBUG_RPC_VFS_FILE_DESTROY 1371 cycle = (uint32_t)hal_get_cycles(); 1372 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1373 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1374 __FUNCTION__ , CURRENT_THREAD , cycle ); 1375 #endif 1335 1376 } 1336 1377 … … 1338 1379 void rpc_vfs_file_destroy_server( xptr_t xp ) 1339 1380 { 1381 #if DEBUG_RPC_VFS_FILE_DESTROY 1382 uint32_t cycle = (uint32_t)hal_get_cycles(); 1383 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1384 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1385 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1386 #endif 1387 1340 1388 vfs_file_t * file; 1341 1389 … … 1350 1398 vfs_file_destroy( file ); 1351 1399 1400 #if DEBUG_RPC_VFS_FILE_DESTROY 1401 cycle = (uint32_t)hal_get_cycles(); 1402 if( cycle > DEBUG_RPC_VFS_FILE_DESTROY ) 1403 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1404 __FUNCTION__ , CURRENT_THREAD , cycle ); 1405 #endif 1352 1406 } 1353 1407 … … 1536 1590 error_t * error ) // out 1537 1591 { 1592 #if DEBUG_RPC_VMM_GET_VSEG 1593 uint32_t cycle = (uint32_t)hal_get_cycles(); 1594 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1595 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1596 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1597 #endif 1598 1538 1599 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1539 1600 … … 1555 1616 *error = (error_t)rpc.args[3]; 1556 1617 1618 #if DEBUG_RPC_VMM_GET_VSEG 1619 cycle = (uint32_t)hal_get_cycles(); 1620 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1621 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1622 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1623 #endif 1557 1624 } 1558 1625 … … 1560 1627 void rpc_vmm_get_vseg_server( xptr_t xp ) 1561 1628 { 1629 #if DEBUG_RPC_VMM_GET_VSEG 1630 uint32_t cycle = (uint32_t)hal_get_cycles(); 1631 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1632 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1633 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1634 #endif 1635 1562 1636 process_t * process; 1563 1637 intptr_t vaddr; … … 1582 1656 hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error ); 1583 1657 1584 } 1585 1586 1587 ///////////////////////////////////////////////////////////////////////////////////////// 1588 // [21] Marshaling functions attached to RPC_VMM_GET_PTE (blocking) 1658 #if DEBUG_RPC_VMM_GET_VSEG 1659 cycle = (uint32_t)hal_get_cycles(); 1660 if( cycle > DEBUG_RPC_VMM_GET_VSEG ) 1661 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1662 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1663 #endif 1664 } 1665 1666 1667 ///////////////////////////////////////////////////////////////////////////////////////// 1668 // [21] Marshaling functions attached to RPC_VMM_GET_VSEG (blocking) 1589 1669 ///////////////////////////////////////////////////////////////////////////////////////// 1590 1670 … … 1598 1678 error_t * error ) // out 1599 1679 { 1600 assert( (cxy != local_cxy) , __FUNCTION__ , "target 
cluster is not remote\n"); 1601 1602 // initialise RPC descriptor header 1603 rpc_desc_t rpc; 1604 rpc.index = RPC_VMM_GET_PTE; 1680 #if DEBUG_RPC_VMM_GET_PTE 1681 uint32_t cycle = (uint32_t)hal_get_cycles(); 1682 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1683 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1684 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1685 #endif 1686 1687 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 1688 1689 // initialise RPC descriptor header 1690 rpc_desc_t rpc; 1691 rpc.index = RPC_VMM_GET_VSEG; 1605 1692 rpc.blocking = true; 1606 1693 rpc.responses = 1; … … 1619 1706 *error = (error_t)rpc.args[5]; 1620 1707 1708 #if DEBUG_RPC_VMM_GET_PTE 1709 cycle = (uint32_t)hal_get_cycles(); 1710 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1711 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1712 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1713 #endif 1621 1714 } 1622 1715 … … 1624 1717 void rpc_vmm_get_pte_server( xptr_t xp ) 1625 1718 { 1719 #if DEBUG_RPC_VMM_GET_PTE 1720 uint32_t cycle = (uint32_t)hal_get_cycles(); 1721 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1722 printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n", 1723 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1724 #endif 1725 1626 1726 process_t * process; 1627 1727 vpn_t vpn; … … 1648 1748 hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 1649 1749 1750 #if DEBUG_RPC_VMM_GET_PTE 1751 cycle = (uint32_t)hal_get_cycles(); 1752 if( cycle > DEBUG_RPC_VMM_GET_PTE ) 1753 printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n", 1754 __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle ); 1755 #endif 1650 1756 } 1651 1757 -
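The rewritten rpc_thread_func() keeps a much simpler inner loop: take ownership of the per-core FIFO, consume requests one at a time, and leave the loop as soon as the FIFO is empty or ownership was lost through descheduling; the thread then blocks on IDLE and either deschedules or marks itself for deletion. The skeleton below is a hedged reduction of that control flow with stub helpers and hypothetical names, not the actual kernel function.

```c
#include <stdbool.h>
#include <stdint.h>

struct rpc_fifo_model { uint32_t owner; };

/* stand-ins for hal_atomic_test_set(), local_fifo_get_item(), etc. */
static bool fifo_take_ownership(struct rpc_fifo_model *f, uint32_t trdid)
{ if (f->owner != 0) return false; f->owner = trdid; return true; }
static bool fifo_get_item(struct rpc_fifo_model *f, uint64_t *item)
{ (void)f; (void)item; return false; /* false == FIFO empty in this model */ }
static void handle_request(uint64_t item) { (void)item; }
static void block_idle_and_yield(void) { }

/* One pass of the server loop in rpc_thread_func() */
void rpc_server_pass(struct rpc_fifo_model *fifo, uint32_t my_trdid)
{
    if (fifo_take_ownership(fifo, my_trdid)) {
        uint64_t item;
        while (fifo_get_item(fifo, &item) &&          /* FIFO not empty      */
               fifo->owner == my_trdid) {             /* ownership not lost  */
            handle_request(item);                     /* one request/iter.   */
        }
        if (fifo->owner == my_trdid) fifo->owner = 0; /* release ownership   */
    }
    block_idle_and_yield();  /* the real thread blocks on IDLE, then either
                                deschedules or self-deletes when there are
                                too many RPC threads on this core            */
}
```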
trunk/kernel/kern/scheduler.c
r438 r440 125 125 thread = LIST_ELEMENT( current , thread_t , sched_list ); 126 126 127 // execute RPC thread if non blocked 128 if( (thread->blocked == 0) && 129 (thread->type == THREAD_RPC) ) 130 { 131 spinlock_unlock( &sched->lock ); 132 return thread; 133 } 134 135 // execute DEV thread if non blocked and waiting queue non empty 136 if( (thread->blocked == 0) && 137 (thread->type == THREAD_DEV) && 138 (xlist_is_empty( XPTR( local_cxy , &thread->chdev->wait_root)) == 0) ) 127 // select kernel thread if non blocked and non IDLE 128 if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) ) 139 129 { 140 130 spinlock_unlock( &sched->lock ); … … 186 176 187 177 list_entry_t * iter; 178 list_entry_t * root; 188 179 thread_t * thread; 189 180 process_t * process; 190 181 182 // get pointer on scheduler 191 183 scheduler_t * sched = &core->scheduler; 184 185 // get pointer on user threads root 186 root = &sched->u_root; 192 187 193 188 // take lock protecting threads lists 194 189 spinlock_lock( &sched->lock ); 195 190 191 // We use a while to scan the user threads, to control the iterator increment, 192 // because some threads will be destroyed, and we cannot use a LIST_FOREACH() 193 194 // initialise list iterator 195 iter = root->next; 196 196 197 // scan all user threads 197 LIST_FOREACH( &sched->u_root , iter ) 198 { 198 while( iter != root ) 199 { 200 // get pointer on thread 199 201 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 202 203 // increment iterator 204 iter = iter->next; 200 205 201 206 // handle REQ_ACK … … 219 224 process = thread->process; 220 225 226 // release FPU if required 227 if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL; 228 229 // remove thread from scheduler (scheduler lock already taken) 230 uint32_t threads_nr = sched->u_threads_nr; 231 232 assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" ); 233 234 sched->u_threads_nr = threads_nr - 1; 235 list_unlink( &thread->sched_list ); 236 if( threads_nr == 1 ) sched->u_last = NULL; 237 238 // delete thread 239 thread_destroy( thread ); 240 221 241 #if DEBUG_SCHED_HANDLE_SIGNALS 222 242 uint32_t cycle = (uint32_t)hal_get_cycles(); 223 243 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 224 printk("\n[DBG] %s : thread %x in proces %x must be deleted / cycle %d\n", 225 __FUNCTION__ , thread , process->pid , cycle ); 226 #endif 227 // release FPU if required 228 if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL; 229 230 // detach thread from parent if attached 231 if( (thread->flags & THREAD_FLAG_DETACHED) == 0 ) 232 thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) ); 233 234 // remove thread from scheduler (scheduler lock already taken) 235 uint32_t threads_nr = sched->u_threads_nr; 236 assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" ); 237 sched->u_threads_nr = threads_nr - 1; 238 list_unlink( &thread->sched_list ); 239 if( threads_nr == 1 ) sched->u_last = NULL; 240 241 // delete thread 242 thread_destroy( thread ); 243 244 #if DEBUG_SCHED_HANDLE_SIGNALS 245 cycle = (uint32_t)hal_get_cycles(); 246 if( DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 247 printk("\n[DBG] %s : thread %x in process %x has been deleted / cycle %d\n", 248 __FUNCTION__ , thread , process->pid , cycle ); 244 printk("\n[DBG] %s : thread %x in proces %x (%x) deleted / cycle %d\n", 245 __FUNCTION__ , thread , process->pid , process , cycle ); 249 246 #endif 250 247 // destroy process descriptor if no more threads … … 314 311 { 315 312 313 if( 
(local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)current == 0xcc000) ) 314 printk("\n@@@@@ cc000 exit at cycle %d\n", (uint32_t)hal_get_cycles() ); 315 316 if( (local_cxy == 0X1) && (core->lid == 1) && ((uint32_t)next == 0xcc000) ) 317 printk("\n@@@@@ cc000 enter at cycle %d\n", (uint32_t)hal_get_cycles() ); 318 316 319 #if DEBUG_SCHED_YIELD 317 320 uint32_t cycle = (uint32_t)hal_get_cycles(); -
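The signal-handling scan switches from LIST_FOREACH() to an explicit while loop because the current node may be unlinked and destroyed inside the loop body; the iterator therefore has to be advanced before the node is touched. A minimal generic illustration of that pattern on a singly linked list (hypothetical types, not the kernel list API):

```c
#include <stdlib.h>
#include <stdbool.h>

struct node { struct node *next; bool doomed; };

/* Remove and free every doomed node of a singly linked list.
 * The 'next' pointer is saved *before* the current node may be freed,
 * which is exactly why sched_handle_signals() stopped using LIST_FOREACH. */
void purge(struct node **head)
{
    struct node **link = head;
    struct node  *iter = *head;
    while (iter != NULL) {
        struct node *next = iter->next;   /* advance the iterator first */
        if (iter->doomed) {
            *link = next;                 /* unlink                     */
            free(iter);                   /* then destroy safely        */
        } else {
            link = &iter->next;
        }
        iter = next;
    }
}
```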
trunk/kernel/kern/thread.c
r438 r440
184 184 thread->blocked = THREAD_BLOCKED_GLOBAL;
185 185 
186 // reset children list
187 xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
188 thread->children_nr = 0;
189 
190 // reset sched list and brothers list
186 // reset sched list
191 187 list_entry_init( &thread->sched_list );
192 xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );
193 188 
194 189 // reset thread info
… …
238 233 // get process descriptor local copy
239 234 process = process_get_local_copy( pid );
235 
240 236 if( process == NULL )
241 237 {
… …
604 600 ///////////////////////////////////////////////////////////////////////////////////////
605 601 // TODO: check that all memory dynamically allocated during thread execution
606 // has been released, using a cache of mmap and malloc requests. [AG]
602 // has been released, using a cache of mmap requests. [AG]
607 603 ///////////////////////////////////////////////////////////////////////////////////////
608 604 void thread_destroy( thread_t * thread )
… …
619 615 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle );
620 616 #endif
621 
622 assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
623 617 
624 618 assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" );
… …
663 657 } // end thread_destroy()
664 658 
665 /////////////////////////////////////////////////
666 void thread_child_parent_link( xptr_t xp_parent,
667 xptr_t xp_child )
668 {
669 // get extended pointers on children list root
670 cxy_t parent_cxy = GET_CXY( xp_parent );
671 thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
672 xptr_t root = XPTR( parent_cxy , &parent_ptr->children_root );
673 
674 // get extended pointer on children list entry
675 cxy_t child_cxy = GET_CXY( xp_child );
676 thread_t * child_ptr = (thread_t *)GET_PTR( xp_child );
677 xptr_t entry = XPTR( child_cxy , &child_ptr->brothers_list );
678 
679 // set the link
680 xlist_add_first( root , entry );
681 hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
682 
683 } // end thread_child_parent_link()
684 
685 ///////////////////////////////////////////////////
686 void thread_child_parent_unlink( xptr_t xp_parent,
687 xptr_t xp_child )
688 {
689 // get extended pointer on children list lock
690 cxy_t parent_cxy = GET_CXY( xp_parent );
691 thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
692 xptr_t lock = XPTR( parent_cxy , &parent_ptr->children_lock );
693 
694 // get extended pointer on children list entry
695 cxy_t child_cxy = GET_CXY( xp_child );
696 thread_t * child_ptr = (thread_t *)GET_PTR( xp_child );
697 xptr_t entry = XPTR( child_cxy , &child_ptr->brothers_list );
698 
699 // get the lock
700 remote_spinlock_lock( lock );
701 
702 // remove the link
703 xlist_unlink( entry );
704 hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );
705 
706 // release the lock
707 remote_spinlock_unlock( lock );
708 
709 } // thread_child_parent_unlink()
710 
711 659 //////////////////////////////////////////////////
712 660 inline void thread_set_req_ack( thread_t * target,
… …
846 794 
847 795 } // end thread_unblock()
796 
797 /*
848 798 
849 799 ////////////////////////////////////
… …
875 825 process_t * target_process; // pointer on target thread process
876 826 
877 // get target thread cluster and pointer
827 // get target thread pointer and cluster
878 828 target_cxy = GET_CXY( target_xp );
879 829 target_ptr = GET_PTR( target_xp );
… …
883 833 killer_xp = XPTR( local_cxy , killer_ptr );
884 834 
885 #if DEBUG_THREAD_KILL
835 #if DEBUG_THREAD_DELETE
886 836 uint32_t cycle = (uint32_t)hal_get_cycles;
887 if( DEBUG_THREAD_KILL < cycle )
837 if( DEBUG_THREAD_DELETE < cycle )
888 838 printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n",
889 839 __FUNCTION__, killer_ptr, target_ptr, cycle );
… …
982 932 else hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL );
983 933 
984 #if DEBUG_THREAD_KILL
934 #if DEBUG_THREAD_DELETE
985 935 cycle = (uint32_t)hal_get_cycles;
986 if( DEBUG_THREAD_KILL < cycle )
936 if( DEBUG_THREAD_DELETE < cycle )
987 937 printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n",
988 938 __FUNCTION__, killer_ptr, target_ptr, cycle );
… …
995 945 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
996 946 
997 #if DEBUG_THREAD_KILL
947 #if DEBUG_THREAD_DELETE
998 948 cycle = (uint32_t)hal_get_cycles;
999 if( DEBUG_THREAD_KILL < cycle )
949 if( DEBUG_THREAD_DELETE < cycle )
1000 950 printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n",
1001 951 __FUNCTION__, killer_ptr, target_ptr, cycle );
… …
1005 955 
1006 956 } // end thread_kill()
957 
958 */
959 
960 //////////////////////////////////////
961 void thread_delete( xptr_t target_xp,
962 pid_t pid,
963 bool_t is_forced )
964 {
965 reg_t save_sr; // for critical section
966 bool_t target_join_done; // joining thread arrived first
967 bool_t target_attached; // target thread attached
968 xptr_t killer_xp; // extended pointer on killer thread (this)
969 thread_t * killer_ptr; // pointer on killer thread (this)
970 cxy_t target_cxy; // target thread cluster
971 thread_t * target_ptr; // pointer on target thread
972 xptr_t target_flags_xp; // extended pointer on target thread <flags>
973 uint32_t target_flags; // target thread <flags> value
974 xptr_t target_join_lock_xp; // extended pointer on target thread <join_lock>
975 xptr_t target_join_xp_xp; // extended pointer on target thread <join_xp>
976 trdid_t target_trdid; // target thread identifier
977 ltid_t target_ltid; // target thread local index
978 xptr_t joining_xp; // extended pointer on joining thread
979 thread_t * joining_ptr; // pointer on joining thread
980 cxy_t joining_cxy; // joining thread cluster
981 cxy_t owner_cxy; // process owner cluster
982 
983 
984 // get target thread pointers, identifiers, and flags
985 target_cxy = GET_CXY( target_xp );
986 target_ptr = GET_PTR( target_xp );
987 target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
988 target_ltid = LTID_FROM_TRDID( target_trdid );
989 target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
990 target_flags = hal_remote_lw( target_flags_xp );
991 
992 // get killer thread pointers
993 killer_ptr = CURRENT_THREAD;
994 killer_xp = XPTR( local_cxy , killer_ptr );
995 
996 #if DEBUG_THREAD_DELETE
997 uint32_t cycle = (uint32_t)hal_get_cycles;
998 if( DEBUG_THREAD_DELETE < cycle )
999 printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
1000 __FUNCTION__, killer_ptr, target_ptr, cycle );
1001 #endif
1002 
1003 // target thread cannot be the main thread, because the main thread
1004 // must be deleted by the parent process sys_wait() function
1005 owner_cxy = CXY_FROM_PID( pid );
1006 assert( ((owner_cxy != target_cxy) || (target_ltid != 0)), __FUNCTION__,
1007 "target thread cannot be the main thread\n" );
1008 
1009 // block the target thread
1010 thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1011 
1012 // get attached from target flag descriptor
1013 target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
1014 
1015 // synchronize with the joining thread if the target thread is attached
1016 if( target_attached && (is_forced == false) )
1017 {
1018 // build extended pointers on target thread join fields
1019 target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock );
1020 target_join_xp_xp = XPTR( target_cxy , &target_ptr->join_xp );
1021 
1022 // enter critical section
1023 hal_disable_irq( &save_sr );
1024 
1025 // take the join_lock in target thread descriptor
1026 remote_spinlock_lock( target_join_lock_xp );
1027 
1028 // get join_done from target thread descriptor
1029 target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
1030 
1031 if( target_join_done ) // joining thread arrived first => unblock the joining thread
1032 {
1033 // get extended pointer on joining thread
1034 joining_xp = (xptr_t)hal_remote_lwd( target_join_xp_xp );
1035 joining_ptr = GET_PTR( joining_xp );
1036 joining_cxy = GET_CXY( joining_xp );
1037 
1038 // reset the join_done flag in target thread
1039 hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE );
1040 
1041 // unblock the joining thread
1042 thread_unblock( joining_xp , THREAD_BLOCKED_JOIN );
1043 
1044 // release the join_lock in target thread descriptor
1045 remote_spinlock_unlock( target_join_lock_xp );
1046 
1047 // restore IRQs
1048 hal_restore_irq( save_sr );
1049 }
1050 else // this thread arrived first => register flags and deschedule
1051 {
1052 // set the kill_done flag in target thread
1053 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1054 
1055 // block this thread on BLOCKED_JOIN
1056 thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1057 
1058 // set extended pointer on killer thread in target thread
1059 hal_remote_swd( target_join_xp_xp , killer_xp );
1060 
1061 // release the join_lock in target thread descriptor
1062 remote_spinlock_unlock( target_join_lock_xp );
1063 
1064 // deschedule
1065 sched_yield( "killer thread wait joining thread" );
1066 
1067 // restore IRQs
1068 hal_restore_irq( save_sr );
1069 }
1070 } // end if attached
1071 
1072 // set the REQ_DELETE flag in target thread descriptor
1073 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1074 
1075 #if DEBUG_THREAD_DELETE
1076 cycle = (uint32_t)hal_get_cycles;
1077 if( DEBUG_THREAD_DELETE < cycle )
1078 printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
1079 __FUNCTION__, killer_ptr, target_ptr, cycle );
1080 #endif
1081 
1082 } // end thread_delete()
1083 
1084 
1007 1085 
1008 1086 ///////////////////////
-
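The attached-mode rendezvous implemented by thread_delete() above relies on the THREAD_FLAG_JOIN_DONE and THREAD_FLAG_KILL_DONE flags, exchanged under the target thread's join_lock: whichever of the deleting thread or the joining thread arrives second completes the handshake and wakes the other. The sketch below is a minimal user-space model of that protocol, for illustration only; it is not ALMOS-MKH code. The names rendezvous_t, delete_side() and join_side() are invented here, and a POSIX mutex / condition variable stands in for the remote join_lock and for blocking on THREAD_BLOCKED_JOIN.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

// one rendezvous object per target thread
typedef struct
{
    pthread_mutex_t lock;        // models the target thread join_lock
    pthread_cond_t  cond;        // models blocking / unblocking on BLOCKED_JOIN
    bool            join_done;   // models THREAD_FLAG_JOIN_DONE
    bool            kill_done;   // models THREAD_FLAG_KILL_DONE
} rendezvous_t;

// deleting side : models the attached case of thread_delete()
static void delete_side( rendezvous_t * r )
{
    pthread_mutex_lock( &r->lock );
    if( r->join_done )                    // joining thread arrived first
    {
        r->join_done = false;             // consume its flag
        pthread_cond_signal( &r->cond );  // and wake it up
    }
    else                                  // this thread arrived first
    {
        r->kill_done = true;              // publish the kill_done flag
        while( r->kill_done )             // wait until the joiner consumes it
            pthread_cond_wait( &r->cond , &r->lock );
    }
    pthread_mutex_unlock( &r->lock );
    // only now would the REQ_DELETE flag be set in the real kernel
}

// joining side : models the pthread_join() path
static void join_side( rendezvous_t * r )
{
    pthread_mutex_lock( &r->lock );
    if( r->kill_done )                    // deleting thread arrived first
    {
        r->kill_done = false;
        pthread_cond_signal( &r->cond );
    }
    else                                  // this thread arrived first
    {
        r->join_done = true;
        while( r->join_done )
            pthread_cond_wait( &r->cond , &r->lock );
    }
    pthread_mutex_unlock( &r->lock );
}

static void * joiner_thread( void * arg )
{
    join_side( (rendezvous_t *)arg );
    return NULL;
}

int main( void )
{
    rendezvous_t r = { PTHREAD_MUTEX_INITIALIZER , PTHREAD_COND_INITIALIZER , false , false };
    pthread_t    joiner;

    pthread_create( &joiner , NULL , joiner_thread , &r );
    delete_side( &r );                    // whichever side arrives second completes the rendezvous
    pthread_join( joiner , NULL );

    printf( "rendezvous completed\n" );
    return 0;
}

The point of the protocol is that neither side needs to know in advance whether the joiner or the deleter reaches the rendezvous first: the two flags plus the lock make both arrival orders symmetrical.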
trunk/kernel/kern/thread.h
r438 r440
171 171 cxy_t fork_cxy; /*! target cluster for next fork() */
172 172 
173 xlist_entry_t children_root; /*! root of list of attached children */
174 uint32_t children_nr; /*! number of attached children threads */
175 remote_spinlock_t * children_lock; /*! lock protecting the children list */
176 
177 xlist_entry_t brothers_list; /*! list of attached threads to same parent */
178 
179 173 list_entry_t sched_list; /*! member of threads attached to same core */
180 174 
… …
222 216 * in an existing process. It allocates memory for an user thread descriptor in the
223 217 * local cluster, and initializes it from information contained in the arguments.
224 * The CPU context is initialized from scratch. If required by the <attr> argument,
225 * the new thread is attached to the core specified in <attr>.
218 * The CPU context is initialized from scratch.
226 219 * It is registered in the local process descriptor specified by the <pid> argument.
227 * The thread descriptor pointer is returned to allow the parent thread to register it
228 * in its children list.
229 220 * The THREAD_BLOCKED_GLOBAL bit is set => the thread must be activated to start.
230 221 ***************************************************************************************
… …
325 316 
326 317 /***************************************************************************************
327 * This function registers a child thread in the global list of attached
328 * children threads of a parent thread.
329 * It does NOT take a lock, as this function is always called by the parent thread.
330 ***************************************************************************************
331 * @ parent_xp : extended pointer on the parent thread descriptor.
332 * @ child_xp : extended pointer on the child thread descriptor.
333 **************************************************************************************/
334 void thread_child_parent_link( xptr_t parent_xp,
335 xptr_t child_xp );
336 
337 /***************************************************************************************
338 * This function removes an user thread from the parent thread global list
339 * of attached children threads.
340 ***************************************************************************************
341 * @ parent_xp : extended pointer on the parent thread descriptor.
342 * @ child_xp : extended pointer on the child thread descriptor.
343 **************************************************************************************/
344 void thread_child_parent_unlink( xptr_t parent_xp,
345 xptr_t child_xp );
346 
347 /***************************************************************************************
348 318 * This function is used by a "blocker" thread running in the same cluster as a "target"
349 319 * thread to request the scheduler of the target thread to acknowledge that the target
… …
386 356 
387 357 /***************************************************************************************
388 * This function is called to handle the four pthread_cancel(), pthread_exit(),
389 * kill() and exit() system calls. It kills a "target" thread identified by the
390 * <thread_xp> argument. The "killer" thread can be the "target" thread, when the
391 * <is_exit> argument is true. The "killer" thread can run in any cluster,
392 * as it uses remote accesses.
393 * If the "target" thread is running in "attached" mode, and the <is_forced> argument
358 * This function is used by the four sys_thread_cancel(), sys_thread_exit(),
359 * sys_kill() and sys_exit() system calls to delete a given thread.
360 * It sets the THREAD_BLOCKED_GLOBAL bit and the THREAD_FLAG_REQ_DELETE bit
361 * in the thread descriptor identified by the <thread_xp> argument, to ask the scheduler
362 * to asynchronously delete the target thread, at the next scheduling point.
363 * The calling thread can run in any cluster, as it uses remote accesses, but
364 * the target thread cannot be the main thread of the process identified by the <pid>,
365 * because the main thread must be deleted by the parent process sys_wait() function.
366 * If the target thread is running in "attached" mode, and the <is_forced> argument
394 367 * is false, this function implements the required synchronisation with the joining
395 * thread, blocking the "killer" thread until the pthread_join() syscall is executed.
396 * To delete the target thread, this function sets the THREAD_FLAG_REQ_DELETE bit
397 * and the THREAD_BLOCKED_GLOBAL bit in the target thread, and the actual destruction
398 * is asynchronously done by the scheduler at the next scheduling point.
368 * thread, blocking the calling thread until the pthread_join() syscall is executed.
399 369 ***************************************************************************************
400 370 * @ thread_xp : extended pointer on the target thread.
401 * @ is_exit : the killer thread is the target thread itself.
402 * @ is_forced : the killing does not depend on the attached mode.
403 **************************************************************************************/
404 void thread_kill( xptr_t thread_xp,
405 bool_t is_exit,
406 bool_t is_forced );
371 * @ pid : process identifier (to get the owner cluster identifier).
372 * @ is_forced : the deletion does not depend on the attached mode.
373 **************************************************************************************/
374 void thread_delete( xptr_t thread_xp,
375 pid_t pid,
376 bool_t is_forced );
407 377 
408 378 /***************************************************************************************
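The new thread_delete() interface described in the thread.h comment above makes deletion asynchronous: the caller only marks the target thread with THREAD_BLOCKED_GLOBAL and THREAD_FLAG_REQ_DELETE, and the scheduler performs the actual destruction at its next scheduling point. The following is a small toy model of that contract, for illustration only and assuming nothing from ALMOS-MKH; the names toy_thread_t, toy_thread_delete() and toy_scheduling_point() are invented for this sketch.

#include <stdatomic.h>
#include <stdio.h>

// toy flag values standing in for THREAD_BLOCKED_GLOBAL and THREAD_FLAG_REQ_DELETE
#define TOY_BLOCKED_GLOBAL   0x1
#define TOY_FLAG_REQ_DELETE  0x2

typedef struct
{
    atomic_uint blocked;   // stands in for the thread <blocked> bit vector
    atomic_uint flags;     // stands in for the thread <flags> bit vector
} toy_thread_t;

// caller side : only posts the request, never releases resources itself
static void toy_thread_delete( toy_thread_t * t )
{
    atomic_fetch_or( &t->blocked , TOY_BLOCKED_GLOBAL );
    atomic_fetch_or( &t->flags   , TOY_FLAG_REQ_DELETE );
}

// scheduler side : the actual destruction happens at the next scheduling point
static int toy_scheduling_point( toy_thread_t * t )
{
    if( atomic_load( &t->flags ) & TOY_FLAG_REQ_DELETE )
    {
        printf( "scheduler releases the thread descriptor here\n" );
        return 1;
    }
    return 0;
}

int main( void )
{
    toy_thread_t t = { 0 , 0 };                  // hypothetical thread descriptor
    toy_thread_delete( &t );                     // the deleter returns immediately
    return toy_scheduling_point( &t ) ? 0 : 1;   // destruction deferred to the scheduler
}

The split mirrors the description above: posting the request is cheap and can be done from any cluster, while freeing the descriptor is left to the scheduler of the core that actually runs the target thread.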