Changeset 583 for trunk/kernel
- Timestamp: Nov 1, 2018, 12:10:42 PM
- Location: trunk/kernel/kern
- Files: 16 edited
trunk/kernel/kern/chdev.c
r581 → r583 (lines 63–79): whitespace-only realignment of the switch in the functional-type-to-string helper; no functional change.

    switch ( func_type )
    {
        case DEV_FUNC_RAM: return "RAM";
        case DEV_FUNC_ROM: return "ROM";
        case DEV_FUNC_FBF: return "FBF";
        case DEV_FUNC_IOB: return "IOB";
        case DEV_FUNC_IOC: return "IOC";
        case DEV_FUNC_MMC: return "MMC";
        case DEV_FUNC_DMA: return "DMA";
        case DEV_FUNC_NIC: return "NIC";
        case DEV_FUNC_TIM: return "TIM";
        case DEV_FUNC_TXT: return "TXT";
        case DEV_FUNC_ICU: return "ICU";
        case DEV_FUNC_PIC: return "PIC";
        default:           return "undefined";
    }
trunk/kernel/kern/cluster.c
r582 → r583:

- cluster_select_local_core(): comment-banner width adjusted only.

- cluster_processes_display() gains a bool_t owned parameter. It declares new locals (process_ptr, process_cxy, pid), folds the "illegal cluster index" assert onto one line, and filters the display loop: when owned is true, only user processes owned by the cluster are shown; otherwise every local process copy is displayed.

    void cluster_processes_display( cxy_t  cxy,
                                    bool_t owned )
    ...
    XLIST_FOREACH( root_xp , iter_xp )
    {
        process_xp  = XLIST_ELEMENT( iter_xp , process_t , local_list );
        process_ptr = GET_PTR( process_xp );
        process_cxy = GET_CXY( process_xp );

        // get process PID
        pid = hal_remote_l32( XPTR( process_cxy , &process_ptr->pid ) );

        if( owned )   // display only user & owned processes
        {
            if( (CXY_FROM_PID( pid ) == cxy) && (LPID_FROM_PID( pid ) != 0) )
            {
                process_display( process_xp );
            }
        }
        else          // display all local processes
        {
            process_display( process_xp );
        }
    }
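For context, the owned test works because a PID embeds its owner cluster and a local index. The sketch below is a hypothetical reconstruction of the two macros, assuming a 16/16 bit split; the actual widths are defined in the kernel headers, not in this changeset:

    // Hypothetical reconstruction: PID = (owner cluster << 16) | local pid.
    // LPID 0 is the kernel process, so (LPID_FROM_PID(pid) != 0) keeps
    // user processes only.
    #define CXY_FROM_PID( pid )    ((cxy_t)((pid) >> 16))
    #define LPID_FROM_PID( pid )   ((lpid_t)((pid) & 0xFFFF))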
trunk/kernel/kern/cluster.h
r582 → r583 (around line 309): the prototype and its comment block track the new argument:

    /******************************************************************************
     * @ cxy   : cluster identifier.
     * @ owned : display only owned processes if non zero.
     *****************************************************************************/
    void cluster_processes_display( cxy_t  cxy,
                                    bool_t owned );
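A hypothetical pair of calls showing the two modes (illustrative cluster value, not taken from this changeset):

    cluster_processes_display( 0x1 , true  );   // user processes owned by cluster 1
    cluster_processes_display( 0x1 , false );   // every process copy in cluster 1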
trunk/kernel/kern/do_syscall.c
r527 → r583:

- sys_undefined(), which should never be called, loses its static qualifier so it can be referenced from other files:

    int sys_undefined( void )
    {
        assert( false , "undefined system call" );
        ...
    }

- The comment over the dispatch table now names the right header: the array of function pointers must be kept consistent with the enum in "syscalls_numbers.h" (previously "shared_syscalls.h").

- Slot 44, previously sys_undefined, now dispatches sys_place_fork:

    sys_get_cycle,        // 42
    sys_display,          // 43
    sys_place_fork,       // 44
    sys_thread_sleep,     // 45
    sys_thread_wakeup,    // 46

- syscall_str() is re-braced, gains the matching case, and drops the explicit SYS_UNDEFINED case, which now falls through to the default:

    case SYS_GET_CYCLE:      return "GET_CYCLE";      // 42
    case SYS_DISPLAY:        return "DISPLAY";        // 43
    case SYS_PLACE_FORK:     return "PLACE_FORK";     // 44
    case SYS_THREAD_SLEEP:   return "THREAD_SLEEP";   // 45
    ...
    default:                 return "undefined";
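For context, a minimal sketch of how such a table is typically consumed by the dispatcher; the names sys_call_tbl, SYSCALLS_NR, and the four-argument convention are assumptions for illustration, not code from this changeset:

    // Hypothetical dispatcher sketch: index the function-pointer table with
    // the syscall number, falling back to sys_undefined() when out of range.
    typedef int (*sys_func_t)();              // entries receive register args
    extern const sys_func_t sys_call_tbl[];   // the table shown above
    #define SYSCALLS_NR 50                    // hypothetical bound: last index is 49

    int do_syscall_sketch( uint32_t num, uint32_t a0, uint32_t a1,
                           uint32_t a2, uint32_t a3 )
    {
        // out-of-range numbers fall back to the undefined-syscall handler
        if( num >= SYSCALLS_NR ) return sys_undefined();
        return sys_call_tbl[num]( a0, a1, a2, a3 );
    }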
trunk/kernel/kern/dqdt.c
r582 → r583:

- #include <thread.h> is added (the new debug traces dereference CURRENT_THREAD).

- dqdt_display() prints the two new counters of each node:

    nolock_printk("- level %d / cluster %x : threads = %x / pages = %x / clusters %d / cores %d\n",
                  node.level, GET_CXY( node_xp ), node.threads, node.pages,
                  node.clusters, node.cores );

- The comment describing the recursive initialization is rewritten: at each level, the function initializes the node identified by the <cxy> and <level> arguments, selects in each child macro-cluster the precise cluster where the subtree root node will be placed, and calls itself recursively to initialize the child node in the selected cluster.

- dqdt_recursive_build() is reworked. It builds both a local and an extended pointer on the node, zeroes the node remotely, then sets the <parent> and <level> fields unconditionally:

    // build local and extended pointer on node to be initialized
    dqdt_node_t * node_ptr = &cluster->dqdt_tbl[level];
    xptr_t        node_xp  = XPTR( node_cxy , node_ptr );

    // make remote node default initialisation
    hal_remote_memset( node_xp , 0 , sizeof( dqdt_node_t ) );

    // initialize <parent> and <level> fields
    hal_remote_s64( XPTR( node_cxy , &node_ptr->parent ) , parent_xp );
    hal_remote_s32( XPTR( node_cxy , &node_ptr->level  ) , level );

  In the terminal case (level 0, a single cluster) the new <clusters> and <cores> fields are seeded:

    // initialize <clusters> field in node
    hal_remote_s32( XPTR( node_cxy , &node_ptr->clusters ) , 1 );

    // initialize <cores> field in node
    cores = hal_remote_l32( XPTR( node_cxy , &cluster->cores_nr ) );
    hal_remote_s32( XPTR( node_cxy , &node_ptr->cores ) , cores );

  In the non-terminal case (a macro-cluster), each of the four quadrant searches follows the same new pattern: find an active cluster, initialize the child recursively first, then accumulate the child's counters into the node. For child[0][0] (the other three quadrants are identical up to the coordinate ranges and the children[][] slot):

    child_cxy = HAL_CXY_FROM_XY( x , y );

    if( cluster_is_active( child_cxy ) )
    {
        // initialize recursively the selected child node
        dqdt_recursive_build( child_cxy , level-1 , node_xp );

        // build extended pointer on the child node
        child_xp = XPTR( child_cxy , child_ptr );

        // update <cores> field in node
        cores = hal_remote_l32( XPTR( child_cxy , &child_ptr->cores ) );
        hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->cores ) , cores );

        // update <clusters> field in node
        clusters = hal_remote_l32( XPTR( child_cxy , &child_ptr->clusters ) );
        hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->clusters ) , clusters );

        // update <children[0][0]> and <arity> fields in node
        hal_remote_s64( XPTR( node_cxy , &node_ptr->children[0][0] ) , child_xp );
        hal_remote_atomic_add( XPTR( node_cxy , &node_ptr->arity ) , 1 );

        // exit loops
        found = true;
    }

  Two loop bounds are also fixed: the y ranges of the child[0][1] and child[1][1] searches now stop at (node_base_y + (half<<1)) instead of (node_base_y + (half<<2)).

- The thread and page counters get dedicated increment / decrement entry points. The shared recursive helpers dqdt_propagate_pages() and dqdt_propagate_threads() keep climbing from the level-0 node to the root, atomically adding the variation at each level, but the public API changes: dqdt_update_pages( increment ) becomes dqdt_increment_pages( order ) / dqdt_decrement_pages( order ), which add or subtract (1 << order) pages, and dqdt_update_threads( increment ) becomes dqdt_increment_threads() / dqdt_decrement_threads(), which add or subtract one thread. Each of the four new functions ends with a trace guarded by DEBUG_DQDT_UPDATE_PAGES or DEBUG_DQDT_UPDATE_THREADS:

    ///////////////////////////////////////////
    void dqdt_increment_pages( uint32_t order )
    {
        cluster_t   * cluster = LOCAL_CLUSTER;
        dqdt_node_t * node    = &cluster->dqdt_tbl[0];

        // update DQDT node level 0
        hal_atomic_add( &node->pages , (1 << order) );

        // propagate to DQDT upper levels
        if( node->parent != XPTR_NULL ) dqdt_propagate_pages( node->parent , (1 << order) );
        ...
    }

- dqdt_select_cluster() now compares average loads instead of raw counters, dividing pages by the number of clusters (memory placement) or threads by the number of cores (process placement), and keeps a child on equality:

    // compute average load for each child
    if( for_memory )
    {
        load = hal_remote_l32( XPTR( cxy , &ptr->pages ) ) /
               hal_remote_l32( XPTR( cxy , &ptr->clusters ) );
    }
    else
    {
        load = hal_remote_l32( XPTR( cxy , &ptr->threads ) ) /
               hal_remote_l32( XPTR( cxy , &ptr->cores ) );
    }

    // select children with smallest load
    if( load <= load_min )
    {
        load_min = load;
        ...
    }

- dqdt_get_cluster_for_process() and dqdt_get_cluster_for_memory() store the selected cluster in a local variable so a new trace (DEBUG_DQDT_SELECT_FOR_PROCESS / DEBUG_DQDT_SELECT_FOR_MEMORY) can report it before returning.
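The (half<<2) → (half<<1) fix is worth a quick check. A macro-cluster spans 2*half clusters per axis, so an upper quadrant's y range must be [node_base_y + half, node_base_y + 2*half). With illustrative values (not from the changeset):

    // node_base_y = 0, half = 2 (a 4x4 macro-cluster):
    // child[0][1] must scan y in [2, 4).
    // old bound: y < 0 + (2 << 2) = 8   -> walks outside the macro-cluster
    // new bound: y < 0 + (2 << 1) = 4   -> exactly the upper quadrant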
trunk/kernel/kern/dqdt.h
r582 → r583:

- dqdt_node_t switches to /*! ... */ field comments and gains two fields, cores and clusters, maintained by the initialization code in dqdt.c:

    typedef struct dqdt_node_s
    {
        uint32_t  level;           /*! node level                                  */
        uint32_t  arity;           /*! actual children number in this node         */
        uint32_t  threads;         /*! current number of threads in macro-cluster  */
        uint32_t  pages;           /*! current number of pages in macro-cluster    */
        uint32_t  cores;           /*! number of active cores in macro-cluster     */
        uint32_t  clusters;        /*! number of active clusters in macro-cluster  */
        xptr_t    parent;          /*! extended pointer on parent node             */
        xptr_t    children[2][2];  /*! extended pointers on children nodes         */
    }
    dqdt_node_t;

- dqdt_update_threads( int32_t increment ) is replaced by the pair dqdt_increment_threads( void ) / dqdt_decrement_threads( void ): they update the total number of threads in the level-0 node and immediately propagate the variation to the upper levels; they are called on each thread creation or destruction.

- dqdt_update_pages( int32_t increment ) is likewise replaced by dqdt_increment_pages( uint32_t order ) / dqdt_decrement_pages( uint32_t order ), called by the PPM on each physical memory page allocation or release, where order is ln2( number of small pages ).
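As a usage illustration of the order argument (hypothetical call site, not part of this changeset):

    // a block of 8 contiguous small pages is reported as order 3:
    dqdt_increment_pages( 3 );    // level-0 node: pages += (1 << 3) == 8
    ...
    dqdt_decrement_pages( 3 );    // level-0 node: pages -= 8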
trunk/kernel/kern/kernel_init.c
r582 → r583:

- The lock-type name table gains one entry:

    "VFS_FILE",   // 32
    "VMM_VSL",    // 33
    "VMM_GPT",    // 34

- In the DEBUG_KERNEL_INIT block after barrier 0, the commented-out filter is restored, so the trace is printed by core 0 of cluster 0 only:

    if( (core_lid == 0) & (local_cxy == 0) )
    printk("\n[DBG] %s : exit barrier 0 : TXT0 initialized / sr %x / cycle %d\n",
           __FUNCTION__, (uint32_t)hal_get_sr(), (uint32_t)hal_get_cycles() );
trunk/kernel/kern/printk.c
r564 → r583: panic() loses its first parameter (file_name) and prints a more compact header:

    void panic( const char * function_name,
                uint32_t     line,
                cycle_t      cycle,
                ... )
    {
        ...
        // print generic infos
        nolock_printk("\n[PANIC] in %s: line %d | cycle %d\n"
                      "core[%x,%d] | thread %x (%x) | process %x (%x)\n",
                      function_name, line, (uint32_t)cycle,
                      local_cxy, current->core->lid,
                      current->trdid, current,
                      current->process->pid, current->process );

        // call kernel_printf to print format
        ...
trunk/kernel/kern/printk.h
r564 → r583:

- The panic() prototype drops file_name, matching the new definition.

- The long assert documentation block is condensed: the extra information printed is now summarized as the current thread, process, and core, plus the function name, line number in the file, and cycle.

- The assert macro body calls panic() without __FILE__:

    if ( ( expr ) == false )                    \
    {                                           \
        panic( __FUNCTION__,                    \
               __line_at_expansion,             \
               __assert_cycle,                  \
               ( format ), ##__VA_ARGS__ );     \
    }                                           \
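The usage example carried by the old comment still applies; a failing check such as:

    assert( my_ptr != NULL , "my_ptr should not be NULL" );

now reports the enclosing function, the line at macro expansion, and the cycle sampled just before the test (which may drift slightly under compiler optimisation), instead of the file name.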
trunk/kernel/kern/process.c
r581 → r583:

- process_alloc() and process_action_str() receive comment-banner and brace reformatting only.

- process_sigaction(): the open-coded busylocks check is replaced by thread_assert_can_yield( client , __FUNCTION__ ), the action-type assert is compacted, and every DEBUG_PROCESS_SIGACTION trace adopts the uniform "thread[%x,%x]" (pid, trdid) format; the trace emitted before a remote request now reads "send RPC to cluster %x for process %x". In the local-cluster case, process_block_threads() is called without a client argument:

    if     ( type == DELETE_ALL_THREADS  ) process_delete_threads ( local , client_xp );
    else if( type == BLOCK_ALL_THREADS   ) process_block_threads  ( local );
    else if( type == UNBLOCK_ALL_THREADS ) process_unblock_threads( local );

- process_block_threads() loses its client_xp parameter and now blocks all local threads of the target process: the exclusion of the main thread and of the client thread is removed, and the user-process check becomes assert( (LPID_FROM_PID( process->pid ) != 0), ... ). The acknowledgement protocol is otherwise unchanged:

    // set the global blocked bit in target thread descriptor.
    thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );

    // - if the calling thread and the target thread run on the same core,
    //   no confirmation from the scheduler is needed;
    // - otherwise, ask the target scheduler to acknowledge the blocking,
    //   to be sure that the target thread is not running.
    if( this->core->lid != target->core->lid )
    {
        // increment responses counter
        hal_atomic_add( (void*)&ack_count , 1 );

        // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
        thread_set_req_ack( target , (uint32_t *)&ack_count );

        // force scheduling on target thread
        dev_pic_send_ipi( local_cxy , target->core->lid );
    }

- The sibling scan at old lines 750–775 switches the lock protecting th_tbl[] from read to write mode (rwlock_wr_acquire / rwlock_wr_release instead of the rd_ variants), and its user-process assert also adopts LPID_FROM_PID; its traces move to the "thread[%x,%x]" format, as do those of the other per-process scans and of process_get_local_copy().

- process_register_thread() / process_remove_thread(): the stale "kth_tbl" / "kth_nr" comments become "th_tbl" / "th_nr", the NULL-thread assert in process_remove_thread() moves next to the th_nr check, and commented-out tracing printk calls are left in place for future debugging of kernel-process threads.

- process_make_fork() hoists this / trdid / pid into local debug variables, and both process_make_fork() and process_make_exec() rewrite all their DEBUG_* traces in the "thread[%x,%x]" format (several become shorter, e.g. "copied VMM from parent to child").
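For context, the counter-based handshake above completes on the caller's side by polling ack_count, which each acknowledging scheduler decrements once its thread is safely descheduled. A minimal hypothetical sketch of that wait (the excerpt shows only the request side, and the sched_yield name is assumed for illustration):

    // poll the shared counter decremented by the acknowledging schedulers,
    // yielding the core between polls instead of busy-waiting.
    while( ack_count != 0 ) sched_yield( "waiting for blocking acks" );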
trunk/kernel/kern/process.h
r564 → r583:

- process_block_threads() loses its client_xp parameter, and its comment block, moved after the one of process_delete_threads(), is rewritten to match the new behaviour: the function sets the THREAD_BLOCKED_GLOBAL bit in all local threads of the target process, requests the relevant schedulers to acknowledge the blocking (using IPIs if required), and returns only when all threads in the cluster are actually blocked; the threads are neither detached from their scheduler nor from the local process.

    void process_block_threads( process_t * process );

- The process_delete_threads() comment is reworded; it still spares the main thread (thread 0 in the owner cluster) and the client thread identified by client_xp, the actual deletion being performed by the scheduler at the next scheduling point.

- In the process_register_thread() banner, "@ trdid : [out] address of buffer for allocated trdid" becomes "@ trdid : [out] buffer for allocated trdid".

- The process_remove_thread() prototype and its comment block are removed from the header.
trunk/kernel/kern/rpc.c
r581 r583
72 72	
73 73	    &rpc_vmm_get_vseg_server,              // 20
74	    &rpc_vmm_get_pte_server,               // 21
74	    &rpc_vmm_global_update_pte_server,     // 21
75 75	    &rpc_kcm_alloc_server,                 // 22
76 76	    &rpc_kcm_free_server,                  // 23
… …
108 108	
109 109	    "GET_VSEG",              // 20
110	    "GET_PTE",               // 21
110	    "GLOBAL_UPDATE_PTE",     // 21
111 111	    "KCM_ALLOC",             // 22
112 112	    "KCM_FREE",              // 23
… …
126 126	
127 127	/***************************************************************************************/
128	/************ Generic functions supporting RPCs : client side **************************/
128	/************ Generic function supporting RPCs : client side ***************************/
129 129	/***************************************************************************************/
… …
145 145	
146 146	    // RPCs executed by the IDLE thread during kernel_init do not deschedule
147 147	    if( this->type != THREAD_IDLE ) thread_assert_can_yield( this , __FUNCTION__ );
148	
149	#if DEBUG_RPC_CLIENT_GENERIC
150	    uint32_t cycle = (uint32_t)hal_get_cycles();
151	    if( DEBUG_RPC_CLIENT_GENERIC < cycle )
152	    printk("\n[DBG] %s : thread %x in process %x enter for rpc %s / server_cxy %x / cycle %d\n",
153	    __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], server_cxy, cycle );
154	#endif
155 148	
156 149	    // select a server_core : use client core index if possible / core 0 otherwise
… …
163 156	    }
164 157	
165	    // register client_thread pointer and client_core lid in RPC descriptor
158	    // register client_thread and client_core in RPC descriptor
166 159	    rpc->thread = this;
167 160	    rpc->lid    = client_core_lid;
… …
193 186	
194 187	#if DEBUG_RPC_CLIENT_GENERIC
195	    cycle = (uint32_t)hal_get_cycles();
188	    uint32_t cycle = (uint32_t)hal_get_cycles();
196 189	    uint32_t items = remote_fifo_items( rpc_fifo_xp );
197 190	    if( DEBUG_RPC_CLIENT_GENERIC < cycle )
198	    printk("\n[DBG] %s : thread %x in process %x / rpc %s / items %d / cycle %d\n",
199	    __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index], items, cycle );
191	    printk("\n[DBG] %s : thread %x in process %x / rpc %s / server[%x,%d] / items %d / cycle %d\n",
192	    __FUNCTION__, this->trdid, this->process->pid, rpc_str[rpc->index],
193	    server_cxy, server_core_lid, items, cycle );
200 194	#endif
… …
1752 1746	
1753 1747	/////////////////////////////////////////////////////////////////////////////////////////
1754	// [21] Marshaling functions attached to RPC_VMM_GET_PTE (blocking)
1755	/////////////////////////////////////////////////////////////////////////////////////////
1756	
1757	////////////////////////////////////////////
1758	void rpc_vmm_get_pte_client( cxy_t       cxy,
1759	                             process_t * process,  // in
1760	                             vpn_t       vpn,      // in
1761	                             bool_t      cow,      // in
1762	                             uint32_t  * attr,     // out
1763	                             ppn_t     * ppn,      // out
1764	                             error_t   * error )   // out
1765	{
1766	#if DEBUG_RPC_VMM_GET_PTE
1767	    thread_t * this = CURRENT_THREAD;
1768	    uint32_t cycle = (uint32_t)hal_get_cycles();
1769	    if( cycle > DEBUG_RPC_VMM_GET_PTE )
1748	// [21] Marshaling functions attached to RPC_VMM_GLOBAL_UPDATE_PTE (blocking)
1749	/////////////////////////////////////////////////////////////////////////////////////////
1750	
1751	///////////////////////////////////////////////////////
1752	void rpc_vmm_global_update_pte_client( cxy_t       cxy,
1753	                                       process_t * process,  // in
1754	                                       vpn_t       vpn,      // in
1755	                                       uint32_t    attr,     // in
1756	                                       ppn_t       ppn )     // in
1757	{
1758	#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
1759	    thread_t * this = CURRENT_THREAD;
1760	    uint32_t cycle = (uint32_t)hal_get_cycles();
1761	    if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
1770 1762	    printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
1771 1763	    __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
… …
1776 1768	    // initialise RPC descriptor header
1777 1769	    rpc_desc_t  rpc;
1778	    rpc.index = RPC_VMM_GET_PTE;
1770	    rpc.index = RPC_VMM_GLOBAL_UPDATE_PTE;
1779 1771	    rpc.blocking = true;
1780 1772	    rpc.responses = 1;
… …
1783 1775	    rpc.args[0] = (uint64_t)(intptr_t)process;
1784 1776	    rpc.args[1] = (uint64_t)vpn;
1785	    rpc.args[2] = (uint64_t)cow;
1777	    rpc.args[2] = (uint64_t)attr;
1778	    rpc.args[3] = (uint64_t)ppn;
1786 1779	
1787 1780	    // register RPC request in remote RPC fifo
1788 1781	    rpc_send( cxy , &rpc );
1789 1782	
1790	    // get output argument from rpc descriptor
1791	    *attr  = (uint32_t)rpc.args[3];
1792	    *ppn   = (ppn_t)rpc.args[4];
1793	    *error = (error_t)rpc.args[5];
1794	
1795	#if DEBUG_RPC_VMM_GET_PTE
1796	    cycle = (uint32_t)hal_get_cycles();
1797	    if( cycle > DEBUG_RPC_VMM_GET_PTE )
1798	    printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
1799	    __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
1800	#endif
1801	}
1802	
1803	////////////////////////////////////////
1804	void rpc_vmm_get_pte_server( xptr_t xp )
1805	{
1806	#if DEBUG_RPC_VMM_GET_PTE
1807	    thread_t * this = CURRENT_THREAD;
1808	    uint32_t cycle = (uint32_t)hal_get_cycles();
1809	    if( cycle > DEBUG_RPC_VMM_GET_PTE )
1783	#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
1784	    cycle = (uint32_t)hal_get_cycles();
1785	    if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
1786	    printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
1787	    __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
1788	#endif
1789	}
1790	
1791	//////////////////////////////////////////////////
1792	void rpc_vmm_global_update_pte_server( xptr_t xp )
1793	{
1794	#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
1795	    thread_t * this = CURRENT_THREAD;
1796	    uint32_t cycle = (uint32_t)hal_get_cycles();
1797	    if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
1810 1798	    printk("\n[DBG] %s : thread %x in process %x on core %d enter / cycle %d\n",
1811 1799	    __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
… …
1814 1802	    process_t * process;
1815 1803	    vpn_t       vpn;
1816	    bool_t      cow;
1817 1804	    uint32_t    attr;
1818 1805	    ppn_t       ppn;
1819	    error_t     error;
1820 1806	
1821 1807	    // get client cluster identifier and pointer on RPC descriptor
… …
1826 1812	    process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
1827 1813	    vpn     = (vpn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
1828	    cow     = (bool_t)               hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
1814	    attr    = (uint32_t)             hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
1815	    ppn     = (ppn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
1829 1816	
1830 1817	    // call local kernel function
1831	    error = vmm_get_pte( process , vpn , cow , &attr , &ppn );
1832	
1833	    // set output argument "attr" & "ppn" to client RPC descriptor
1834	    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr );
1835	    hal_remote_s64( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn );
1836	    hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
1837	
1838	#if DEBUG_RPC_VMM_GET_PTE
1839	    cycle = (uint32_t)hal_get_cycles();
1840	    if( cycle > DEBUG_RPC_VMM_GET_PTE )
1818	    vmm_global_update_pte( process , vpn , attr , ppn );
1819	
1820	#if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
1821	    cycle = (uint32_t)hal_get_cycles();
1822	    if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
1841 1823	    printk("\n[DBG] %s : thread %x in process %x on core %d exit / cycle %d\n",
1842 1824	    __FUNCTION__, this->trdid, this->process->pid, this->core->lid , cycle );
-
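The marshaling convention visible in this hunk is uniform across all RPCs: the client widens each argument, whatever its natural type, into a 64-bit slot of the descriptor's args[] array, and the server reads the slots back from the client cluster and narrows them with explicit casts. The stand-alone sketch below models that round trip in plain C; the vpn_t/ppn_t typedefs and the two helper functions are stand-ins invented for the example, not the kernel's rpc.c code (which goes through hal_remote_l64() and XPTR()).

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    typedef uint32_t vpn_t;   // stand-ins for the kernel types
    typedef uint32_t ppn_t;

    // simplified model of the rpc_desc_t args[] convention : every
    // argument is widened to a 64-bit slot on the client side, and
    // narrowed back with an explicit cast on the server side
    typedef struct { uint64_t args[4]; } rpc_desc_t;

    static void client_marshal( rpc_desc_t * rpc, void * process,
                                vpn_t vpn, uint32_t attr, ppn_t ppn )
    {
        rpc->args[0] = (uint64_t)(intptr_t)process;
        rpc->args[1] = (uint64_t)vpn;
        rpc->args[2] = (uint64_t)attr;
        rpc->args[3] = (uint64_t)ppn;
    }

    static void server_unmarshal( rpc_desc_t * rpc, void ** process,
                                  vpn_t * vpn, uint32_t * attr, ppn_t * ppn )
    {
        *process = (void *)(intptr_t)rpc->args[0];
        *vpn     = (vpn_t)           rpc->args[1];
        *attr    = (uint32_t)        rpc->args[2];
        *ppn     = (ppn_t)           rpc->args[3];
    }

    int main( void )
    {
        int         fake_process;       // stands for a process descriptor
        rpc_desc_t  rpc;
        void      * p; vpn_t v; uint32_t a; ppn_t n;

        client_marshal( &rpc, &fake_process, 0x300, 0x0F, 0x12345 );
        server_unmarshal( &rpc, &p, &v, &a, &n );

        // the round trip must be loss-free for 32-bit values
        assert( p == &fake_process && v == 0x300 && a == 0x0F && n == 0x12345 );
        printf( "vpn %x / attr %x / ppn %x\n", (unsigned)v, (unsigned)a, (unsigned)n );
        return 0;
    }

Because the new RPC carries only [in] arguments and cannot fail, the client no longer reads any slot back after rpc_send(), which is exactly what the simplified client function above reflects.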
trunk/kernel/kern/rpc.h
r564 r583
82 82	
83 83	    RPC_VMM_GET_VSEG          = 20,
84	    RPC_VMM_GET_PTE           = 21,
84	    RPC_VMM_GLOBAL_UPDATE_PTE = 21,
85 85	    RPC_KCM_ALLOC             = 22,
86 86	    RPC_KCM_FREE              = 23,
… …
149 149	
150 150	/***********************************************************************************
151	 * This function contains the infinite loop executed by a RPC thread,
152	 * to handle all pending RPCs registered in the RPC fifo attached to a given core.
151	 * This function contains the infinite loop executed by an RPC server thread,
152	 * to handle pending RPCs registered in the RPC fifo attached to a given core.
153	 * In each iteration of this loop, it tries to handle one RPC request:
154	 * - it tries to take the RPC FIFO ownership,
155	 * - it consumes one request when the FIFO is not empty,
156	 * - it releases the FIFO ownership,
157	 * - it executes the requested service,
158	 * - it unblocks and sends an IPI to the client thread,
159	 * - it suicides if the number of RPC threads for this core is too large,
160	 * - it blocks on IDLE and deschedules otherwise.
153 161	 **********************************************************************************/
154 162	void rpc_thread_func( void );
… …
483 491	
484 492	/***********************************************************************************
485	 * [21] The RPC_VMM_GET_PTE returns in the <ppn> and <attr> arguments the PTE value
486	 * for a given <vpn> in a given <process> (page_fault or copy_on_write event).
487	 * The server cluster is supposed to be the reference cluster, and the vseg
488	 * containing the VPN must be registered in the reference VMM.
489	 * It returns an error if physical memory cannot be allocated for the missing PTE2,
490	 * or for the missing page itself.
493	 * [21] The RPC_VMM_GLOBAL_UPDATE_PTE can be used by a thread that is not running
494	 * in the reference cluster, to ask the reference cluster to update a specific entry,
495	 * identified by the <vpn> argument, in all GPT copies of a process identified by
496	 * the <process> argument, using the values defined by the <attr> and <ppn> arguments.
497	 * The server cluster is supposed to be the reference cluster.
498	 * It does not return any error code, as the called function vmm_global_update_pte()
499	 * cannot fail.
491 500	 ***********************************************************************************
492 501	 * @ cxy     : server cluster identifier.
493	 * @ process : [in]  pointer on process descriptor in server cluster.
494	 * @ vaddr   : [in]  virtual address to be searched.
495	 * @ cow     : [in]  "copy_on_write" event if true / "page_fault" event if false.
496	 * @ attr    : [out] address of buffer for attributes.
497	 * @ ppn     : [out] address of buffer for PPN.
498	 * @ error   : [out] address of buffer for error code.
499	 **********************************************************************************/
500	void rpc_vmm_get_pte_client( cxy_t              cxy,
501	                             struct process_s * process,
502	                             vpn_t              vpn,
503	                             bool_t             cow,
504	                             uint32_t         * attr,
505	                             ppn_t            * ppn,
506	                             error_t          * error );
508	void rpc_vmm_get_pte_server( xptr_t xp );
502	 * @ process : [in] pointer on process descriptor in server cluster.
503	 * @ vpn     : [in] virtual address to be searched.
504	 * @ attr    : [in] PTE attributes.
505	 * @ ppn     : [in] PTE PPN.
506	 **********************************************************************************/
507	void rpc_vmm_global_update_pte_client( cxy_t              cxy,
508	                                       struct process_s * process,
509	                                       vpn_t              vpn,
510	                                       uint32_t           attr,
511	                                       ppn_t              ppn );
512	
513	void rpc_vmm_global_update_pte_server( xptr_t xp );
509 514	
510 515	/***********************************************************************************
-
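The new rpc_thread_func() comment above enumerates the steps of the server loop. The sketch below is a compact, simulated rendering of that control flow only; the FIFO array, its ownership flag, and every helper function are stubs invented for the example, standing in for the kernel's remote_fifo and thread-blocking primitives.

    #include <stdbool.h>
    #include <stdio.h>

    // tiny simulated FIFO so that the loop can run and terminate :
    // -1 marks the empty condition
    static int  fifo[4] = { 21, 22, 23, -1 };
    static int  fifo_pos = 0;
    static bool fifo_owned = false;

    static bool take_fifo_ownership( void )   { if( fifo_owned ) return false;
                                                fifo_owned = true; return true; }
    static void release_fifo_ownership( void ){ fifo_owned = false; }
    static bool fifo_get( int * req )         { *req = fifo[fifo_pos];
                                                if( *req < 0 ) return false;
                                                fifo_pos++; return true; }
    static void execute_service( int req )        { printf( "served rpc %d\n", req ); }
    static void unblock_client_and_ipi( int req ) { printf( "client of rpc %d unblocked\n", req ); }

    // simplified shape of the server loop described in the comment above
    int main( void )
    {
        while( true )
        {
            int req;
            if( take_fifo_ownership() )
            {
                bool found = fifo_get( &req );     // consume one request if any
                release_fifo_ownership();
                if( found )
                {
                    execute_service( req );            // run the requested service
                    unblock_client_and_ipi( req );     // wake the client thread
                    continue;
                }
            }
            // empty FIFO : the real RPC thread would either suicide (too many
            // RPC threads on this core) or block on IDLE and deschedule ;
            // the simulation simply stops here
            break;
        }
        return 0;
    }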
trunk/kernel/kern/scheduler.c
r582 r583
40 40	
41 41	extern chdev_directory_t    chdev_dir;      // allocated in kernel_init.c
42	extern process_t            process_zero;   // allocated in kernel_init.c
42 43	
43 44	///////////////////////////////////////////////////////////////////////////////////////////
… …
83 84	
84 85	    // check kernel threads list
85	    assert( (count < sched->k_threads_nr),
86	    "bad kernel threads list" );
86	    assert( (count < sched->k_threads_nr), "bad kernel threads list" );
87 87	
88 88	    // get next entry in kernel list
… …
118 118	
119 119	    // check user threads list
120	    assert( (count < sched->u_threads_nr),
121	    "bad user threads list" );
120	    assert( (count < sched->u_threads_nr), "bad user threads list" );
122 121	
123 122	    // get next entry in user list
… …
146 145	
147 146	////////////////////////////////////////////////////////////////////////////////////////////
148	// This static function is the only function that can remove a thread from the scheduler.
147	// This static function is the only function that can actually delete a thread.
149 148	// It is private, because it is called by the sched_yield() public function.
150 149	// It scans all threads attached to a given scheduler, and executes the relevant
151	// actions for pending requests:
150	// actions for two types of pending requests:
152 151	// - REQ_ACK : it checks that the target thread is blocked, decrements the response counter
153 152	//   to acknowledge the client thread, and resets the pending request.
154	// - REQ_DELETE : it detach the target thread from parent if attached, detach it from
155	//   the process, remove it from scheduler, release memory allocated to thread descriptor,
156	//   and destroy the process descriptor it the target thread was the last thread.
153	// - REQ_DELETE : it removes the target thread from the process th_tbl[], removes it
154	//   from the scheduler list, and releases the memory allocated to the thread descriptor.
155	//   For a user thread, it destroys the process descriptor if the target thread is
156	//   the last thread in the local process descriptor.
157	//
158	// Implementation note:
159	// We use a while loop to scan the threads in the scheduler lists, because some threads
160	// can be destroyed, and we cannot use a LIST_FOREACH().
157 161	////////////////////////////////////////////////////////////////////////////////////////////
158 162	// @ core : local pointer on the core descriptor.
… …
166 170	    process_t   * process;
167 171	    scheduler_t * sched;
168	    bool_t        last;
172	    uint32_t      threads_nr;   // number of threads in scheduler list
173	    ltid_t        ltid;         // thread local index
174	    uint32_t      count;        // number of threads in local process
169 175	
170 176	    // get pointer on scheduler
171 177	    sched = &core->scheduler;
172 178	
173	    // get pointer on user threads root
179	    ////// scan user threads to handle both ACK and DELETE requests
174 180	    root = &sched->u_root;
175	
176	    // We use a while to scan the user threads, to control the iterator increment,
177	    // because some threads will be destroyed, and we want not use a LIST_FOREACH()
178	
179	    // initialise list iterator
180 181	    iter = root->next;
181	
182	    // scan all user threads
183 182	    while( iter != root )
184 183	    {
… …
210 209	        process = thread->process;
211 210	
212	        // release FPU if required
213	        if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
214	
215	        // take lock protecting sheduler state
211	        // get thread ltid
212	        ltid = LTID_FROM_TRDID( thread->trdid );
213	
214	        // take the lock protecting th_tbl[]
215	        rwlock_wr_acquire( &process->th_lock );
216	
217	        // take the lock protecting scheduler state
216 218	        busylock_acquire( &sched->lock );
217 219	
218 220	        // update scheduler state
219	        uint32_t threads_nr = sched->u_threads_nr;
221	        threads_nr = sched->u_threads_nr;
220 222	        sched->u_threads_nr = threads_nr - 1;
221 223	        list_unlink( &thread->sched_list );
… …
236 238	        }
237 239	
238	        // release lock protecting scheduler state
240	        // release the lock protecting scheduler state
239 241	        busylock_release( &sched->lock );
240 242	
241	        // delete thread descriptor
242	        last = thread_destroy( thread );
243	        // get number of threads in local process
244	        count = process->th_nr;
245	
246	        // check th_nr value
247	        assert( (count > 0) , "process th_nr cannot be 0\n" );
248	
249	        // remove thread from process th_tbl[]
250	        process->th_tbl[ltid] = NULL;
251	        process->th_nr = count - 1;
252	
253	        // release the lock protecting th_tbl[]
254	        rwlock_wr_release( &process->th_lock );
255	
256	        // release memory allocated for thread descriptor
257	        thread_destroy( thread );
243 258	
244 259	#if DEBUG_SCHED_HANDLE_SIGNALS
245 260	        uint32_t cycle = (uint32_t)hal_get_cycles();
246 261	        if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
247	        printk("\n[DBG] %s : thread %x in process %x on core[%x,%d] deleted / cycle %d\n",
248	        __FUNCTION__ , thread->trdid , process->pid , local_cxy , thread->core->lid , cycle );
262	        printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
263	        __FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle );
249 264	#endif
250	        // destroy process descriptor if no more threads
251	        if( last )
265	        // destroy process descriptor if last thread
266	        if( count == 1 )
252 267	        {
253 268	            // delete process
… …
262 277	        }
263 278	    }
279	    } // end user threads
280	
281	    ////// scan kernel threads for DELETE only
282	    root = &sched->k_root;
283	    iter = root->next;
284	    while( iter != root )
285	    {
286	        // get pointer on thread
287	        thread = LIST_ELEMENT( iter , thread_t , sched_list );
288	
289	        // increment iterator
290	        iter = iter->next;
291	
292	        // handle REQ_DELETE only if target thread != calling thread
293	        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
294	        {
295	
296	            // check process descriptor is local kernel process
297	            assert( ( thread->process == &process_zero ) , "illegal process descriptor\n");
298	
299	            // get thread ltid
300	            ltid = LTID_FROM_TRDID( thread->trdid );
301	
302	            // take the lock protecting th_tbl[]
303	            rwlock_wr_acquire( &process_zero.th_lock );
304	
305	            // take the lock protecting scheduler state
306	            busylock_acquire( &sched->lock );
307	
308	            // update scheduler state
309	            threads_nr = sched->k_threads_nr;
310	            sched->k_threads_nr = threads_nr - 1;
311	            list_unlink( &thread->sched_list );
312	            if( sched->k_last == &thread->sched_list )
313	            {
314	                if( threads_nr == 1 )
315	                {
316	                    sched->k_last = NULL;
317	                }
318	                else if( sched->k_root.next == &thread->sched_list )
319	                {
320	                    sched->k_last = sched->k_root.pred;
321	                }
322	                else
323	                {
324	                    sched->k_last = sched->k_root.next;
325	                }
326	            }
327	
328	            // release the lock protecting scheduler state
329	            busylock_release( &sched->lock );
330	
331	            // get number of threads in local kernel process
332	            count = process_zero.th_nr;
333	
334	            // check th_nr value
335	            assert( (count > 0) , "kernel process th_nr cannot be 0\n" );
336	
337	            // remove thread from process th_tbl[]
338	            process_zero.th_tbl[ltid] = NULL;
339	            process_zero.th_nr = count - 1;
340	
341	            // release the lock protecting th_tbl[]
342	            rwlock_wr_release( &process_zero.th_lock );
343	
344	            // delete thread descriptor
345	            thread_destroy( thread );
346	
347	#if DEBUG_SCHED_HANDLE_SIGNALS
348	            uint32_t cycle = (uint32_t)hal_get_cycles();
349	            if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
350	            printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
351	            __FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
352	#endif
353	        }
264 354	    }
265 355	}  // end sched_handle_signals()
… …
268 358	// This static function is called by the sched_yield function when the RPC_FIFO
269 359	// associated to the core is not empty.
270	// It checks if it exists an idle (blocked) RPC thread for this core, and unblock
271	// it if found. It creates a new RPC thread if no idle RPC thread is found.
360	// It searches for an idle RPC thread on this core, and unblocks it if found.
361	// It creates a new RPC thread if no idle RPC thread is found.
272 362	////////////////////////////////////////////////////////////////////////////////////////////
273 363	// @ sched : local pointer on scheduler.
… …
285 375	    {
286 376	        thread = LIST_ELEMENT( iter , thread_t , sched_list );
287	        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
288	        {
289	            // exit loop
377	
378	        if( (thread->type == THREAD_RPC) &&
379	            (thread->blocked == THREAD_BLOCKED_IDLE ) )
380	        {
290 381	            found = true;
291 382	            break;
… …
303 394	        if ( error )
304 395	        {
305	            printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
396	            printk("\n[ERROR] in %s : no memory to create a RPC thread in cluster %x\n",
306 397	            __FUNCTION__, local_cxy );
307 398	        }
… …
317 408	    uint32_t cycle = (uint32_t)hal_get_cycles();
318 409	    if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
319	    printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
320	    __FUNCTION__, thread->trdid, local_cxy, lid, cycle );
410	    printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
411	    __FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
321 412	#endif
322 413	    }
… …
476 567	    busylock_release( &sched->lock );
477 568	
478	#if DEBUG_SCHED_YIELD
569	#if (DEBUG_SCHED_YIELD & 1)
479 570	    if( sched->trace )
480 571	    printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
… …
519 610	    remote_busylock_acquire( lock_xp );
520 611	
521	    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
522	    local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );
612	    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
613	    local_cxy , core->lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
614	    (uint32_t)hal_get_cycles() );
523 615	
524 616	    // display kernel threads
… …
564 656	    "illegal cluster %x\n", cxy );
565 657	
566	    // check lid
567 658	    assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
568 659	    "illegal core index %d\n", lid );
… …
590 681	    remote_busylock_acquire( lock_xp );
591 682	
683	    // get rpc_threads
684	    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );
685	
592 686	    // display header
593	    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
594	    cxy , lid, current, (uint32_t)hal_get_cycles() );
687	    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
688	    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
595 689	
596 690	    // display kernel threads
-
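The "Implementation note" added to sched_handle_signals() documents a classic pattern: the iterator must be advanced before the current element can be unlinked and freed, which a FOREACH macro cannot guarantee, since it would dereference the freed node to find the next one. A minimal, self-contained illustration of that pattern follows (generic doubly linked list, not the kernel's list/xlist API):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct node_s { struct node_s * next; struct node_s * pred; int value; } node_t;

    // unlink and free every node carrying a "delete request" (odd value here) :
    // the iterator is advanced BEFORE the current node is unlinked, so the
    // traversal survives the deletion
    static void scan_and_delete( node_t * root )
    {
        node_t * iter = root->next;
        while( iter != root )
        {
            node_t * thread = iter;
            iter = iter->next;                       // increment iterator first
            if( thread->value & 1 )                  // pending delete request
            {
                thread->pred->next = thread->next;   // equivalent of list_unlink()
                thread->next->pred = thread->pred;
                free( thread );                      // equivalent of thread_destroy()
            }
        }
    }

    int main( void )
    {
        node_t root = { &root, &root, 0 };
        for( int i = 1 ; i <= 4 ; i++ )              // build the list 1,2,3,4
        {
            node_t * n = malloc( sizeof(node_t) );
            n->value = i;
            n->next  = &root;
            n->pred  = root.pred;
            root.pred->next = n;
            root.pred = n;
        }
        scan_and_delete( &root );
        for( node_t * p = root.next ; p != &root ; p = p->next )
            printf( "%d ", p->value );               // prints : 2 4
        printf( "\n" );
        return 0;
    }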
trunk/kernel/kern/thread.c
r581 r583
224 224	
225 225	    // update DQDT
226	    dqdt_update_threads( 1 );
226	    dqdt_increment_threads();
227 227	
228 228	#if DEBUG_THREAD_INIT
… …
768 768	    hal_cpu_context_init( thread );
769 769	
770	    // set THREAD_BLOCKED_IDLE for DEV threads
771	    if( type == THREAD_DEV ) thread->blocked |= THREAD_BLOCKED_IDLE;
770 772	
771 773	#if DEBUG_THREAD_KERNEL_CREATE
… …
815 817	///////////////////////////////////////////////////////////////////////////////////////
816 818	// TODO: check that all memory dynamically allocated during thread execution
817	// has been released, using a cache of mmap requests. [AG]
819	// has been released => check vmm destroy for MMAP vsegs [AG]
818 820	///////////////////////////////////////////////////////////////////////////////////////
819	bool_t thread_destroy( thread_t * thread )
821	void thread_destroy( thread_t * thread )
820 822	{
821 823	    reg_t        save_sr;
822	    bool_t       last_thread;
823 824	
824 825	    process_t  * process = thread->process;
825 826	
826 827	
827 828	#if DEBUG_THREAD_DESTROY
828	    uint32_t cycle = (uint32_t)hal_get_cycles();
829	    uint32_t   cycle = (uint32_t)hal_get_cycles();
830	    thread_t * this  = CURRENT_THREAD;
829 831	    if( DEBUG_THREAD_DESTROY < cycle )
830	    printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n",
831	    __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, cycle );
832	#endif
833	
834	    // check busylocks counter
835	    assert( (thread->busylocks == 0) ,
836	    "busylock not released for thread %x in process %x", thread->trdid, process->pid );
832	    printk("\n[DBG] %s : thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
833	    __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
834	#endif
835	
836	    // check busylocks counter
837	    thread_assert_can_yield( thread , __FUNCTION__ );
837 838	
838 839	    // update instrumentation values
… …
852 853	    hal_restore_irq( save_sr );
853 854	
854	    // remove thread from process th_tbl[]
855	    last_thread = process_remove_thread( thread );
856	
857	    // update DQDT
858	    dqdt_update_threads( -1 );
859	
860 855	    // invalidate thread descriptor
861 856	    thread->signature = 0;
… …
867 862	    cycle = (uint32_t)hal_get_cycles();
868 863	    if( DEBUG_THREAD_DESTROY < cycle )
869	    printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / last %d / cycle %d\n",
870	    __FUNCTION__, CURRENT_THREAD, thread->trdid, process->pid, last_thread / cycle );
864	    printk("\n[DBG] %s : thread[%x,%x] exit / destroyed thread[%x,%x] / cycle %d\n",
865	    __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
871 866	#endif
872	
873	    return last_thread;
867	
874 868	
875	}  // end thread_destroy()
… …
1023 1016	    uint32_t cycle  = (uint32_t)hal_get_cycles();
1024 1017	    if( DEBUG_THREAD_DELETE < cycle )
1025	    printk("\n[DBG] %s : thread %x in process %x enters / target thread %x / cycle %d\n",
1026	    __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
1018	    printk("\n[DBG] %s : killer[%x,%x] enters / target[%x,%x] / cycle %d\n",
1019	    __FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1020	    target_ptr->process->pid, target_ptr->trdid, cycle );
1027 1021	#endif
1028	
1029	    // check killer thread can yield
1030	    assert( (killer_ptr->busylocks == 0),
1031	    "cannot yield : busylocks = %d\n", killer_ptr->busylocks );
1022	
1032 1023	    // check target thread is not the main thread, because the main thread
… …
1036 1026	    "target thread cannot be the main thread\n" );
1037 1027	
1038	    // block the target thread
1039	    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1040	
1041	    // synchronize with the joining thread if attached
1042	    if( target_attached && (is_forced == false) )
1043	    {
1044	
1045	#if (DEBUG_THREAD_DELETE & 1)
1046	    if( DEBUG_THREAD_DELETE < cycle )
1047	    printk("\n[DBG] %s : thread %x in process %x / target thread is attached\n",
1048	    __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
1049	#endif
1028	    // check killer thread can yield
1029	    thread_assert_can_yield( killer_ptr , __FUNCTION__ );
1030	
1031	    // if the target thread is attached, we must synchronize with the joining thread
1032	    // before blocking and marking the target thread for delete.
1033	
1034	    if( target_attached && (is_forced == false) )   // synchronize with joining thread
1035	    {
1050 1036	        // build extended pointers on target thread join fields
1051 1037	        target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock );
… …
1061 1047	        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
1062 1048	
1063	        if( target_join_done )  // joining thread arrived first => unblock the joining thread
1049	        if( target_join_done )  // joining thread arrived first
1064 1050	        {
1065	
1066	#if (DEBUG_THREAD_DELETE & 1)
1067	    if( DEBUG_THREAD_DELETE < cycle )
1068	    printk("\n[DBG] %s : thread %x in process %x / joining thread arrived first\n",
1069	    __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
1070	#endif
1071 1051	            // get extended pointer on joining thread
1072 1052	            joining_xp = (xptr_t)hal_remote_l64( target_join_xp_xp );
… …
1083 1063	            remote_busylock_release( target_join_lock_xp );
1084 1064	
1065	            // block the target thread
1066	            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1067	
1085 1068	            // set the REQ_DELETE flag in target thread descriptor
1086 1069	            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1087 1070	
1088	            // restore IRQs
1071	            // exit critical section
1089 1072	            hal_restore_irq( save_sr );
1090	        }
1091	        else      // killer thread arrived first => register flags and deschedule
1092	        {
1093	
1094	#if (DEBUG_THREAD_DELETE & 1)
1095	    if( DEBUG_THREAD_DELETE < cycle )
1096	    printk("\n[DBG] %s : thread %x in process %x / killer thread arrived first\n",
1097	    __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
1098	#endif
1099	            // set the kill_done flag in target thread
1100	            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1101	
1102	            // block this thread on BLOCKED_JOIN
1103	            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1104	
1105	            // set extended pointer on killer thread in target thread
1106	            hal_remote_s64( target_join_xp_xp , killer_xp );
1107	
1108	            // release the join_lock in target thread descriptor
1109	            remote_busylock_release( target_join_lock_xp );
1110	
1111	#if (DEBUG_THREAD_DELETE & 1)
1112	    if( DEBUG_THREAD_DELETE < cycle )
1113	    printk("\n[DBG] %s : thread %x in process %x / killer thread deschedule\n",
1114	    __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
1115	#endif
1116	            // deschedule
1117	            sched_yield( "killer thread wait joining thread" );
1118	
1119	#if (DEBUG_THREAD_DELETE & 1)
1120	    if( DEBUG_THREAD_DELETE < cycle )
1121	    printk("\n[DBG] %s : thread %x in process %x / killer thread resume\n",
1122	    __FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
1123	#endif
1124	            // set the REQ_DELETE flag in target thread descriptor
1125	            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1126	
1127	            // restore IRQs
1128	            hal_restore_irq( save_sr );
1129	        }
1130	    }
1131	    else        // target thread not attached
1132	    {
1133	        // set the REQ_DELETE flag in target thread descriptor
1134	        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1135	    }
1136	
1073	
1074	#if DEBUG_THREAD_DELETE
1075	            cycle = (uint32_t)hal_get_cycles();
1076	            if( DEBUG_THREAD_DELETE < cycle )
1077	            printk("\n[DBG] %s : killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
1078	            __FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1079	            target_ptr->process->pid, target_ptr->trdid, cycle );
1080	#endif
1081	
1082	        }
1083	        else      // killer thread arrived first
1084	        {
1085	            // set the kill_done flag in target thread
1086	            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
1087	
1088	            // block this thread on BLOCKED_JOIN
1089	            thread_block( killer_xp , THREAD_BLOCKED_JOIN );
1090	
1091	            // set extended pointer on killer thread in target thread
1092	            hal_remote_s64( target_join_xp_xp , killer_xp );
1093	
1094	            // release the join_lock in target thread descriptor
1095	            remote_busylock_release( target_join_lock_xp );
1096	
1097	#if DEBUG_THREAD_DELETE
1098	            cycle = (uint32_t)hal_get_cycles();
1099	            if( DEBUG_THREAD_DELETE < cycle )
1100	            printk("\n[DBG] %s : killer[%x,%x] deschedules / target[%x,%x] not completed / cycle %d\n",
1101	            __FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1102	            target_ptr->process->pid, target_ptr->trdid, cycle );
1103	#endif
1104	            // deschedule
1105	            sched_yield( "killer thread wait joining thread" );
1106	
1107	            // block the target thread
1108	            thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1109	
1110	            // set the REQ_DELETE flag in target thread descriptor
1111	            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1112	
1113	            // exit critical section
1114	            hal_restore_irq( save_sr );
1115	
1116	#if DEBUG_THREAD_DELETE
1117	            cycle = (uint32_t)hal_get_cycles();
1118	            if( DEBUG_THREAD_DELETE < cycle )
1119	            printk("\n[DBG] %s : killer[%x,%x] exit / target[%x,%x] marked after join / cycle %d\n",
1120	            __FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1121	            target_ptr->process->pid, target_ptr->trdid, cycle );
1122	#endif
1123	
1124	        }
1125	    }
1126	    else          // no synchronization with joining thread required
1127	    {
1128	        // block the target thread
1129	        thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
1130	
1131	        // set the REQ_DELETE flag in target thread descriptor
1132	        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
1133	
1134	#if DEBUG_THREAD_DELETE
1135	        cycle = (uint32_t)hal_get_cycles();
1136	        if( DEBUG_THREAD_DELETE < cycle )
1137	        printk("\n[DBG] %s : killer[%x,%x] exit / target [%x,%x] marked / no join / cycle %d\n",
1138	        __FUNCTION__, killer_ptr->process->pid, killer_ptr->trdid,
1139	        target_ptr->process->pid, target_ptr->trdid, cycle );
1140	#endif
1141	
1142	    }
1143 1143	
1144 1144	}  // end thread_delete()
… …
1155 1155	
1156 1156	        // force core to low-power mode (optional)
1157	        if( CONFIG_THREAD_IDLE_MODE_SLEEP )
1157	        if( CONFIG_SCHED_IDLE_MODE_SLEEP )
1158 1158	        {
… …
1354 1354	#if DEBUG_BUSYLOCK
1355 1355	
1356	    // get root of list of taken busylocks
1356	    // scan list of busylocks
1357	    xptr_t iter_xp;
1357 1358	    xptr_t root_xp = XPTR( local_cxy , &thread->busylocks_root );
1358	    xptr_t iter_xp;
1359	
1360	    // scan list of busylocks
1361 1359	    XLIST_FOREACH( root_xp , iter_xp )
1362 1360	    {
-
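The restructured thread_delete() distinguishes three cases (joining thread arrived first, killer arrived first, no join required), but all three end with the same two steps: block the target, then set REQ_DELETE so the scheduler reclaims it at the next scheduling point. The model below reduces the killer-side decision to its flag logic, using C11 atomics in place of the kernel's remote accesses; the join_lock, the descheduling, and the IPI path are deliberately omitted, so this is a sketch of the rendezvous idea only. The flag values are taken from thread.h below.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_JOIN_DONE  0x0002
    #define FLAG_KILL_DONE  0x0004
    #define FLAG_REQ_DELETE 0x0020

    // one rendezvous step of the killer side, as in thread_delete() :
    // returns true when the target can be marked for delete immediately,
    // false when the killer must deschedule and wait for the joiner
    static bool killer_arrives( atomic_uint * target_flags )
    {
        if( atomic_load( target_flags ) & FLAG_JOIN_DONE )
        {
            // joining thread arrived first : mark the target for delete
            atomic_fetch_or( target_flags, FLAG_REQ_DELETE );
            return true;
        }
        // killer arrived first : record its arrival for the joiner to test
        atomic_fetch_or( target_flags, FLAG_KILL_DONE );
        return false;
    }

    int main( void )
    {
        // ordering 1 : joiner already executed pthread_join()
        atomic_uint flags1 = FLAG_JOIN_DONE;
        printf( "join first -> marked now : %d\n", killer_arrives( &flags1 ) );

        // ordering 2 : killer arrives first ; the joiner will unblock it later
        atomic_uint flags2 = 0;
        printf( "kill first -> marked now : %d\n", killer_arrives( &flags2 ) );
        printf( "flags after : %#x (KILL_DONE set for the joiner to test)\n",
                atomic_load( &flags2 ) );
        return 0;
    }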
trunk/kernel/kern/thread.h
r580 r583 72 72 #define THREAD_FLAG_JOIN_DONE 0x0002 /*! Parent thread made a join request */ 73 73 #define THREAD_FLAG_KILL_DONE 0x0004 /*! This thread received a kill request */ 74 #define THREAD_FLAG_SCHED 0x0008 /*! Scheduling required for this thread */75 74 #define THREAD_FLAG_REQ_ACK 0x0010 /*! Acknowledge required from scheduler */ 76 75 #define THREAD_FLAG_REQ_DELETE 0x0020 /*! Destruction required from scheduler */ … … 334 333 * is marked for delete. This include the thread descriptor itself, the associated 335 334 * CPU and FPU context, and the physical memory allocated for an user thread local stack. 336 * The destroyed thread is removed from the local process th_tbl[] array, and returns337 * true when the destroyed thread was the last thread registered in process.338 335 *************************************************************************************** 339 336 * @ thread : pointer on the thread descriptor to release. 340 337 * @ return true, if the thread was the last registerd thread in local process. 341 338 **************************************************************************************/ 342 bool_tthread_destroy( thread_t * thread );339 void thread_destroy( thread_t * thread ); 343 340 344 341 /*************************************************************************************** … … 390 387 * to asynchronously delete the target thread, at the next scheduling point. 391 388 * The calling thread can run in any cluster, as it uses remote accesses, but 392 * the target thread cannot be the main thread of the process identified by the <pid> ,393 * because the main thread must be deleted by the parent process argument.389 * the target thread cannot be the main thread of the process identified by the <pid> 390 * argument, because the main thread must be deleted by the parent process argument. 394 391 * If the target thread is running in "attached" mode, and the <is_forced> argument 395 392 * is false, this function implements the required sychronisation with the joining 396 * thread, blocking the calling thread until the pthread_join() syscall is executed. 393 * thread, blocking the killer thread until the pthread_join() syscall is executed 394 * by the joining thread. 397 395 *************************************************************************************** 398 396 * @ thread_xp : extended pointer on the target thread.
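The attached/detached distinction that this comment describes is the kernel-side implementation of standard POSIX thread semantics: a joinable thread may only be reclaimed at the pthread_join() rendezvous, while a detached thread needs no such synchronisation. A short user-level example, assuming an ordinary POSIX threads environment rather than the ALMOS-MKH syscall layer:

    #include <pthread.h>
    #include <stdio.h>

    static void * worker( void * arg ) { (void)arg; return NULL; }

    int main( void )
    {
        pthread_t      joinable, detached;
        pthread_attr_t attr;

        // joinable ("attached") thread : deletion must synchronize with join
        pthread_create( &joinable, NULL, worker, NULL );

        // detached thread : reclaimed without any rendezvous
        pthread_attr_init( &attr );
        pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_DETACHED );
        pthread_create( &detached, &attr, worker, NULL );
        pthread_attr_destroy( &attr );

        // the joinable thread is reclaimed only at this rendezvous point ;
        // in ALMOS-MKH this is where the killer/joiner handshake completes
        pthread_join( joinable, NULL );

        puts( "joinable worker reclaimed after join ; detached worker reclaims itself" );
        return 0;
    }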