Changeset 433 for trunk/kernel/kern
- Timestamp: Feb 14, 2018, 3:40:19 PM (7 years ago)
- Location: trunk/kernel/kern
- Files: 11 edited
trunk/kernel/kern/chdev.c
r428 r433 129 129 thread_t * this = CURRENT_THREAD; 130 130 131 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) enter / cycle %d\n", 132 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() ); 131 #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND 132 uint32_t cycle = (uint32_t)hal_get_cycles(); 133 if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle ) 134 printk("\n[DBG] %s : client_thread %x (%s) enter / cycle %d\n", 135 __FUNCTION__, this, thread_type_str(this->type) , cycle ); 136 #endif 133 137 134 138 // get device descriptor cluster and local pointer … … 142 146 // get local pointer on server thread 143 147 server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) ); 144 145 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) / server_cxy %x / server_ptr %x / server_type %\n",146 __FUNCTION__, local_cxy, this->core->lid, server_cxy, server_ptr,147 thread_type_str( hal_remote_lw( XPTR( server_cxy , &server_ptr->type) ) ) );148 148 149 149 // build extended pointer on chdev lock protecting queue … … 178 178 if( different ) dev_pic_send_ipi( chdev_cxy , lid ); 179 179 180 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) deschedules / cycle %d\n", 181 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() ); 180 #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND 181 cycle = (uint32_t)hal_get_cycles(); 182 if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle ) 183 printk("\n[DBG] %s : client_thread %x (%s) exit / cycle %d\n", 184 __FUNCTION__, this, thread_type_str(this->type) , cycle ); 185 #endif 182 186 183 187 // deschedule … … 185 189 sched_yield("blocked on I/O"); 186 190 187 chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) resumes / cycle %d\n",188 __FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );189 190 191 // exit critical section 191 192 hal_restore_irq( save_sr ); 193 194 #if CONFIG_DEBUG_CHDEV_REGISTER_COMMAND 195 cycle = (uint32_t)hal_get_cycles(); 196 if( CONFIG_DEBUG_CHDEV_REGISTER_COMMAND < cycle ) 197 printk("\n[DBG] %s : client_thread %x (%s) resumes / cycle %d\n", 198 __FUNCTION__, this, thread_type_str(this->type) , cycle ); 199 #endif 192 200 193 201 #if CONFIG_READ_DEBUG … … 209 217 server = CURRENT_THREAD; 210 218 211 chdev_dmsg("\n[DBG] %s : enter / server = %x / chdev = %x / cycle %d\n", 212 __FUNCTION__ , server , chdev , hal_time_stamp() ); 219 #if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER 220 uint32_t cycle = (uint32_t)hal_get_cycles(); 221 if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle ) 222 printk("\n[DBG] %s : server_thread %x enter / chdev = %x / cycle %d\n", 223 __FUNCTION__ , server , chdev , cycle ); 224 #endif 213 225 214 226 root_xp = XPTR( local_cxy , &chdev->wait_root ); … … 265 277 thread_unblock( client_xp , THREAD_BLOCKED_IO ); 266 278 267 chdev_dmsg("\n[DBG] %s : thread %x complete operation for client %x / cycle %d\n", 268 __FUNCTION__ , server , client_ptr , hal_time_stamp() ); 279 #if CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER 280 cycle = (uint32_t)hal_get_cycles(); 281 if( CONFIG_DEBUG_CHDEV_SEQUENCIAL_SERVER < cycle ) 282 printk("\n[DBG] %s : server_thread %x complete operation for client %x / cycle %d\n", 283 __FUNCTION__ , server , client_ptr , cycle ); 284 #endif 269 285 270 286 #if CONFIG_READ_DEBUG -
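
This changeset replaces the chdev_dmsg() trace macros with a two-level guard: a #if on the CONFIG_DEBUG_CHDEV_REGISTER_COMMAND constant at compile time, plus a run-time comparison of that constant against the current cycle, so the trace only fires after a configurable date. Below is a minimal standalone sketch of that idiom, using a hypothetical DEBUG_THRESHOLD constant and a get_cycles() stub in place of the kernel's CONFIG_DEBUG_* value and hal_get_cycles().

#include <stdio.h>
#include <stdint.h>

/* Stand-in for CONFIG_DEBUG_CHDEV_REGISTER_COMMAND: 0 disables the trace at
 * compile time; any other value is the cycle from which it fires at run time. */
#define DEBUG_THRESHOLD  1000

/* stand-in for hal_get_cycles() */
static uint32_t get_cycles( void )
{
    static uint32_t cycles = 0;
    return cycles += 600;
}

static void register_command( int channel )
{
#if DEBUG_THRESHOLD
    uint32_t cycle = get_cycles();
    if( DEBUG_THRESHOLD < cycle )
        printf("\n[DBG] %s : channel %d enter / cycle %u\n", __func__, channel, cycle );
#endif

    /* ... the real function registers the command and deschedules here ... */

#if DEBUG_THRESHOLD
    cycle = get_cycles();
    if( DEBUG_THRESHOLD < cycle )
        printf("\n[DBG] %s : channel %d exit / cycle %u\n", __func__, channel, cycle );
#endif
}

int main( void )
{
    register_command( 0 );   /* enter at cycle 600 is silent, exit at 1200 is traced */
    register_command( 1 );   /* both traces fire                                     */
    return 0;
}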
trunk/kernel/kern/cluster.c
r428 r433 89 89 spinlock_init( &cluster->kcm_lock ); 90 90 91 cluster_dmsg("\n[DBG] %s for cluster %x enters\n", 92 __FUNCTION__ , local_cxy ); 91 #if CONFIG_DEBUG_CLUSTER_INIT 92 uint32_t cycle = (uint32_t)hal_get_cycles(); 93 if( CONFIG_DEBUG_CLUSTER_INIT < cycle ) 94 printk("\n[DBG] %s enters for cluster %x / cycle %d\n", 95 __FUNCTION__ , local_cxy , cycle ); 96 #endif 93 97 94 98 // initialises DQDT … … 109 113 } 110 114 111 cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x at cycle %d\n", 112 __FUNCTION__ , local_cxy , hal_get_cycles() ); 115 #if CONFIG_DEBUG_CLUSTER_INIT 116 cycle = (uint32_t)hal_get_cycles(); 117 if( CONFIG_DEBUG_CLUSTER_INIT < cycle ) 118 cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x / cycle %d\n", 119 __FUNCTION__ , local_cxy , cycle ); 120 #endif 113 121 114 122 // initialises embedded KHM … … 132 140 } 133 141 134 cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x at cycle %d\n", 135 __FUNCTION__ , local_cxy , hal_get_cycles() ); 142 #if CONFIG_DEBUG_CLUSTER_INIT 143 cycle = (uint32_t)hal_get_cycles(); 144 if( CONFIG_DEBUG_CLUSTER_INIT < cycle ) 145 cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x / cycle %d\n", 146 __FUNCTION__ , local_cxy , cycle ); 147 #endif 136 148 137 149 // initialises RPC fifo … … 164 176 } 165 177 166 cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x at cycle %d\n", 167 __FUNCTION__ , local_cxy , hal_get_cycles() ); 178 #if CONFIG_DEBUG_CLUSTER_INIT 179 cycle = (uint32_t)hal_get_cycles(); 180 if( CONFIG_DEBUG_CLUSTER_INIT < cycle ) 181 cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x / cycle %d\n", 182 __FUNCTION__ , local_cxy , cycle ); 183 #endif 168 184 169 185 hal_fence(); … … 215 231 // Process related functions 216 232 //////////////////////////////////////////////////////////////////////////////////// 233 234 235 ////////////////////////////////////////////////////// 236 xptr_t cluster_get_owner_process_from_pid( pid_t pid ) 237 { 238 xptr_t root_xp; // xptr on root of list of processes in owner cluster 239 xptr_t lock_xp; // xptrr on lock protecting this list 240 xptr_t iter_xp; // iterator 241 xptr_t current_xp; // xptr on current process descriptor 242 process_t * current_ptr; // local pointer on current process 243 pid_t current_pid; // current process identifier 244 bool_t found; 245 246 cluster_t * cluster = LOCAL_CLUSTER; 247 248 // get owner cluster and lpid 249 cxy_t owner_cxy = CXY_FROM_PID( pid ); 250 251 // get lock & root of list of process in owner cluster 252 root_xp = XPTR( owner_cxy , &cluster->pmgr.local_root ); 253 lock_xp = XPTR( owner_cxy , &cluster->pmgr.local_lock ); 254 255 // take the lock protecting the list of processes 256 remote_spinlock_lock( lock_xp ); 257 258 // scan list of processes in owner cluster 259 found = false; 260 XLIST_FOREACH( root_xp , iter_xp ) 261 { 262 current_xp = XLIST_ELEMENT( iter_xp , process_t , local_list ); 263 current_ptr = GET_PTR( current_xp ); 264 current_pid = hal_remote_lw( XPTR( owner_cxy , ¤t_ptr->pid ) ); 265 266 if( current_pid == pid ) 267 { 268 found = true; 269 break; 270 } 271 } 272 273 // release the lock protecting the list of processes 274 remote_spinlock_unlock( lock_xp ); 275 276 // return extended pointer on process descriptor in owner cluster 277 if( found ) return current_xp; 278 else return XPTR_NULL; 279 } 217 280 218 281 ////////////////////////////////////////////////////////// … … 442 505 443 506 // skip one line 444 printk("\n ");507 printk("\n***** processes in cluster %x / 
cycle %d\n", cxy , (uint32_t)hal_get_cycles() ); 445 508 446 509 // loop on all reference processes in cluster cxy -
trunk/kernel/kern/cluster.h
r428 r433 189 189 190 190 /****************************************************************************************** 191 * This function returns an extended pointer on the process descriptor in owner cluster 192 * from the process PID. This PID can be be different from the calling process PID. 193 * It can be called by any thread running in any cluster, 194 ****************************************************************************************** 195 * @ pid : process identifier. 196 * @ return extended pointer on owner process if found / XPTR_NULL if not found. 197 *****************************************************************************************/ 198 xptr_t cluster_get_owner_process_from_pid( pid_t pid ); 199 200 /****************************************************************************************** 191 201 * This function returns an extended pointer on the reference process descriptor 192 202 * from the process PID. This PID can be be different from the calling process PID. … … 194 204 ****************************************************************************************** 195 205 * @ pid : process identifier. 196 * @ return extended pointer on reference process if success / return XPTR_NULL if error.206 * @ return extended pointer on reference process if found / XPTR_NULL if not found. 197 207 *****************************************************************************************/ 198 208 xptr_t cluster_get_reference_process_from_pid( pid_t pid ); -
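
Both lookup functions rely on the owner cluster being recoverable from the PID itself via CXY_FROM_PID(). The sketch below illustrates that kind of PID encoding; the 16-bit field width is an assumption made for illustration only, not taken from this changeset.

#include <stdio.h>
#include <stdint.h>

/* Assumed PID layout for illustration: owner cluster identifier in the
 * upper bits, local process index (lpid) in the lower bits. */
#define LPID_WIDTH        16
#define CXY_FROM_PID(p)   ((uint32_t)(p) >> LPID_WIDTH)
#define LPID_FROM_PID(p)  ((uint32_t)(p) & ((1u << LPID_WIDTH) - 1))

int main( void )
{
    uint32_t pid = ((uint32_t)0x2 << LPID_WIDTH) | 0x5;   /* owner cluster 0x2, lpid 5 */

    printf("pid %x : owner cluster %x / lpid %x\n",
           pid, CXY_FROM_PID( pid ), LPID_FROM_PID( pid ));
    return 0;
}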
trunk/kernel/kern/core.c
r409 r433 75 75 } 76 76 77 /* deprecated 14/08/2017 [AG]78 //////////////////////////////////////79 void core_time_update( core_t * core )80 {81 uint32_t elapsed;82 uint32_t ticks_nr = core->ticks_nr;83 uint64_t cycles = core->cycles;84 uint32_t time_stamp = core->time_stamp;85 uint32_t time_now = hal_get_cycles();86 87 // compute number of elapsed cycles taking into account 32 bits register wrap88 if( time_now < time_stamp ) elapsed = (0xFFFFFFFF - time_stamp) + time_now;89 else elapsed = time_now - time_stamp;90 91 cycles += elapsed;92 ticks_nr = elapsed / core->ticks_period;93 94 core->time_stamp = time_now;95 core->cycles = cycles + elapsed;96 core->ticks_nr = ticks_nr + (elapsed / core->ticks_period);97 hal_fence();98 }99 */100 101 77 //////////////////////////////// 102 78 void core_clock( core_t * core ) … … 136 112 hal_fence(); 137 113 138 #if CONFIG_SHOW_CPU_USAGE139 printk(INFO, "INFO: core %d in cluster %x : busy_percent = %d / cumulated_usage = %d\n",140 core->lid, local_cxy , busy_percent , usage );141 #endif142 143 114 core->ticks_nr = 0; 144 115 idle->ticks_nr = 0; -
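
The deprecated core_time_update() removed here computed the elapsed cycle count between two 32-bit timestamps while tolerating one counter wrap. A standalone rendering of exactly that computation, with a small worked example:

#include <stdio.h>
#include <stdint.h>

/* Elapsed cycles between two 32-bit timestamps, tolerating one counter wrap,
 * as computed by the core_time_update() code removed above. */
static uint32_t elapsed_cycles( uint32_t time_stamp, uint32_t time_now )
{
    if( time_now < time_stamp ) return (0xFFFFFFFF - time_stamp) + time_now;
    else                        return time_now - time_stamp;
}

int main( void )
{
    printf("%u\n", elapsed_cycles( 1000, 4000 ));           /* 3000                   */
    printf("%u\n", elapsed_cycles( 0xFFFFFF00, 0x100 ));    /* 511 : wrap-around case */
    return 0;
}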
trunk/kernel/kern/process.c
r428 r433 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017 )6 * Alain Greiner (2016,2017,2018) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 124 124 model_pid = hal_remote_lw( XPTR( model_cxy , &model_ptr->pid ) ); 125 125 126 process_dmsg("\n[DBG] %s : core[%x,%d] enters / pid = %x / ppid = %x / model_pid = %x\n", 127 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , parent_pid , model_pid ); 126 #if CONFIG_DEBUG_PROCESS_REFERENCE_INIT 127 uint32_t cycle = (uint32_t)hal_get_cycles(); 128 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 129 printk("\n[DBG] %s : thread %x enter / pid = %x / ppid = %x / model_pid = %x / cycle %d\n", 130 __FUNCTION__ , CURRENT_THREAD , pid , parent_pid , model_pid , cycle ); 131 #endif 128 132 129 133 // initialize PID, REF_XP, PARENT_XP, and STATE 130 process->pid = pid;131 process->ref_xp = XPTR( local_cxy , process );132 process->parent_xp = parent_xp;133 process-> state = PROCESS_STATE_RUNNING;134 process->pid = pid; 135 process->ref_xp = XPTR( local_cxy , process ); 136 process->parent_xp = parent_xp; 137 process->term_state = 0; 134 138 135 139 // initialize vmm as empty … … 137 141 assert( (error == 0) , __FUNCTION__ , "cannot initialize VMM\n" ); 138 142 139 process_dmsg("\n[DBG] %s : core[%x,%d] / vmm inialised as empty for process %x\n", 140 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 143 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1) 144 cycle = (uint32_t)hal_get_cycles(); 145 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 146 printk("\n[DBG] %s : thread %x / vmm empty for process %x / cycle %d\n", 147 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 148 #endif 141 149 142 150 // initialize fd_array as empty … … 224 232 remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) ); 225 233 226 process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n", 227 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 234 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1) 235 cycle = (uint32_t)hal_get_cycles(); 236 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 237 printk("\n[DBG] %s : thread %x / fd_array for process %x / cycle %d\n", 238 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 239 #endif 228 240 229 241 // reset children list root … … 260 272 hal_fence(); 261 273 262 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 263 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid ); 274 #if (CONFIG_DEBUG_PROCESS_REFERENCE_INIT & 1) 275 cycle = (uint32_t)hal_get_cycles(); 276 if( CONFIG_DEBUG_PROCESS_REFERENCE_INIT ) 277 printk("\n[DBG] %s : thread %x exit / process %x / cycle %d\n", 278 __FUNCTION__ , CURRENT_THREAD , pid , cycle ); 279 #endif 264 280 265 281 } // process_reference_init() … … 276 292 277 293 // initialize PID, REF_XP, PARENT_XP, and STATE 278 local_process->pid = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) ); 279 local_process->parent_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) ); 280 local_process->ref_xp = reference_process_xp; 281 local_process->state = PROCESS_STATE_RUNNING; 282 283 process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n", 284 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid ); 294 local_process->pid = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) ); 295 local_process->parent_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->parent_xp ) ); 296 local_process->ref_xp = reference_process_xp; 297 local_process->term_state = 0; 298 299 #if 
CONFIG_DEBUG_PROCESS_COPY_INIT 300 uint32_t cycle = (uint32_t)hal_get_cycles(); 301 if( CONFIG_DEBUG_PROCESS_COPY_INIT ) 302 printk("\n[DBG] %s : thread %x enter for process %x\n", 303 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); 304 #endif 285 305 286 306 // reset local process vmm … … 327 347 hal_fence(); 328 348 329 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 330 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid ); 349 #if CONFIG_DEBUG_PROCESS_COPY_INIT 350 cycle = (uint32_t)hal_get_cycles(); 351 if( CONFIG_DEBUG_PROCESS_COPY_INIT ) 352 printk("\n[DBG] %s : thread %x exit for process %x\n", 353 __FUNCTION__ , CURRENT_THREAD , local_process->pid ); 354 #endif 331 355 332 356 return 0; … … 347 371 "process %x in cluster %x has still active threads", process->pid , local_cxy ); 348 372 349 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x\n", 350 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 373 #if CONFIG_DEBUG_PROCESS_DESTROY 374 uint32_t cycle = (uint32_t)hal_get_cycles(); 375 if( CONFIG_DEBUG_PROCESS_DESTROY ) 376 printk("\n[DBG] %s : thread %x enter to destroy process %x (pid = %x) / cycle %d\n", 377 __FUNCTION__ , CURRENT_THREAD , process, process->pid , cycle ); 378 #endif 351 379 352 380 // get local process manager pointer … … 386 414 xlist_unlink( XPTR( local_cxy , &process->children_list ) ); 387 415 remote_spinlock_unlock( children_lock_xp ); 388 389 // get extende pointer on parent main thread390 parent_thread_xp = XPTR( parent_cxy ,391 hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->th_tbl[1] )));392 393 // unblock parent process main thread394 thread_unblock( parent_thread_xp , THREAD_BLOCKED_WAIT );395 416 } 396 417 … … 411 432 process_free( process ); 412 433 413 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n", 414 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid ); 434 #if CONFIG_DEBUG_PROCESS_DESTROY 435 cycle = (uint32_t)hal_get_cycles(); 436 if( CONFIG_DEBUG_PROCESS_DESTROY ) 437 printk("\n[DBG] %s : thread %x exit / destroyed process %x (pid = %x) / cycle %d\n", 438 __FUNCTION__ , CURRENT_THREAD , process, process->pid, cycle ); 439 #endif 415 440 416 441 } // end process_destroy() … … 440 465 uint32_t responses; // number of remote process copies 441 466 uint32_t rsp_count; // used to assert number of copies 442 443 467 rpc_desc_t rpc; // rpc descriptor allocated in stack 444 468 445 process_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x\n", 446 __FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy ); 469 #if CONFIG_DEBUG_PROCESS_SIGACTION 470 uint32_t cycle = (uint32_t)hal_get_cycles(); 471 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 472 printk("\n[DBG] %s : thread %x enter to %s process %x in cluster %x / cycle %d\n", 473 __FUNCTION__ , CURRENT_THREAD, process_action_str( action_type ) , 474 process->pid , local_cxy , cycle ); 475 #endif 447 476 448 477 thread_t * client = CURRENT_THREAD; 449 xptr_t client_xp = XPTR( local_cxy , client );450 478 451 479 // get local pointer on local cluster manager … … 492 520 { 493 521 494 process_dmsg("\n[DBG] %s : send RPC to remote cluster %x\n", 495 __FUNCTION__ , process_cxy ); 522 #if CONFIG_DEBUG_PROCESS_SIGACTION 523 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 524 printk("\n[DBG] %s : send RPC to remote cluster %x\n", __FUNCTION__ , process_cxy ); 525 #endif 496 526 497 527 rpc.args[0] = (uint64_t)action_type; … … 517 547 } 518 548 519 process_dmsg("\n[DBG] %s : make 
action in owner cluster %x\n", 520 __FUNCTION__ , local_cxy ); 521 549 #if CONFIG_DEBUG_PROCESS_SIGACTION 550 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 551 printk("\n[DBG] %s : make action in owner cluster %x\n", __FUNCTION__ , local_cxy ); 552 #endif 522 553 523 554 // call directly the relevant function in local owner cluster 524 if (action_type == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp ); 525 else if (action_type == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp ); 526 else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 527 528 process_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x\n", 529 __FUNCTION__ , process_action_str( action_type ) , process->pid , local_cxy ); 555 if (action_type == DELETE_ALL_THREADS ) process_delete_threads ( process ); 556 else if (action_type == BLOCK_ALL_THREADS ) process_block_threads ( process ); 557 else if (action_type == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 558 559 #if CONFIG_DEBUG_PROCESS_SIGACTION 560 cycle = (uint32_t)hal_get_cycles(); 561 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 562 printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n", 563 __FUNCTION__ , CURRENT_THREAD, process_action_str( action_type ) , 564 process->pid , local_cxy , cycle ); 565 #endif 530 566 531 567 } // end process_sigaction() 532 568 533 //////////////////////////////////////////////// 534 void process_block_threads( process_t * process, 535 xptr_t client_xp ) 569 ///////////////////////////////////////////////// 570 void process_block_threads( process_t * process ) 536 571 { 537 572 thread_t * target; // pointer on target thread 573 thread_t * this; // pointer on calling thread 538 574 uint32_t ltid; // index in process th_tbl 539 thread_t * requester; // requesting thread pointer540 575 uint32_t count; // requests counter 541 576 volatile uint32_t rsp_count; // responses counter 542 577 543 578 // get calling thread pointer 544 requester = CURRENT_THREAD; 545 546 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n", 547 __FUNCTION__ , process->pid , local_cxy ); 579 this = CURRENT_THREAD; 580 581 #if CONFIG_DEBUG_PROCESS_SIGACTION 582 uint32_t cycle = (uint32_t)hal_get_cycles(); 583 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 584 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 585 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 586 #endif 548 587 549 588 // get lock protecting process th_tbl[] … … 559 598 target = process->th_tbl[ltid]; 560 599 600 assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" ); 601 561 602 if( target != NULL ) // thread found 562 603 { 563 604 count++; 564 605 565 // - if the target thread is the client thread, we do nothing,566 // and we simply decrement the responses counter.567 606 // - if the calling thread and the target thread are on the same core, 568 607 // we block the target thread, we don't need confirmation from scheduler, … … 572 611 // to be sure that the target thread is not running. 573 612 574 if( XPTR( local_cxy , target ) == client_xp ) 575 { 576 // decrement responses counter 577 hal_atomic_add( (void *)&rsp_count , -1 ); 578 } 579 else if( requester->core->lid == target->core->lid ) 613 if( this->core->lid == target->core->lid ) 580 614 { 581 615 // set the global blocked bit in target thread descriptor. 
… … 612 646 } 613 647 614 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n", 615 __FUNCTION__ , process->pid , local_cxy , count ); 648 #if CONFIG_DEBUG_PROCESS_SIGACTION 649 cycle = (uint32_t)hal_get_cycles(); 650 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 651 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 652 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 653 #endif 616 654 617 655 } // end process_block_threads() … … 621 659 { 622 660 thread_t * target; // pointer on target thead 661 thread_t * this; // pointer on calling thread 623 662 uint32_t ltid; // index in process th_tbl 624 663 uint32_t count; // requests counter 625 664 626 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n", 627 __FUNCTION__ , process->pid , local_cxy ); 665 // get calling thread pointer 666 this = CURRENT_THREAD; 667 668 #if CONFIG_DEBUG_PROCESS_SIGACTION 669 uint32_t cycle = (uint32_t)hal_get_cycles(); 670 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 671 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 672 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 673 #endif 628 674 629 675 // get lock protecting process th_tbl[] … … 636 682 target = process->th_tbl[ltid]; 637 683 684 assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" ); 685 638 686 if( target != NULL ) // thread found 639 687 { … … 648 696 spinlock_unlock( &process->th_lock ); 649 697 650 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n", 651 __FUNCTION__ , process->pid , local_cxy , count ); 698 #if CONFIG_DEBUG_PROCESS_SIGACTION 699 cycle = (uint32_t)hal_get_cycles(); 700 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 701 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 702 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 703 #endif 652 704 653 705 } // end process_unblock_threads() 654 706 655 ///////////////////////////////////////////////// 656 void process_delete_threads( process_t * process, 657 xptr_t client_xp ) 707 ////////////////////////////////////////////////// 708 void process_delete_threads( process_t * process ) 658 709 { 659 710 thread_t * target; // pointer on target thread 711 thread_t * this; // pointer on calling thread 660 712 uint32_t ltid; // index in process th_tbl 661 713 uint32_t count; // request counter 662 663 sigaction_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n", 664 __FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() ); 714 cxy_t owner_cxy; // owner cluster identifier 715 716 // get calling thread pointer 717 this = CURRENT_THREAD; 718 owner_cxy = CXY_FROM_PID( process->pid ); 719 720 #if CONFIG_DEBUG_PROCESS_SIGACTION 721 uint32_t cycle = (uint32_t)hal_get_cycles(); 722 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 723 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n", 724 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 725 #endif 665 726 666 727 // get lock protecting process th_tbl[] … … 673 734 target = process->th_tbl[ltid]; 674 735 675 if( target != NULL ) // thread found 736 assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" ); 737 738 if( target != NULL ) // thread found 676 739 { 677 740 count++; 678 679 // delete only if the target is not the client680 if( XPTR( local_cxy , target ) != client_xp )681 { 741 742 // the main thread should not be deleted 743 if( 
(owner_cxy != local_cxy) || (ltid != 0) ) 744 { 682 745 hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE ); 683 746 } … … 688 751 spinlock_unlock( &process->th_lock ); 689 752 690 sigaction_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n", 691 __FUNCTION__ , process->pid , local_cxy , (uint32_t)hal_get_cycles() ); 753 #if CONFIG_DEBUG_PROCESS_SIGACTION 754 cycle = (uint32_t)hal_get_cycles(); 755 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 756 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n", 757 __FUNCTION__ , this , process->pid , local_cxy , cycle ); 758 #endif 692 759 693 760 } // end process_delete_threads() … … 988 1055 "parent process must be the reference process\n" ); 989 1056 990 fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n", 991 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() ); 1057 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1058 uint32_t cycle = (uint32_t)hal_get_cycles(); 1059 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1060 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1061 __FUNCTION__, CURRENT_THREAD, parent_pid, cycle ); 1062 #endif 992 1063 993 1064 // allocate a process descriptor … … 999 1070 return -1; 1000 1071 } 1001 1002 fork_dmsg("\n[DBG] %s : core[%x,%d] created child process %x at cycle %d\n",1003 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process, (uint32_t)hal_get_cycles() );1004 1072 1005 1073 // allocate a child PID from local cluster … … 1012 1080 return -1; 1013 1081 } 1014 1015 fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID = %x at cycle %d\n",1016 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );1017 1082 1018 1083 // initializes child process descriptor from parent process descriptor … … 1022 1087 parent_process_xp ); 1023 1088 1024 fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n", 1025 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() ); 1089 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1090 cycle = (uint32_t)hal_get_cycles(); 1091 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1092 printk("\n[DBG] %s : thread %x created child_process %x / child_pid %x / cycle %d\n", 1093 __FUNCTION__, CURRENT_THREAD, process, new_pid, cycle ); 1094 #endif 1026 1095 1027 1096 // copy VMM from parent descriptor to child descriptor … … 1037 1106 } 1038 1107 1039 fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n", 1040 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1108 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1109 cycle = (uint32_t)hal_get_cycles(); 1110 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1111 printk("\n[DBG] %s : thread %x copied VMM from parent %x to child %x / cycle %d\n", 1112 __FUNCTION__ , CURRENT_THREAD , parent_pid, new_pid, cycle ); 1113 #endif 1041 1114 1042 1115 // update extended pointer on .elf file … … 1059 1132 assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" ); 1060 1133 1061 fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n", 1062 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1063 1064 // update parent process GPT to set Copy_On_Write for shared data vsegs 1134 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1135 cycle = (uint32_t)hal_get_cycles(); 1136 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1137 printk("\n[DBG] %s : thread %x created child thread %x / cycle %d\n", 1138 __FUNCTION__ , CURRENT_THREAD, 
thread, cycle ); 1139 #endif 1140 1141 // set Copy_On_Write flag in parent process GPT 1065 1142 // this includes all replicated GPT copies 1066 1143 if( parent_process_cxy == local_cxy ) // reference is local … … 1074 1151 } 1075 1152 1076 fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n", 1077 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1153 // set Copy_On_Write flag in child process GPT 1154 vmm_set_cow( process ); 1155 1156 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1157 cycle = (uint32_t)hal_get_cycles(); 1158 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1159 printk("\n[DBG] %s : thread %x set COW in parent and child / cycle %d\n", 1160 __FUNCTION__ , CURRENT_THREAD, cycle ); 1161 #endif 1078 1162 1079 1163 // get extended pointers on parent children_root, children_lock and children_nr … … 1092 1176 *child_pid = new_pid; 1093 1177 1094 1095 fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n", 1096 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1178 #if CONFIG_DEBUG_PROCESS_MAKE_FORK 1179 cycle = (uint32_t)hal_get_cycles(); 1180 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle ) 1181 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 1182 __FUNCTION__, CURRENT_THREAD, cycle ); 1183 #endif 1097 1184 1098 1185 return 0; … … 1105 1192 { 1106 1193 char * path; // pathname to .elf file 1107 pid_t pid; // old_process PID given to new_process1108 pid_t temp_pid; // temporary PID given to old_process1194 pid_t pid; // old_process PID / given to new_process 1195 pid_t temp_pid; // temporary PID / given to old_process 1109 1196 process_t * old_process; // local pointer on old process 1197 thread_t * old_thread; // local pointer on old thread 1110 1198 process_t * new_process; // local pointer on new process 1111 thread_t * new_thread; // local pointer on main thread 1112 pthread_attr_t attr; // main thread attributes 1199 thread_t * new_thread; // local pointer on new thread 1200 xptr_t parent_xp; // extended pointer on parent process 1201 pthread_attr_t attr; // new thread attributes 1113 1202 lid_t lid; // selected core local index 1114 1203 error_t error; 1115 1204 1116 // get .elf pathname and PID from exec_info 1205 // get old_thread / old_process / PID / parent_xp 1206 old_thread = CURRENT_THREAD; 1207 old_process = old_thread->process; 1208 pid = old_process->pid; 1209 parent_xp = old_process->parent_xp; 1210 1211 // get .elf pathname from exec_info 1117 1212 path = exec_info->path; 1118 pid = exec_info->pid;1119 1213 1120 1214 // this function must be executed by a thread running in owner cluster 1121 1215 assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__, 1122 "local cluster %x is not owner for process %x\n", local_cxy, pid ); 1123 1124 exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / %s / cycle %d\n", 1125 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path, (uint32_t)hal_get_cycles() ); 1126 1127 // get old_process local pointer 1128 old_process = (process_t *)cluster_get_local_process_from_pid( pid ); 1129 1130 if( old_process == NULL ) 1131 { 1132 printk("\n[ERROR] in %s : cannot get old process descriptor\n", __FUNCTION__ ); 1133 return -1; 1134 } 1216 "local_cluster must be owner_cluster\n" ); 1217 1218 assert( (LTID_FROM_TRDID( old_thread->trdid ) == 0) , __FUNCTION__, 1219 "must be called by the main thread\n" ); 1220 1221 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1222 uint32_t cycle = (uint32_t)hal_get_cycles(); 1223 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1224 
printk("\n[DBG] %s : thread %x enters for process %x / %s / cycle %d\n", 1225 __FUNCTION__, old_thread, pid, path, cycle ); 1226 #endif 1135 1227 1136 1228 // allocate memory for new_process descriptor … … 1144 1236 } 1145 1237 1146 // get a newPID for old_process1238 // get a temporary PID for old_process 1147 1239 error = cluster_pid_alloc( old_process , &temp_pid ); 1148 1240 if( error ) … … 1154 1246 } 1155 1247 1156 // request blocking for all threads in old_process (but the calling thread) 1157 process_sigaction( old_process , BLOCK_ALL_THREADS ); 1158 1159 // request destruction for all threads in old_process (but the calling thread) 1160 process_sigaction( old_process , DELETE_ALL_THREADS ); 1161 1162 exec_dmsg("\n[DBG] %s : core[%x,%d] marked old threads for destruction / cycle %d\n", 1163 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() ); 1164 1165 // set new PID to old_process 1248 // set temporary PID to old_process 1166 1249 old_process->pid = temp_pid; 1167 1250 … … 1169 1252 process_reference_init( new_process, 1170 1253 pid, 1171 old_process->parent_xp,// parent_process_xp1172 XPTR(local_cxy , old_process) ); // model_process _xp1254 parent_xp, // parent_process_xp 1255 XPTR(local_cxy , old_process) ); // model_process 1173 1256 1174 1257 // give TXT ownership to new_process 1175 1258 process_txt_set_ownership( XPTR( local_cxy , new_process ) ); 1176 1259 1177 exec_dmsg("\n[DBG] %s : core[%x,%d] initialised new process %x / cycle %d \n", 1178 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_process, (uint32_t)hal_get_cycles() ); 1260 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1261 cycle = (uint32_t)hal_get_cycles(); 1262 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1263 printk("\n[DBG] %s : thread %x created new process %x / cycle %d \n", 1264 __FUNCTION__ , old_thread , new_process , cycle ); 1265 #endif 1179 1266 1180 1267 // register code & data vsegs as well as entry-point in new process VMM, … … 1188 1275 } 1189 1276 1190 exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered in new process %x / cycle %d\n", 1191 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, new_process, (uint32_t)hal_get_cycles() ); 1277 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1278 cycle = (uint32_t)hal_get_cycles(); 1279 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1280 printk("\n[DBG] %s : thread %x registered code/data vsegs in new process %x / cycle %d\n", 1281 __FUNCTION__, old_thread , new_process->pid , cycle ); 1282 #endif 1192 1283 1193 1284 // select a core in local cluster to execute the main thread … … 1216 1307 assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" ); 1217 1308 1218 exec_dmsg("\n[DBG] %s : core[%x,%d] created new_process main thread / cycle %d\n", 1219 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1220 1221 // get pointers on parent process 1222 xptr_t parent_xp = new_process->parent_xp; 1309 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1310 cycle = (uint32_t)hal_get_cycles(); 1311 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1312 printk("\n[DBG] %s : thread %x created new_process main thread %x / cycle %d\n", 1313 __FUNCTION__ , old_thread , new_thread , cycle ); 1314 #endif 1315 1316 // get cluster and local pointer on parent process 1223 1317 process_t * parent_ptr = GET_PTR( parent_xp ); 1224 1318 cxy_t parent_cxy = GET_CXY( parent_xp ); … … 1235 1329 remote_spinlock_unlock( lock_xp ); 1236 1330 1237 exec_dmsg("\n[DBG] %s : core[%x,%d] updated parent process children list / cycle 
%d\n",1238 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );1239 1240 // block and mark calling thread for deletion1241 // only when it is an user thread1242 thread_t * this = CURRENT_THREAD;1243 if( this->type == THREAD_USER )1244 {1245 thread_block( this , THREAD_BLOCKED_GLOBAL );1246 hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE );1247 }1248 1249 1331 // activate new thread 1250 1332 thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL ); 1251 1333 1334 // request old_thread destruction => old_process destruction 1335 thread_block( old_thread , THREAD_BLOCKED_GLOBAL ); 1336 hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE ); 1337 1252 1338 hal_fence(); 1253 1339 1254 exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s / cycle %d\n", 1255 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path , (uint32_t)hal_get_cycles() ); 1256 1340 #if CONFIG_DEBUG_PROCESS_MAKE_EXEC 1341 cycle = (uint32_t)hal_get_cycles(); 1342 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle ) 1343 printk("\n[DBG] %s : old_thread %x blocked / new_thread %x activated / cycle %d\n", 1344 __FUNCTION__ , old_thread , new_thread , cycle ); 1345 #endif 1346 1257 1347 return 0; 1258 1348 1259 1349 } // end process_make_exec() 1260 1350 1261 /////////////////////////////////////// 1262 void process_make_kill( pid_t pid, 1263 uint32_t sig_id ) 1264 { 1265 // this function must be executed by a thread running in owner cluster 1266 assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ , 1267 "must execute in owner cluster" ); 1268 1351 //////////////////////////////////////////// 1352 void process_make_kill( process_t * process, 1353 bool_t is_exit, 1354 uint32_t exit_status ) 1355 { 1269 1356 thread_t * this = CURRENT_THREAD; 1270 1357 1271 kill_dmsg("\n[DBG] %s : core[%x,%d] enter / process %x / sig %d\n", 1272 __FUNCTION__, local_cxy, this->core->lid, pid , sig_id ); 1273 1274 // get pointer on local target process descriptor 1275 process_t * process = process_get_local_copy( pid ); 1276 1277 // does nothing if process does not exist 1278 if( process == NULL ) 1279 { 1280 printk("\n[WARNING] %s : process %x does not exist => do nothing\n", 1281 __FUNCTION__ , pid ); 1282 return; 1283 } 1284 1285 // analyse signal type 1286 switch( sig_id ) 1287 { 1288 case SIGSTOP: 1289 { 1290 // block all threads in all clusters 1291 process_sigaction( process , BLOCK_ALL_THREADS ); 1292 1293 // remove TXT ownership to target process 1294 process_txt_reset_ownership( XPTR( local_cxy , process ) ); 1295 } 1296 break; 1297 case SIGCONT: // unblock all threads in all clusters 1298 { 1299 process_sigaction( process , UNBLOCK_ALL_THREADS ); 1300 } 1301 break; 1302 case SIGKILL: // block all threads, then delete all threads 1303 { 1304 // block all threads in all clusters 1305 process_sigaction( process , BLOCK_ALL_THREADS ); 1306 1307 // remove TXT ownership to target process 1308 process_txt_reset_ownership( XPTR( local_cxy , process ) ); 1309 1310 // delete all threads (but the calling thread) 1311 process_sigaction( process , DELETE_ALL_THREADS ); 1312 1313 // delete the calling thread if required 1314 if( CURRENT_THREAD->process == process ) 1315 { 1316 // set REQ_DELETE flag 1317 hal_atomic_or( &this->flags , THREAD_FLAG_REQ_DELETE ); 1318 1319 // deschedule 1320 sched_yield( "suicide after kill" ); 1321 } 1322 } 1323 break; 1324 } 1325 1326 kill_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / sig %d \n", 1327 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , 
sig_id ); 1358 assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ , 1359 "must be executed in process owner cluster\n" ); 1360 1361 assert( ( this->type == THREAD_RPC ) , __FUNCTION__ , 1362 "must be executed by an RPC thread\n" ); 1363 1364 #if CONFIG_DEBUG_PROCESS_MAKE_KILL 1365 uint32_t cycle = (uint32_t)hal_get_cycles(); 1366 if( CONFIG_DEBUG_PROCESS_MAKE_KILL < cycle ) 1367 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1368 __FUNCTION__, this , process->pid , cycle ); 1369 #endif 1370 1371 // register exit_status in owner process descriptor 1372 if( is_exit ) process->term_state = exit_status; 1373 1374 // atomically update owner process descriptor flags 1375 if( is_exit ) hal_atomic_or( &process->term_state , PROCESS_FLAG_EXIT ); 1376 else hal_atomic_or( &process->term_state , PROCESS_FLAG_KILL ); 1377 1378 // remove TXT ownership from owner process descriptor 1379 process_txt_reset_ownership( XPTR( local_cxy , process ) ); 1380 1381 // block all process threads in all clusters 1382 process_sigaction( process , BLOCK_ALL_THREADS ); 1383 1384 // mark all process threads in all clusters for delete 1385 process_sigaction( process , DELETE_ALL_THREADS ); 1386 1387 /* unused if sys_wait deschedules without blocking [AG] 1388 1389 // get cluster and pointers on reference parent process 1390 xptr_t parent_xp = process->parent_xp; 1391 process_t * parent_ptr = GET_PTR( parent_xp ); 1392 cxy_t parent_cxy = GET_CXY( parent_xp ); 1393 1394 // get loal pointer on parent main thread 1395 thread_t * main_ptr = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->th_tbl[0] ) ); 1396 1397 // reset THREAD_BLOCKED_WAIT bit in parent process main thread 1398 thread_unblock( XPTR( parent_cxy , main_ptr ) , THREAD_BLOCKED_WAIT ); 1399 */ 1400 1401 #if CONFIG_DEBUG_PROCESS_MAKE_KILL 1402 cycle = (uint32_t)hal_get_cycles(); 1403 if( CONFIG_DEBUG_PROCESS_MAKE_KILL < cycle ) 1404 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 1405 __FUNCTION__, this, process->pid , cycle ); 1406 #endif 1328 1407 1329 1408 } // end process_make_kill() 1330 1331 /////////////////////////////////////////1332 void process_make_exit( pid_t pid,1333 uint32_t status )1334 {1335 // this function must be executed by a thread running in owner cluster1336 assert( (CXY_FROM_PID( pid ) == local_cxy) , __FUNCTION__ ,1337 "must execute in owner cluster" );1338 1339 // get pointer on local process descriptor1340 process_t * process = process_get_local_copy( pid );1341 1342 // does nothing if process does not exist1343 if( process == NULL )1344 {1345 printk("\n[WARNING] %s : process %x does not exist => do nothing\n",1346 __FUNCTION__ , pid );1347 return;1348 }1349 1350 // block all threads in all clusters (but the calling thread)1351 process_sigaction( process , BLOCK_ALL_THREADS );1352 1353 // delete all threads in all clusters (but the calling thread)1354 process_sigaction( process , DELETE_ALL_THREADS );1355 1356 // delete the calling thread1357 hal_atomic_or( &CURRENT_THREAD->flags , THREAD_FLAG_REQ_DELETE );1358 1359 // deschedule1360 sched_yield( "suicide after exit" );1361 1362 } // end process_make_exit()1363 1409 1364 1410 /////////////////////////////////////////////// … … 1366 1412 { 1367 1413 1368 process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n", 1369 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() ); 1414 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE 1415 uint32_t cycle = (uint32_t)hal_get_cycles(); 1416 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < 
cycle ) 1417 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1418 #endif 1370 1419 1371 1420 // initialize PID, REF_XP, PARENT_XP, and STATE 1372 process->pid = 0;1373 process->ref_xp = XPTR( local_cxy , process );1374 process->parent_xp = XPTR_NULL;1375 process-> state = PROCESS_STATE_RUNNING;1421 process->pid = 0; 1422 process->ref_xp = XPTR( local_cxy , process ); 1423 process->parent_xp = XPTR_NULL; 1424 process->term_state = 0; 1376 1425 1377 1426 // reset th_tbl[] array as empty … … 1391 1440 hal_fence(); 1392 1441 1393 process_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n", 1394 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() ); 1442 #if CONFIG_DEBUG_PROCESS_ZERO_CREATE 1443 cycle = (uint32_t)hal_get_cycles(); 1444 if( CONFIG_DEBUG_PROCESS_ZERO_CREATE < cycle ) 1445 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1446 #endif 1395 1447 1396 1448 } // end process_zero_init() … … 1406 1458 error_t error; 1407 1459 1408 process_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n", 1409 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid ); 1460 #if CONFIG_DEBUG_PROCESS_INIT_CREATE 1461 uint32_t cycle = (uint32_t)hal_get_cycles(); 1462 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle ) 1463 printk("\n[DBG] %s : thread %x enter / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1464 #endif 1410 1465 1411 1466 // allocates memory for process descriptor from local cluster … … 1434 1489 XPTR( local_cxy , &process_zero ), // parent 1435 1490 XPTR( local_cxy , &process_zero ) ); // model 1436 1437 process_dmsg("\n[DBG] %s : core[%x,%d] / initialisation done\n",1438 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );1439 1491 1440 1492 // register "code" and "data" vsegs as well as entry-point … … 1446 1498 process_destroy( process ); 1447 1499 } 1448 1449 process_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",1450 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );1451 1500 1452 1501 // get extended pointers on process_zero children_root, children_lock … … 1489 1538 hal_fence(); 1490 1539 1491 process_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n", 1492 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread ); 1540 #if CONFIG_DEBUG_PROCESS_INIT_CREATE 1541 cycle = (uint32_t)hal_get_cycles(); 1542 if( CONFIG_DEBUG_PROCESS_INIT_CREATE < cycle ) 1543 printk("\n[DBG] %s : thread %x exit / cycle %d\n", __FUNCTION__, CURRENT_THREAD, cycle ); 1544 #endif 1493 1545 1494 1546 } // end process_init_create() 1495 1496 //////////////////////////////////////////1497 char * process_state_str( uint32_t state )1498 {1499 if ( state == PROCESS_STATE_RUNNING ) return "RUNNING";1500 else if( state == PROCESS_STATE_KILLED ) return "KILLED";1501 else if( state == PROCESS_STATE_EXITED ) return "EXITED";1502 else return "undefined";1503 }1504 1547 1505 1548 ///////////////////////////////////////// … … 1542 1585 // get PID and state 1543 1586 pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) ); 1544 state = hal_remote_lw( XPTR( process_cxy , &process_ptr-> state ) );1587 state = hal_remote_lw( XPTR( process_cxy , &process_ptr->term_state ) ); 1545 1588 1546 1589 // get PPID … … 1577 1620 if( owner_xp == process_xp ) 1578 1621 { 1579 printk("PID %X | PPID %X | %s\t| %s (FG) | %X | %d | %s\n",1580 pid, ppid, process_state_str(state), txt_name, process_ptr, th_nr, elf_name );1622 printk("PID %X | PPID %X | STS %X | %s (FG) | %X 
| %d | %s\n", 1623 pid, ppid, state, txt_name, process_ptr, th_nr, elf_name ); 1581 1624 } 1582 1625 else 1583 1626 { 1584 printk("PID %X | PPID %X | %s\t| %s (BG) | %X | %d | %s\n",1585 pid, ppid, process_state_str(state), txt_name, process_ptr, th_nr, elf_name );1627 printk("PID %X | PPID %X | STS %X | %s (BG) | %X | %d | %s\n", 1628 pid, ppid, state, txt_name, process_ptr, th_nr, elf_name ); 1586 1629 } 1587 1630 } // end process_display() … … 1632 1675 xptr_t lock_xp; // extended pointer on list lock in chdev 1633 1676 1634 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x at cycle\n", 1635 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1677 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH 1678 uint32_t cycle = (uint32_t)hal_get_cycles(); 1679 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle ) 1680 printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d / cycle %d\n", 1681 __FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle ); 1682 #endif 1636 1683 1637 1684 // check process is reference … … 1657 1704 remote_spinlock_unlock( lock_xp ); 1658 1705 1659 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x at cycle\n", 1660 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1706 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH 1707 cycle = (uint32_t)hal_get_cycles(); 1708 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle ) 1709 printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n", 1710 __FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle ); 1711 #endif 1661 1712 1662 1713 } // end process_txt_attach() … … 1670 1721 xptr_t lock_xp; // extended pointer on list lock in chdev 1671 1722 1672 process_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x at cycle\n", 1673 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1723 #if CONFIG_DEBUG_PROCESS_TXT_DETACH 1724 uint32_t cycle = (uint32_t)hal_get_cycles(); 1725 if( CONFIG_DEBUG_PROCESS_TXT_DETACH < cycle ) 1726 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 1727 __FUNCTION__, CURRENT_THREAD, process->pid , cycle ); 1728 #endif 1674 1729 1675 1730 // check process is reference … … 1690 1745 remote_spinlock_unlock( lock_xp ); 1691 1746 1692 process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x at cycle %d\n", 1693 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, (uint32_t)hal_get_cycles() ); 1747 #if CONFIG_DEBUG_PROCESS_TXT_DETACH 1748 cycle = (uint32_t)hal_get_cycles(); 1749 if( CONFIG_DEBUG_PROCESS_TXT_DETACH < cycle ) 1750 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 1751 __FUNCTION__, CURRENT_THREAD, process->pid, cycle ); 1752 #endif 1694 1753 1695 1754 } // end process_txt_detach() … … 1732 1791 xptr_t file_xp; // extended pointer on TXT_RX pseudo file 1733 1792 xptr_t txt_xp; // extended pointer on TXT_RX chdev 1734 chdev_t * txt_ptr; 1735 cxy_t txt_cxy; 1793 chdev_t * txt_ptr; // local pointer on TXT_RX chdev 1794 cxy_t txt_cxy; // cluster of TXT_RX chdev 1795 uint32_t txt_id; // TXT_RX channel 1736 1796 xptr_t owner_xp; // extended pointer on current TXT_RX owner 1737 1797 xptr_t root_xp; // extended pointer on root of attached process list 1738 1798 xptr_t iter_xp; // iterator for xlist 1739 1799 xptr_t current_xp; // extended pointer on current process 1740 process_t * current_ptr; 1741 cxy_t current_cxy; 1742 pid_t ppid; 1800 process_t * current_ptr; // local pointer on current process 1801 cxy_t 
current_cxy; // cluster for current process 1802 pid_t ppid; // parent process identifier for current process 1743 1803 1744 1804 // get cluster and local pointer on process … … 1752 1812 txt_xp = chdev_from_file( file_xp ); 1753 1813 txt_cxy = GET_CXY( txt_xp ); 1754 txt_ptr = (chdev_t *)GET_PTR( txt_xp );1755 1756 // get extended pointer on TXT_RX owner 1814 txt_ptr = GET_PTR( txt_xp ); 1815 1816 // get extended pointer on TXT_RX owner and TXT channel 1757 1817 owner_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) ); 1818 txt_id = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) ); 1758 1819 1759 1820 // transfer ownership to KSH if required 1760 if( owner_xp == process_xp)1821 if( (owner_xp == process_xp) && (txt_id > 0) ) 1761 1822 { 1762 1823 // get extended pointer on root of list of attached processes … … 1782 1843 } 1783 1844 } 1784 } 1785 1786 assert( false , __FUNCTION__ , "KSH process not found" ); 1787 1845 1846 assert( false , __FUNCTION__ , "KSH process not found" ); 1847 } 1788 1848 } // end process_txt_reset_ownership() 1789 1849 -
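
In this file process_delete_threads() now marks every local thread of the target process for deletion except the main thread in the owner cluster (ltid 0), which is left for the parent's sys_wait() to reap. Below is a much simplified, single-cluster sketch of that loop, with a hypothetical thread table and flag value standing in for th_tbl[] and THREAD_FLAG_REQ_DELETE.

#include <stdio.h>

#define THREADS_MAX      8
#define FLAG_REQ_DELETE  0x1     /* hypothetical stand-in for THREAD_FLAG_REQ_DELETE */

typedef struct
{
    int          used;
    unsigned int flags;
} thread_slot_t;

/* Single-cluster analogue of process_delete_threads(): mark every registered
 * thread for deletion, except the main thread (ltid 0) in the owner cluster. */
static void delete_threads( thread_slot_t th_tbl[], int is_owner_cluster )
{
    for( int ltid = 0 ; ltid < THREADS_MAX ; ltid++ )
    {
        if( ! th_tbl[ltid].used )               continue;
        if( is_owner_cluster && (ltid == 0) )   continue;   /* keep main thread      */
        th_tbl[ltid].flags |= FLAG_REQ_DELETE;              /* scheduler frees later */
    }
}

int main( void )
{
    thread_slot_t th_tbl[THREADS_MAX] = { {1,0}, {1,0}, {1,0} };   /* three threads */

    delete_threads( th_tbl, 1 );

    for( int ltid = 0 ; ltid < 3 ; ltid++ )
        printf("thread %d : flags %x\n", ltid, th_tbl[ltid].flags);
    return 0;
}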
trunk/kernel/kern/process.h
r428 r433 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017 )6 * Alain Greiner (2016,2017,2018) 7 7 * 8 8 * Copyright (c) UPMC Sorbonne Universites … … 65 65 66 66 /********************************************************************************************* 67 * This enum defines the process states for ALMOS_MKH. 68 ********************************************************************************************/ 69 70 enum process_states 71 { 72 PROCESS_STATE_RUNNING = 0, /*! process is executing */ 73 PROCESS_STATE_STOPPED = 1, /*! process has been stopped by a signal */ 74 PROCESS_STATE_KILLED = 2, /*! process has been killed by a signal */ 75 PROCESS_STATE_EXITED = 3, /*! process terminated with an exit */ 76 }; 67 * The termination state is a 32 bits word: 68 * - the 8 LSB bits contain the user defined exit status 69 * - the 24 other bits contain the flags defined below 70 ********************************************************************************************/ 71 72 #define PROCESS_FLAG_BLOCK 0x100 /*! process received as SIGSTOP signal */ 73 #define PROCESS_FLAG_KILL 0x200 /*! process terminated by a sys_kill() */ 74 #define PROCESS_FLAG_EXIT 0x400 /*! process terminated by a sys_exit() */ 75 #define PROCESS_FLAG_WAIT 0x800 /*! parent process executed successfully a sys_wait() */ 77 76 78 77 /********************************************************************************************* … … 118 117 * 6) The <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields 119 118 * are defined in all process descriptors copies. 119 * 7) The termination <flags> and <exit_status> are only defined in the reference cluster. 120 120 ********************************************************************************************/ 121 121 … … 155 155 remote_spinlock_t sync_lock; /*! lock protecting sem,mutex,barrier,condvar lists */ 156 156 157 uint32_t state; /*! RUNNING / STOPPED / KILLED / EXITED*/157 uint32_t term_state; /*! termination status (flags & exit status) */ 158 158 159 159 bool_t txt_owner; /*! current TXT owner */ … … 168 168 typedef struct exec_info_s 169 169 { 170 pid_t pid; /*! process identifier (both parent and child) */171 172 170 char path[CONFIG_VFS_MAX_PATH_LENGTH]; /*! .elf file path */ 173 171 … … 276 274 277 275 /********************************************************************************************* 278 * This function returns a printable string defining the process state.279 *********************************************************************************************280 * @ state : RUNNING / BLOCKED / EXITED / KILLED281 * @ return a string pointer.282 ********************************************************************************************/283 char * process_state_str( uint32_t state );284 285 /*********************************************************************************************286 276 * This debug function diplays on the kernel terminal TXT0 detailed informations on a 287 277 * reference process identified by the <process_xp> argument. … … 324 314 325 315 /********************************************************************************************* 326 * This function blocks all threads (but the client thread defined by the <client_xp>327 * argument) for a given <process> in a given cluster.316 * This function blocks all threads for a given <process> in a given cluster. 317 * The calling thread cannot be a target thread. 
328 318 * It loops on all local threads of the process, set the THREAD_BLOCKED_GLOBAL bit, 329 319 * and request the relevant schedulers to acknowledge the blocking, using IPI if required. … … 332 322 ********************************************************************************************* 333 323 * @ process : pointer on the target process descriptor. 334 * @ client_xp : extended pointer on the client thread, that should not be blocked. 335 ********************************************************************************************/ 336 void process_block_threads( process_t * process, 337 xptr_t client_xp ); 324 ********************************************************************************************/ 325 void process_block_threads( process_t * process ); 338 326 339 327 /********************************************************************************************* … … 345 333 346 334 /********************************************************************************************* 347 * This function delete all threads, (but the client thread defined by the <client_xp> 348 * argument) for a given <process> in a given cluster. 335 * This function marks for deletion all threads - but one _ for a given <process> 336 * in a given cluster. The main thread in owner cluster is NOT marked. 337 * It will be marked for deletion by the parent process sys_wait(). 338 * The calling thread cannot be a target thread. 349 339 * It loops on all local threads of the process, and set the THREAD_FLAG_REQ_DELETE bit. 350 340 * For each marked thread, the following actions will be done by the scheduler at the next … … 357 347 ********************************************************************************************* 358 348 * @ process : pointer on the process descriptor. 359 * @ client_xp : extended pointer on the client thread, that should not be deleted. 360 ********************************************************************************************/ 361 void process_delete_threads( process_t * process, 362 xptr_t client_xp ); 349 ********************************************************************************************/ 350 void process_delete_threads( process_t * process ); 363 351 364 352 /********************************************************************************************* … … 396 384 * associated "child" thread descriptor in the local cluster. This function can involve 397 385 * up to three different clusters : 398 * - the local (child) cluster can be any cluster defined by the sys_fork function.386 * - the child (local) cluster can be any cluster defined by the sys_fork function. 399 387 * - the parent cluster must be the reference cluster for the parent process. 400 388 * - the client cluster containing the thread requesting the fork can be any cluster. … … 416 404 417 405 /********************************************************************************************* 418 * This function implement the "exit" system call, and is called by the sys_exit() function. 419 * It must be executed by a thread running in the calling process owner cluster. 420 * It uses twice the multicast RPC_PROCESS_SIGNAL to first block all process threads 421 * in all clusters, and then delete all threads and process descriptors. 422 ********************************************************************************************* 423 * @ pid : process identifier. 424 * @ status : exit return value. 
425 ********************************************************************************************/ 426 void process_make_exit( pid_t pid, 427 uint32_t status ); 428 429 /********************************************************************************************* 430 * This function implement the "kill" system call, and is called by the sys_kill() function. 431 * It must be executed by a thread running in the target process owner cluster. 432 * Only the SIGKILL, SIGSTOP, and SIGCONT signals are supported. 433 * User defined handlers are not supported. 434 * It uses once or twice the multicast RPC_PROCESS_SIGNAL to block, unblock or delete 435 * all process threads in all clusters, and then delete process descriptors. 436 ********************************************************************************************* 437 * @ pid : process identifier. 438 * @ sig_id : signal type. 439 ********************************************************************************************/ 440 void process_make_kill( pid_t pid, 441 uint32_t sig_id ); 406 * This function is called by both the sys_kill() and sys_exit() system calls. 407 * It must be executed by an RPC thread running in the target process owner cluster. 408 * It uses twice the process_sigaction() function: 409 * - first, to block all target process threads, in all clusters. 410 * - second, to delete all target process threads in all clusters. 411 * Finally, it synchronizes with the parent process sys_wait() function that MUST be called 412 * by the parent process main thread. 413 ********************************************************************************************* 414 * @ process : pointer on process descriptor in owner cluster. 415 * @ is_exit : true when called by sys_exit() / false when called by sys_kill(). 416 * @ exit_status : exit status, when called by sys_exit(). 417 ********************************************************************************************/ 418 void process_make_kill( process_t * process, 419 bool_t is_exit, 420 uint32_t exit_status ); 442 421 443 422 -
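The process.h part of the changeset replaces the four-state process_states enum with a packed 32-bit term_state word: the 8 LSB carry the user exit status and the bits above them carry the PROCESS_FLAG_* markers. The standalone sketch below only illustrates that packing; the helper names term_state_make and term_state_status are illustrative and are not part of the ALMOS-MKH API.

    /* Standalone sketch of the term_state encoding: flag values come from the
     * changeset, the helpers are hypothetical and shown only for illustration. */

    #include <stdint.h>
    #include <stdio.h>

    #define PROCESS_FLAG_BLOCK  0x100   /* process received a SIGSTOP signal         */
    #define PROCESS_FLAG_KILL   0x200   /* process terminated by a sys_kill()        */
    #define PROCESS_FLAG_EXIT   0x400   /* process terminated by a sys_exit()        */
    #define PROCESS_FLAG_WAIT   0x800   /* parent process completed a sys_wait()     */

    /* pack one or more flags with the 8-bit user exit status (illustrative helper) */
    static inline uint32_t term_state_make( uint32_t flags , uint32_t exit_status )
    {
        return (flags & 0xFFFFFF00) | (exit_status & 0xFF);
    }

    /* extract the user defined exit status from the termination word */
    static inline uint32_t term_state_status( uint32_t term_state )
    {
        return term_state & 0xFF;
    }

    int main( void )
    {
        uint32_t ts = term_state_make( PROCESS_FLAG_EXIT , 3 );

        printf("exited = %d / killed = %d / status = %u\n",
               (ts & PROCESS_FLAG_EXIT) != 0,
               (ts & PROCESS_FLAG_KILL) != 0,
               term_state_status( ts ) );
        return 0;
    }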
trunk/kernel/kern/rpc.c
r428 r433 42 42 #include <rpc.h> 43 43 44 45 ///////////////////////////////////////////////////////////////////////////////////////// 46 // Debug macros for marshalling functions 47 ///////////////////////////////////////////////////////////////////////////////////////// 48 49 #if CONFIG_DEBUG_RPC_MARSHALING 50 51 #define RPC_DEBUG_ENTER \ 52 uint32_t cycle = (uint32_t)hal_get_cycles(); \ 53 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \ 54 printk("\n[DBG] %s : enter thread %x on core[%x,%d] / cycle %d\n", \ 55 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, CURRENT_THREAD->core->lid , cycle ); 56 57 #define RPC_DEBUG_EXIT \ 58 cycle = (uint32_t)hal_get_cycles(); \ 59 if( cycle > CONFIG_DEBUG_RPC_MARSHALING ) \ 60 printk("\n[DBG] %s : exit thread %x on core[%x,%d] / cycle %d\n", \ 61 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, CURRENT_THREAD->core->lid , cycle ); 62 63 #else 64 65 #define RPC_DEBUG_ENTER 66 67 #define RPC_DEBUG_EXIT 68 69 #endif 70 44 71 ///////////////////////////////////////////////////////////////////////////////////////// 45 72 // array of function pointers (must be consistent with enum in rpc.h) … … 50 77 &rpc_pmem_get_pages_server, // 0 51 78 &rpc_pmem_release_pages_server, // 1 52 &rpc_ process_make_exec_server, // 279 &rpc_undefined, // 2 unused slot 53 80 &rpc_process_make_fork_server, // 3 54 &rpc_ process_make_exit_server, // 481 &rpc_undefined, // 4 unused slot 55 82 &rpc_process_make_kill_server, // 5 56 83 &rpc_thread_user_create_server, // 6 … … 68 95 &rpc_vfs_mapper_load_all_server, // 17 69 96 &rpc_fatfs_get_cluster_server, // 18 70 &rpc_undefined, // 19 97 &rpc_undefined, // 19 unused slot 71 98 72 99 &rpc_vmm_get_vseg_server, // 20 … … 497 524 498 525 ///////////////////////////////////////////////////////////////////////////////////////// 499 // [2] Marshaling functions attached to RPC_PROCESS_MAKE_EXEC (blocking) 500 ///////////////////////////////////////////////////////////////////////////////////////// 501 502 ///////////////////////////////////////////////////// 503 void rpc_process_make_exec_client( cxy_t cxy, 504 exec_info_t * info, // in 505 error_t * error ) // out 506 { 507 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 508 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 509 CURRENT_THREAD->core->lid , hal_time_stamp() ); 510 511 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 512 513 // initialise RPC descriptor header 514 rpc_desc_t rpc; 515 rpc.index = RPC_PROCESS_MAKE_EXEC; 516 rpc.response = 1; 517 rpc.blocking = true; 518 519 // set input arguments in RPC descriptor 520 rpc.args[0] = (uint64_t)(intptr_t)info; 521 522 // register RPC request in remote RPC fifo (blocking function) 523 rpc_send( cxy , &rpc ); 524 525 // get output arguments from RPC descriptor 526 *error = (error_t)rpc.args[1]; 527 528 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 529 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 530 CURRENT_THREAD->core->lid , hal_time_stamp() ); 531 } 532 533 ////////////////////////////////////////////// 534 void rpc_process_make_exec_server( xptr_t xp ) 535 { 536 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 537 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 538 CURRENT_THREAD->core->lid , hal_time_stamp() ); 539 540 exec_info_t * ptr; // local pointer on remote exec_info structure 541 exec_info_t info; // local copy of exec_info structure 542 error_t error; // local error error status 543 544 // get client cluster 
identifier and pointer on RPC descriptor 545 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 546 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 547 548 // get pointer on exec_info structure in client cluster from RPC descriptor 549 ptr = (exec_info_t*)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 550 551 // copy exec_info structure from client buffer to server buffer 552 hal_remote_memcpy( XPTR( client_cxy , ptr ), 553 XPTR( local_cxy , &info ), 554 sizeof(exec_info_t) ); 555 556 // call local kernel function 557 error = process_make_exec( &info ); 558 559 // set output argument into client RPC descriptor 560 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 561 562 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 563 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 564 CURRENT_THREAD->core->lid , hal_time_stamp() ); 565 } 526 // [2] undefined slot 527 ///////////////////////////////////////////////////////////////////////////////////////// 566 528 567 529 ///////////////////////////////////////////////////////////////////////////////////////// … … 644 606 645 607 ///////////////////////////////////////////////////////////////////////////////////////// 646 // [4] Marshaling functions attached to RPC_PROCESS_MAKE_EXIT (blocking) 608 // [4] undefined slot 609 ///////////////////////////////////////////////////////////////////////////////////////// 610 611 ///////////////////////////////////////////////////////////////////////////////////////// 612 // [5] Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking) 647 613 ///////////////////////////////////////////////////////////////////////////////////////// 648 614 649 615 /////////////////////////////////////////////////// 650 void rpc_process_make_exit_client( cxy_t cxy, 651 pid_t pid, 616 void rpc_process_make_kill_client( cxy_t cxy, 617 process_t * process, 618 bool_t is_exit, 652 619 uint32_t status ) 653 620 { … … 656 623 CURRENT_THREAD->core->lid , hal_time_stamp() ); 657 624 658 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 659 660 // initialise RPC descriptor header 661 rpc_desc_t rpc; 662 rpc.index = RPC_PROCESS_MAKE_EXIT; 625 // initialise RPC descriptor header 626 rpc_desc_t rpc; 627 rpc.index = RPC_PROCESS_MAKE_KILL; 663 628 rpc.response = 1; 664 629 rpc.blocking = true; 665 630 666 631 // set input arguments in RPC descriptor 667 rpc.args[0] = (uint64_t)pid; 668 rpc.args[1] = (uint64_t)status; 632 rpc.args[0] = (uint64_t)(intptr_t)process; 633 rpc.args[1] = (uint64_t)is_exit; 634 rpc.args[2] = (uint64_t)status; 669 635 670 636 // register RPC request in remote RPC fifo (blocking function) … … 677 643 678 644 ////////////////////////////////////////////// 679 void rpc_process_make_ exit_server( xptr_t xp )645 void rpc_process_make_kill_server( xptr_t xp ) 680 646 { 681 647 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", … … 683 649 CURRENT_THREAD->core->lid , hal_time_stamp() ); 684 650 685 pid_t pid; 686 uint32_t status; 651 process_t * process; 652 bool_t is_exit; 653 uint32_t status; 687 654 688 655 // get client cluster identifier and pointer on RPC descriptor … … 691 658 692 659 // get arguments from RPC descriptor 693 pid = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 694 status = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 660 process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 661 is_exit = (bool_t) hal_remote_lwd( XPTR( 
client_cxy , &desc->args[1] ) ); 662 status = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) ); 695 663 696 664 // call local kernel function 697 process_make_ exit( pid, status );665 process_make_kill( process , is_exit , status ); 698 666 699 667 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 703 671 704 672 ///////////////////////////////////////////////////////////////////////////////////////// 705 // [5] Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking) 706 ///////////////////////////////////////////////////////////////////////////////////////// 707 708 /////////////////////////////////////////////////// 709 void rpc_process_make_kill_client( cxy_t cxy, 710 pid_t pid, 711 uint32_t sig_id ) 712 { 713 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 714 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 715 CURRENT_THREAD->core->lid , hal_time_stamp() ); 716 717 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 718 719 // initialise RPC descriptor header 720 rpc_desc_t rpc; 721 rpc.index = RPC_PROCESS_MAKE_KILL; 722 rpc.response = 1; 723 rpc.blocking = true; 724 725 // set input arguments in RPC descriptor 726 rpc.args[0] = (uint64_t)pid; 727 rpc.args[1] = (uint64_t)sig_id; 728 729 // register RPC request in remote RPC fifo (blocking function) 730 rpc_send( cxy , &rpc ); 731 732 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 733 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 734 CURRENT_THREAD->core->lid , hal_time_stamp() ); 735 } 736 737 ////////////////////////////////////////////// 738 void rpc_process_make_kill_server( xptr_t xp ) 739 { 740 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 741 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 742 CURRENT_THREAD->core->lid , hal_time_stamp() ); 743 744 pid_t pid; 745 uint32_t sig_id; 746 747 // get client cluster identifier and pointer on RPC descriptor 748 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 749 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 750 751 // get arguments from RPC descriptor 752 pid = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 753 sig_id = (uint32_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 754 755 // call local kernel function 756 process_make_exit( pid , sig_id ); 757 758 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 759 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 760 CURRENT_THREAD->core->lid , hal_time_stamp() ); 761 } 762 763 ///////////////////////////////////////////////////////////////////////////////////////// 764 // [6] Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 673 // [6] Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 765 674 ///////////////////////////////////////////////////////////////////////////////////////// 766 675 … … 1036 945 1037 946 // call relevant kernel function 1038 if (action == DELETE_ALL_THREADS ) process_delete_threads ( process , client_xp);1039 else if (action == BLOCK_ALL_THREADS ) process_block_threads ( process , client_xp);1040 else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process 947 if (action == DELETE_ALL_THREADS ) process_delete_threads ( process ); 948 else if (action == BLOCK_ALL_THREADS ) process_block_threads ( process ); 949 else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 1041 950 1042 951 // decrement the responses counter in RPC descriptor, -
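rpc.c now defines RPC_DEBUG_ENTER and RPC_DEBUG_EXIT, guarded by CONFIG_DEBUG_RPC_MARSHALING, while the marshaling functions shown above still use the older rpc_dmsg() calls. The fragment below is a hedged sketch of how a marshaling function would presumably use the new macros; it assumes the ALMOS-MKH kernel headers (rpc.h, thread.h, hal_special.h) and is not standalone code. Since RPC_DEBUG_ENTER declares the local variable cycle, it must appear before RPC_DEBUG_EXIT and at most once per function.

    // Hedged sketch: presumed use of the new debug macros in a marshaling
    // function; rpc_example_client is an illustrative name, not kernel API.
    void rpc_example_client( cxy_t cxy , rpc_desc_t * rpc )
    {
        RPC_DEBUG_ENTER       // declares "cycle" / prints only after the threshold

        // register RPC request in remote RPC fifo (blocking function)
        rpc_send( cxy , rpc );

        RPC_DEBUG_EXIT        // reuses the "cycle" variable declared by RPC_DEBUG_ENTER
    }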
trunk/kernel/kern/rpc.h
r428 r433 62 62 RPC_PMEM_GET_PAGES = 0, 63 63 RPC_PMEM_RELEASE_PAGES = 1, 64 RPC_ PROCESS_MAKE_EXEC= 2,64 RPC_UNDEFINED_2 = 2, 65 65 RPC_PROCESS_MAKE_FORK = 3, 66 RPC_ PROCESS_MAKE_EXIT= 4,66 RPC_UNDEFINED_4 = 4, 67 67 RPC_PROCESS_MAKE_KILL = 5, 68 68 RPC_THREAD_USER_CREATE = 6, … … 80 80 RPC_VFS_MAPPER_LOAD_ALL = 17, 81 81 RPC_FATFS_GET_CLUSTER = 18, 82 RPC_UNDEFINED_19 = 19, 82 83 83 84 RPC_VMM_GET_VSEG = 20, … … 210 211 211 212 /*********************************************************************************** 212 * [2] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing 213 * process descriptor in a remote server cluster. This server cluster must be 214 * the owner cluster for the existing process. The new process descriptor is 215 * initialized from informations found in the <exec_info> structure. 216 * A new main thread descriptor is created in the server cluster. 217 * All copies of the old process descriptor and all old threads are destroyed. 218 *********************************************************************************** 219 * @ cxy : server cluster identifier. 220 * @ process : [in] local pointer on the exec_info structure in client cluster. 221 * @ error : [out] error status (0 if success). 222 **********************************************************************************/ 223 void rpc_process_make_exec_client( cxy_t cxy, 224 struct exec_info_s * info, 225 error_t * error ); 226 227 void rpc_process_make_exec_server( xptr_t xp ); 213 * [2] undefined slot 214 **********************************************************************************/ 228 215 229 216 /*********************************************************************************** … … 251 238 252 239 /*********************************************************************************** 253 * [4] The RPC_PROCESS_MAKE_EXIT can be called by any thread to request the owner 254 * cluster to execute the process_make_exit() function for the target process. 255 *********************************************************************************** 256 * @ cxy : owner cluster identifier. 257 * @ pid : target process identifier. 258 * @ status : calling process exit status. 259 **********************************************************************************/ 260 void rpc_process_make_exit_client( cxy_t cxy, 261 pid_t pid, 262 uint32_t status ); 263 264 void rpc_process_make_exit_server( xptr_t xp ); 240 * [4] undefined slot 241 **********************************************************************************/ 265 242 266 243 /*********************************************************************************** … … 269 246 *********************************************************************************** 270 247 * @ cxy : owner cluster identifier. 271 * @ pid : target process identifier. 272 * @ seg_id : signal type (only SIGKILL / SIGSTOP / SIGCONT are supported). 248 * @ process : pointer on process in owner cluster. 
249 * @ is_exit : true if called by sys_exit() / false if called by sys_kill() 250 * @ status : exit status (only when called by sys_exit() 273 251 **********************************************************************************/ 274 252 void rpc_process_make_kill_client( cxy_t cxy, 275 pid_t pid, 276 uint32_t seg_id ); 253 struct process_s * process, 254 bool_t is_exit, 255 uint32_t status ); 277 256 278 257 void rpc_process_make_kill_server( xptr_t xp ); … … 517 496 518 497 /*********************************************************************************** 498 * [19] undefined slot 499 **********************************************************************************/ 500 501 /*********************************************************************************** 519 502 * [20] The RPC_VMM_GET_VSEG returns an extended pointer 520 503 * on the vseg containing a given virtual address in a given process. -
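With the reworked prototype, both sys_exit() and sys_kill() can funnel into the same RPC toward the owner cluster. The fragment below is a hedged sketch of the caller side, assuming kernel context; the way the extended pointer on the owner-cluster process descriptor is obtained is not shown in this changeset, so process_xp and the wrapper function name are assumptions.

    // Hedged sketch (kernel context assumed): send the kill/exit request to
    // the owner cluster, given an xptr_t on the process descriptor there.
    static void example_kill_or_exit( xptr_t process_xp , bool_t is_exit , uint32_t status )
    {
        // owner cluster identifier and local pointer on the process descriptor
        cxy_t       owner_cxy = GET_CXY( process_xp );
        process_t * process   = GET_PTR( process_xp );

        // is_exit == true  when coming from sys_exit()  (status = 8-bit user exit value)
        // is_exit == false when coming from sys_kill()  (status is ignored)
        rpc_process_make_kill_client( owner_cxy , process , is_exit , status );
    }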
trunk/kernel/kern/scheduler.c
r428 r433 178 178 179 179 /////////////////////////////////////////// 180 void sched_handle_ requests( core_t * core )180 void sched_handle_signals( core_t * core ) 181 181 { 182 182 list_entry_t * iter; … … 231 231 thread_destroy( thread ); 232 232 233 sched_dmsg("\n[DBG] %s : thread %x deleted thread %x / cycle %d\n", 234 __FUNCTION__ , CURRENT_THREAD , thread , (uint32_t)hal_get_cycles() ); 235 233 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 234 uint32_t cycle = (uint32_t)hal_get_cycles(); 235 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 236 printk("\n[DBG] %s : thread %x deleted thread %x / cycle %d\n", 237 __FUNCTION__ , CURRENT_THREAD , thread , cycle ); 238 #endif 236 239 // destroy process descriptor if no more threads 237 240 if( process->th_nr == 0 ) … … 240 243 process_destroy( process ); 241 244 242 sched_dmsg("\n[DBG] %s : thread %x deleted process %x / cycle %d\n", 243 __FUNCTION__ , CURRENT_THREAD , process , (uint32_t)hal_get_cycles() ); 245 #if CONFIG_DEBUG_SCHED_HANDLE_SIGNALS 246 cycle = (uint32_t)hal_get_cycles(); 247 if( CONFIG_DEBUG_SCHED_HANDLE_SIGNALS < cycle ) 248 printk("\n[DBG] %s : thread %x deleted process %x / cycle %d\n", 249 __FUNCTION__ , CURRENT_THREAD , process , cycle ); 250 #endif 244 251 245 252 } … … 251 258 spinlock_unlock( &sched->lock ); 252 259 253 } // end sched_handle_ requests()260 } // end sched_handle_signals() 254 261 255 262 //////////////////////////////// … … 261 268 scheduler_t * sched = &core->scheduler; 262 269 263 #if( CONFIG_SCHED_DEBUG & 0x1 ) 264 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( core->lid ); 270 #if (CONFIG_DEBUG_SCHED_YIELD & 0x1) 271 if( CONFIG_DEBUG_SCHED_YIELD < (uint32_t)hal_get_cycles() ) 272 sched_display( core->lid ); 265 273 #endif 266 274 … … 291 299 { 292 300 293 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 301 #if CONFIG_DEBUG_SCHED_YIELD 302 uint32_t cycle = (uint32_t)hal_get_cycles(); 303 if( CONFIG_DEBUG_SCHED_YIELD < cycle ) 304 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 294 305 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 295 306 __FUNCTION__, local_cxy, core->lid, cause, 296 307 current, thread_type_str(current->type), current->process->pid, current->trdid, 297 next , thread_type_str(next->type) , next->process->pid , next->trdid,298 (uint32_t)hal_get_cycles() ); 308 next , thread_type_str(next->type) , next->process->pid , next->trdid , cycle ); 309 #endif 299 310 300 311 // update scheduler … … 316 327 { 317 328 318 #if( CONFIG_ SCHED_DEBUG& 0x1 )319 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) 320 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n" 321 "thread %x (%s) (%x,%x) continue / cycle %d\n",329 #if( CONFIG_DEBUG_SCHED_YIELD & 0x1 ) 330 uint32_t cycle = (uint32_t)hal_get_cycles(); 331 if( CONFIG_DEBUG_SCHED_YIELD < cycle ) 332 printk("\n[DBG] %s : core[%x,%d] / cause = %s / thread %x (%s) (%x,%x) continue / cycle %d\n", 322 333 __FUNCTION__, local_cxy, core->lid, cause, 323 current, thread_type_str(current->type), current->process->pid, current->trdid, 324 (uint32_t)hal_get_cycles() ); 334 current, thread_type_str(current->type), current->process->pid, current->trdid, cycle ); 325 335 #endif 326 336 … … 328 338 329 339 // handle pending requests for all threads executing on this core. 330 sched_handle_ requests( core );340 sched_handle_signals( core ); 331 341 332 342 // exit critical section / restore SR from next thread context -
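Throughout the changeset the old *_dmsg() macros are replaced by the same guard: a CONFIG_DEBUG_* constant acts both as an on/off switch (0 disables the block) and as the cycle date after which messages start to be printed, so early boot stays silent and tracing begins at a chosen point. The standalone sketch below reproduces that pattern outside the kernel; the threshold value and get_cycles() are stand-ins for the kernel configuration and hal_get_cycles().

    /* Standalone sketch of the cycle-threshold debug convention used here. */

    #include <stdint.h>
    #include <stdio.h>

    #define CONFIG_DEBUG_SCHED_YIELD  1000      /* illustrative threshold (cycles) */

    static uint32_t get_cycles( void )          /* stand-in for hal_get_cycles()   */
    {
        static uint32_t fake = 0;
        return fake += 600;
    }

    static void sched_yield_example( const char * cause )
    {
    #if CONFIG_DEBUG_SCHED_YIELD
        uint32_t cycle = get_cycles();
        if( CONFIG_DEBUG_SCHED_YIELD < cycle )
            printf("\n[DBG] %s : cause = %s / cycle %u\n", __func__, cause, cycle );
    #endif
        /* ... select the next thread and switch context ... */
    }

    int main( void )
    {
        sched_yield_example("client blocked on I/O");   /* cycle  600 : silent  */
        sched_yield_example("time slice elapsed");      /* cycle 1200 : printed */
        return 0;
    }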
trunk/kernel/kern/scheduler.h
r428 r433 91 91 * @ core : local pointer on the core descriptor. 92 92 ********************************************************************************************/ 93 void sched_handle_ requests( struct core_s * core );93 void sched_handle_signals( struct core_s * core ); 94 94 95 95 /********************************************************************************************* -
trunk/kernel/kern/thread.c
r428 r433 227 227 assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" ); 228 228 229 thread_dmsg("\n[DBG] %s : core[%x,%d] enter for process %x\n", 230 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ); 229 #if CONFIG_DEBUG_THREAD_USER_CREATE 230 uint32_t cycle = (uint32_t)hal_get_cycles(); 231 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle ) 232 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n", 233 __FUNCTION__, CURRENT_THREAD, pid , cycle ); 234 #endif 231 235 232 236 // get process descriptor local copy 233 237 process = process_get_local_copy( pid ); 234 235 238 if( process == NULL ) 236 239 { … … 326 329 dqdt_local_update_threads( 1 ); 327 330 328 thread_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / trdid = %x / core = %d\n", 329 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid, thread->trdid, core_lid ); 331 #if CONFIG_DEBUG_THREAD_USER_CREATE 332 cycle = (uint32_t)hal_get_cycles(); 333 if( CONFIG_DEBUG_THREAD_USER_CREATE < cycle ) 334 printk("\n[DBG] %s : thread %x exit / process %x / new_thread %x / core %d / cycle %d\n", 335 __FUNCTION__, CURRENT_THREAD, pid, thread, core_lid, cycle ); 336 #endif 330 337 331 338 *new_thread = thread; … … 359 366 vseg_t * vseg; // child thread STACK vseg 360 367 361 thread_dmsg("\n[DBG] %s : core[%x,%d] enters at cycle %d\n", 362 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 368 #if CONFIG_DEBUG_THREAD_USER_FORK 369 uint32_t cycle = (uint32_t)hal_get_cycles(); 370 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle ) 371 printk("\n[DBG] %s : thread %x enter / child_process %x / cycle %d\n", 372 __FUNCTION__, CURRENT_THREAD, child_process->pid, cycle ); 373 #endif 363 374 364 375 // select a target core in local cluster … … 474 485 } 475 486 476 // increment p age descriptor fork_nr for the referencedpage if mapped487 // increment pending forks counter for the page if mapped 477 488 if( mapped ) 478 489 { … … 480 491 cxy_t page_cxy = GET_CXY( page_xp ); 481 492 page_t * page_ptr = (page_t *)GET_PTR( page_xp ); 482 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 ); 483 484 thread_dmsg("\n[DBG] %s : core[%x,%d] copied PTE to child GPT : vpn %x\n", 485 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ); 493 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 ); 494 495 #if (CONFIG_DEBUG_THREAD_USER_FORK & 1) 496 cycle = (uint32_t)hal_get_cycles(); 497 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle ) 498 printk("\n[DBG] %s : thread %x copied stack PTE to child GPT : vpn %x\n", 499 __FUNCTION__, CURRENT_THREAD, vpn ); 500 #endif 486 501 487 502 } 488 503 } 489 504 490 // set COW flag for STAK vseg in parent thread GPT 491 hal_gpt_flip_cow( true, // set cow 492 parent_gpt_xp, 493 vpn_base, 494 vpn_size ); 505 // set COW flag for all mapped entries of STAK vseg in parent thread GPT 506 hal_gpt_set_cow( parent_gpt_xp, 507 vpn_base, 508 vpn_size ); 495 509 496 510 // update DQDT for child thread 497 511 dqdt_local_update_threads( 1 ); 498 512 499 thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n", 500 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, child_ptr->trdid, child_process->pid ); 513 #if CONFIG_DEBUG_THREAD_USER_FORK 514 cycle = (uint32_t)hal_get_cycles(); 515 if( CONFIG_DEBUG_THREAD_USER_FORK < cycle ) 516 printk("\n[DBG] %s : thread %x exit / child_process %x / child_thread %x / cycle %d\n", 517 __FUNCTION__, CURRENT_THREAD, child_process->pid, child_ptr, cycle ); 518 #endif 501 519 
502 520 return 0; … … 514 532 thread_t * thread; // pointer on new thread descriptor 515 533 516 thread_dmsg("\n[DBG] %s : core[%x,%d] enters / type %s / cycle %d\n",517 __FUNCTION__ , local_cxy , core_lid , thread_type_str( type ) , hal_time_stamp() );518 519 534 assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) , 520 535 __FUNCTION__ , "illegal thread type" ); … … 522 537 assert( (core_lid < LOCAL_CLUSTER->cores_nr) , 523 538 __FUNCTION__ , "illegal core_lid" ); 539 540 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE 541 uint32_t cycle = (uint32_t)hal_get_cycles(); 542 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle ) 543 printk("\n[DBG] %s : thread %x enter / requested_type %s / cycle %d\n", 544 __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle ); 545 #endif 524 546 525 547 // allocate memory for new thread descriptor … … 549 571 dqdt_local_update_threads( 1 ); 550 572 551 thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n", 552 __FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() ); 573 #if CONFIG_DEBUG_THREAD_KERNEL_CREATE 574 cycle = (uint32_t)hal_get_cycles(); 575 if( CONFIG_DEBUG_THREAD_KERNEL_CREATE < cycle ) 576 printk("\n[DBG] %s : thread %x exit / new_thread %x / type %s / cycle %d\n", 577 __FUNCTION__, CURRENT_THREAD, thread, thread_type_str(type), cycle ); 578 #endif 553 579 554 580 *new_thread = thread; … … 589 615 void thread_destroy( thread_t * thread ) 590 616 { 591 uint32_t tm_start;592 uint32_t tm_end;593 617 reg_t save_sr; 594 618 … … 596 620 core_t * core = thread->core; 597 621 598 thread_dmsg("\n[DBG] %s : enters for thread %x in process %x / type = %s\n", 599 __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) ); 622 #if CONFIG_DEBUG_THREAD_DESTROY 623 uint32_t cycle = (uint32_t)hal_get_cycles(); 624 if( CONFIG_DEBUG_THREAD_DESTROY < cycle ) 625 printk("\n[DBG] %s : thread %x enter to destroy thread %x in process %x / cycle %d\n", 626 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); 627 #endif 600 628 601 629 assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" ); … … 604 632 605 633 assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" ); 606 607 tm_start = hal_get_cycles();608 634 609 635 // update intrumentation values … … 635 661 thread_release( thread ); 636 662 637 tm_end = hal_get_cycles(); 638 639 thread_dmsg("\n[DBG] %s : exit for thread %x in process %x / duration = %d\n", 640 __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start ); 663 #if CONFIG_DEBUG_THREAD_DESTROY 664 cycle = (uint32_t)hal_get_cycles(); 665 if( CONFIG_DEBUG_THREAD_DESTROY < cycle ) 666 printk("\n[DBG] %s : thread %x exit / destroyed thread %x in process %x / cycle %d\n", 667 __FUNCTION__, CURRENT_THREAD, thread, process->pid, cycle ); 668 #endif 641 669 642 670 } // end thread_destroy() … … 779 807 hal_fence(); 780 808 809 #if CONFIG_DEBUG_THREAD_BLOCK 810 uint32_t cycle = (uint32_t)hal_get_cycles(); 811 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 812 printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / state %x / cycle %d\n", 813 __FUNCTION__ , CURRENT_THREAD , thread , cause , thread->blocked , cycle ); 814 #endif 815 781 816 } // end thread_block() 782 817 783 ///////////////////////////////////////// 784 uint32_t thread_unblock( xptr_t thread ,818 //////////////////////////////////////////// 819 uint32_t thread_unblock( xptr_t thread_xp, 785 820 uint32_t cause ) 786 821 { 
787 822 // get thread cluster and local pointer 788 cxy_t cxy = GET_CXY( thread );789 thread_t * ptr = (thread_t *)GET_PTR( thread);823 cxy_t cxy = GET_CXY( thread_xp ); 824 thread_t * ptr = GET_PTR( thread_xp ); 790 825 791 826 // reset blocking cause … … 793 828 hal_fence(); 794 829 830 #if CONFIG_DEBUG_THREAD_BLOCK 831 uint32_t cycle = (uint32_t)hal_get_cycles(); 832 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 833 printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / state %x / cycle %d\n", 834 __FUNCTION__ , CURRENT_THREAD , ptr , cause , ptr->blocked , cycle ); 835 #endif 836 795 837 // return a non zero value if the cause bit is modified 796 838 return( previous & cause ); … … 805 847 thread_t * killer = CURRENT_THREAD; 806 848 807 thread_dmsg("\n[DBG] %s : killer thread %x enter for target thread %x\n", 808 __FUNCTION__, local_cxy, killer->trdid , target->trdid ); 849 #if CONFIG_DEBUG_THREAD_KILL 850 uint32_t cycle = (uint32_t)hal_get_cycles; 851 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 852 printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n", 853 __FUNCTION__, killer, target, cycle ); 854 #endif 809 855 810 856 // set the global blocked bit in target thread descriptor. … … 835 881 hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE ); 836 882 837 thread_dmsg("\n[DBG] %s : killer thread %x exit for target thread %x\n", 838 __FUNCTION__, local_cxy, killer->trdid , target->trdid ); 883 #if CONFIG_DEBUG_THREAD_KILL 884 cycle = (uint32_t)hal_get_cycles; 885 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 886 printk("\n[DBG] %s : thread %x exit for target thread %x / cycle %d\n", 887 __FUNCTION__, killer, target, cycle ); 888 #endif 839 889 840 890 } // end thread_kill() … … 851 901 { 852 902 853 idle_dmsg("\n[DBG] %s : core[%x][%d] goes to sleep at cycle %d\n", 854 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 903 #if CONFIG_DEBUG_THREAD_IDLE 904 uint32_t cycle = (uint32_t)hal_get_cycles; 905 thread_t * this = CURRENT_THREAD; 906 if( CONFIG_DEBUG_THREAD_IDLE < cycle ) 907 printk("\n[DBG] %s : idle thread %x on core[%x,%d] goes to sleep / cycle %d\n", 908 __FUNCTION__, this, local_cxy, this->core->lid, cycle ); 909 #endif 855 910 856 911 hal_core_sleep(); 857 912 858 idle_dmsg("\n[DBG] %s : core[%x][%d] wake up at cycle %d\n", 859 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() ); 913 #if CONFIG_DEBUG_THREAD_IDLE 914 cycle = (uint32_t)hal_get_cycles; 915 if( CONFIG_DEBUG_THREAD_IDLE < cycle ) 916 printk("\n[DBG] %s : idle thread %x on core[%x,%d] wake up / cycle %d\n", 917 __FUNCTION__, this, local_cxy, this->core->lid, cycle ); 918 #endif 860 919 861 920 }
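thread_unblock() now takes an extended pointer (thread_xp) plus the cause bit and returns a non-zero value when that bit was actually set, and the new debug blocks report both the cause and the resulting blocked field. The fragment below is a hedged, kernel-context sketch of the call pattern, for instance when unblocking the threads of a target process; target_cxy and target_ptr stand for values obtained by the caller and are assumptions.

    // Hedged sketch (kernel context assumed): unblock a possibly remote thread
    // and check whether the THREAD_BLOCKED_GLOBAL bit was actually cleared.
    static void example_unblock( cxy_t target_cxy , thread_t * target_ptr )
    {
        // build extended pointer on the (possibly remote) target thread
        xptr_t target_xp = XPTR( target_cxy , target_ptr );

        // returns a non zero value if the THREAD_BLOCKED_GLOBAL bit was set
        uint32_t previous = thread_unblock( target_xp , THREAD_BLOCKED_GLOBAL );

        if( previous == 0 )
        {
            // the target thread was not globally blocked : nothing was changed
        }
    }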