Changeset 409 for trunk/kernel/kern
- Timestamp: Dec 20, 2017, 4:51:09 PM (7 years ago)
- Location: trunk/kernel/kern
- Files: 17 edited
- Legend: unmodified lines have no prefix / added lines are prefixed with "+" / removed lines with "-"
trunk/kernel/kern/cluster.c
--- r408
+++ r409
     lpid_t lpid = LPID_FROM_PID( pid );

+    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
+
     // check pid argument
-    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
-    {
-        panic("illegal PID");
-    }
-
-    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
+    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) ,
+    __FUNCTION__ , "illegal PID" );
+
+    // check number of copies
+    assert( (pm->copies_nr[lpid] == 0) ,
+    __FUNCTION__ , "number of copies must be 0" );

     // get the process manager lock
trunk/kernel/kern/cluster.h
--- r408
+++ r409
     int32_t      threads_var;    /*! threads number increment from last DQDT update */

-    dqdt_node_t  dqdt_tbl[CONFIG_MAX_DQDT_DEPTH];    /*! embedded DQDT nodes */
+    dqdt_node_t  dqdt_tbl[CONFIG_DQDT_LEVELS_NR];    /*! embedded DQDT nodes in cluster */

     // Local process manager
trunk/kernel/kern/core.c
--- r408
+++ r409
     ticks = core->ticks_nr++;

-    // handle signals for all threads executing on this core
-    sched_handle_signals( core );
-
     // handle scheduler
     if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
trunk/kernel/kern/core.h
--- r367
+++ r409
     struct thread_s * fpu_owner;    /*! pointer on current FPU owner thread */
     uint32_t          rand_last;    /*! last computed random value */

+    scheduler_t       scheduler;    /*! embedded private scheduler */

trunk/kernel/kern/do_syscall.c
--- r408
+++ r409
     sys_thread_join,        // 3
     sys_thread_detach,      // 4
-    sys_undefined,          // 5
+    sys_thread_cancel,      // 5
     sys_sem,                // 6
     sys_condvar,            // 7
…
     else if( index == SYS_THREAD_JOIN   ) return "THREAD_JOIN";    // 3
     else if( index == SYS_THREAD_DETACH ) return "THREAD_DETACH";  // 4
+    else if( index == SYS_THREAD_CANCEL ) return "THREAD_CANCEL";  // 5
     else if( index == SYS_SEM           ) return "SEM";            // 6
     else if( index == SYS_CONDVAR       ) return "CONDVAR";        // 7
…
     error = syscall_tbl[service_num] ( arg0 , arg1 , arg2 , arg3 );

+    // check kernel stack overflow
+    assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
+
     // update kernel time
     thread_kernel_time_update( this );
trunk/kernel/kern/kernel_init.c
--- r408
+++ r409
     thread->core = &LOCAL_CLUSTER->core_tbl[core_lid];

-    // each core initializes "locks_root" and "xlocks_root" in idle thread descriptor
+#if CONFIG_LOCKS_DEBUG
     list_root_init( &thread->locks_root );
     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
+#endif

     // CP0 in I/O cluster initialises TXT0 chdev descriptor
trunk/kernel/kern/printk.h
--- r408
+++ r409
 #endif

+#if CONFIG_KILL_DEBUG
+#define kill_dmsg(...) if(hal_time_stamp() > CONFIG_KILL_DEBUG) printk(__VA_ARGS__)
+#else
+#define kill_dmsg(...)
+#endif
+
 #if CONFIG_KINIT_DEBUG
 #define kinit_dmsg(...) if(hal_time_stamp() > CONFIG_KINIT_DEBUG) printk(__VA_ARGS__)
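The new kill_dmsg() macro follows the same pattern as the other *_dmsg macros in printk.h: the CONFIG_KILL_DEBUG value acts both as an on/off switch and as a cycle threshold, so traces only appear once hal_time_stamp() exceeds it, and when the flag is 0 the macro expands to nothing. A hypothetical call site (the format string and arguments below are illustrative, not taken from this changeset) would look like:

    kill_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );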
trunk/kernel/kern/process.c
--- r408
+++ r409
 * Copyright (c) UPMC Sorbonne Universites
 *
-* This file is part of ALMOS-MKH..
+* This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
…
 #include <hal_remote.h>
 #include <hal_uspace.h>
+#include <hal_irqmask.h>
 #include <errno.h>
 #include <printk.h>
…
 #include <elf.h>
 #include <syscalls.h>
+#include <signal.h>

 //////////////////////////////////////////////////////////////////////////////////////////
…
     uint32_t    stderr_id;

-    process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x \n",
-    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
+    process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / ppid = %x\n",
+    __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , ppid );

     // get model process cluster and local pointer
…
     process->ref_xp = XPTR( local_cxy , process );

-    // initialize vmm
+    // initialize vmm as empty
     vmm_init( process );

…
     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

-    // initialize fd_array (not for kernel)
+    // initialize fd_array as empty
     process_fd_init( process );

     // create stdin / stdout / stderr pseudo-files
-    if( ppid == 0 )
+    if( ppid == 0 )                                         // process_init
     {
         error1 = vfs_open( process,
…
                            &stderr_id );
     }
-    else                                                    // other user process
+    else                                                    // any other process
     {
         error1 = vfs_open( process,
…
             "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id );

-    // initialize specific files, cwd_lock, and fd_array
+    // initialize specific inodes root and cwd
     process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                          &model_ptr->vfs_root_xp ) );
     process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
                                                          &model_ptr->vfs_cwd_xp ) );
-    process->vfs_bin_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
-                                                         &model_ptr->vfs_bin_xp ) );
-    vfs_file_count_up( process->vfs_root_xp );
-    vfs_file_count_up( process->vfs_cwd_xp );
-    vfs_file_count_up( process->vfs_bin_xp );
-
+    vfs_inode_remote_up( process->vfs_root_xp );
+    vfs_inode_remote_up( process->vfs_cwd_xp );
+
+    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
+
+    // copy all open file descriptors (other than stdin / stdout / stderr)
     process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
                             XPTR( model_cxy , &model_ptr->fd_array ) );

-    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
-
-    process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n",
+    process_dmsg("\n[DBG] %s : core[%x,%d] / fd array for process %x\n",
     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );

…
     remote_spinlock_unlock( copies_lock );

-    // synchronize memory
     hal_fence();

     // From this point, the process descriptor is unreachable

-    // close all open files and update dirty TODO [AG]
-
-    // release signal manager TODO [AG]
+    // FIXME close all open files and update dirty [AG]

     // Decrease refcount for bin file, root file and cwd file
…
 }  // end process_destroy()

+/////////////////////////////////////////////////
+char * process_action_str( uint32_t action_type )
+{
+    if     ( action_type == BLOCK_ALL_THREADS   ) return "BLOCK";
+    else if( action_type == UNBLOCK_ALL_THREADS ) return "UNBLOCK";
+    else if( action_type == DELETE_ALL_THREADS  ) return "DELETE";
+    else                                          return "undefined";
+}
+
+////////////////////////////////////////////
+void process_sigaction( process_t * process,
+                        uint32_t    action_type )
+{
+    cxy_t        owner_cxy;     // owner cluster identifier
+    lpid_t       lpid;          // process index in owner cluster
+    cluster_t  * cluster;       // pointer on cluster manager
+    xptr_t       root_xp;       // extended pointer on root of copies
+    xptr_t       lock_xp;       // extended pointer on lock protecting copies
+    xptr_t       client_xp;     // extended pointer on client thread
+    uint32_t     rsp_count;     // number of expected responses
+    xptr_t       rsp_xp;        // extended pointer on responses counter
+    xptr_t       iter_xp;       // iterator on copies list
+    xptr_t       process_xp;    // extended pointer on process copy
+    cxy_t        process_cxy;   // process copy cluster identifier
+    process_t  * process_ptr;   // local pointer on process copy
+
+    signal_dmsg("\n[DBG] %s : enter for signal %s to process %x in cluster %x\n",
+    __FUNCTION__ , process_action_str( action_type ) , process , local_cxy );
+
+    thread_t * this = CURRENT_THREAD;
+
+    // get extended pointer on client thread and response counter
+    client_xp = XPTR( local_cxy , this );
+    rsp_xp    = XPTR( local_cxy , &rsp_count );
+
+    // get owner cluster identifier and process lpid
+    owner_cxy = CXY_FROM_PID( process->pid );
+    lpid      = LPID_FROM_PID( process->pid );
+
+    assert( (owner_cxy == local_cxy) , __FUNCTION__ , "illegal cluster\n" );
+
+    // get local pointer on local cluster manager
+    cluster = LOCAL_CLUSTER;
+
+    // get extended pointers on copies root, copies lock, and number of copies
+    root_xp = XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] );
+    lock_xp = XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] );
+
+    // initialize responses number
+    rsp_count = cluster->pmgr.copies_nr[lpid];
+
+    // take the lock protecting the copies
+    remote_spinlock_lock( lock_xp );
+
+    // send RPCs to all process copies
+    XLIST_FOREACH( root_xp , iter_xp )
+    {
+        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
+        process_cxy = GET_CXY( process_xp );
+        process_ptr = (process_t *)GET_PTR( process_xp );
+
+        printk("\n @@@ %s : process = %x / pid = %x / ppid = %x\n",
+        __FUNCTION__ , process_ptr , process_ptr->pid , process_ptr->ppid );
+
+        rpc_process_sigaction_client( process_cxy,
+                                      process_ptr,
+                                      action_type,
+                                      rsp_xp,
+                                      client_xp );
+    }
+
+    // release the lock protecting process copies
+    remote_spinlock_unlock( lock_xp );
+
+    // block and deschedule to wait response
+    thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
+    sched_yield("BLOCKED on RPC");
+
+    signal_dmsg("\n[DBG] %s : exit for signal %s to process %x in cluster %x\n",
+    __FUNCTION__ , process_action_str( action_type ) , process , local_cxy );
+
+}  // end process_sigaction()
+
 ////////////////////////////////////////
-void process_kill( process_t * process )
-{
-    thread_t * thread;    // pointer on current thead descriptor
-    uint32_t   ltid;      // index in process th_tbl
-    uint32_t   count;     // thread counter
-
-    printk("\n[@@@] %s enter\n", __FUNCTION__ );
-
-    // get lock protecting th_tbl[]
-    spinlock_lock( &process->th_lock );
-
-    // first loop on threads to send the THREAD_SIG_KILL signal to all process threads
-    // we use both "ltid" and "count" indexes, because it can exist "holes" in th_tbl
-    for( ltid = 0 , count = 0 ;
-         (ltid < CONFIG_THREAD_MAX_PER_CLUSTER) && (count < process->th_nr) ;
-         ltid++ )
-    {
-        thread = process->th_tbl[ltid];
-
-        if( thread != NULL )
-        {
-            thread_kill( thread );
-            count++;
-        }
-    }
-
-    printk("\n[@@@] %s : %d signal(s) sent\n", __FUNCTION__, count );
-
-    // second loop on threads to wait acknowledge from scheduler,
-    // unlink thread from process and parent thread, and release thread descriptor
-    for( ltid = 0 , count = 0 ;
-         (ltid < CONFIG_THREAD_MAX_PER_CLUSTER) && (count < process->th_nr) ;
-         ltid++ )
-    {
-        thread = process->th_tbl[ltid];
-
-        if( thread != NULL )
-        {
-            printk("\n[@@@] %s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
-
-            // poll the THREAD_SIG_KILL bit until reset
-            while( thread->signals & THREAD_SIG_KILL ) asm volatile( "nop" );
-
-            printk("\n[@@@] %s exit polling\n", __FUNCTION__ );
-
-            // detach target thread from parent if attached
-            if( (thread->flags & THREAD_FLAG_DETACHED) != 0 )
-                thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
-
-            // unlink thread from process
-            process_remove_thread( thread );
-
-            // release memory for thread descriptor
-            thread_destroy( thread );
-
-            count++;
-        }
-    }
-
-    printk("\n[@@@] %s : %d ack(s) received\n", __FUNCTION__, count );
-
-    // release lock protecting th_tbl[]
-    spinlock_unlock( &process->th_lock );
-
-    // release memory allocated for process descriptor
-    process_destroy( process );
-
-    printk("\n[DBG] %s : core[%x,%d] exit\n",
-    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
-
-}  // end process_kill()
+void process_block( process_t * process,
+                    xptr_t      rsp_xp,
+                    xptr_t      client_xp )
+{
+    thread_t          * target;         // pointer on target thread
+    uint32_t            ltid;           // index in process th_tbl
+    thread_t          * killer;         // killer thread pointer
+    uint32_t            count;          // requests counter
+    volatile uint32_t   sig_rsp_count;  // responses counter
+    cxy_t               client_cxy;     // client thread cluster identifier
+    thread_t          * client_ptr;     // client thread pointer
+    core_t            * client_core;    // client thread core pointer
+
+    // get local killer thread pointer
+    killer = CURRENT_THREAD;
+
+    signal_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
+    __FUNCTION__ , process->pid , local_cxy );
+
+    // get lock protecting process th_tbl[]
+    spinlock_lock( &process->th_lock );
+
+    // initialize local responses counter
+    sig_rsp_count = process->th_nr;
+
+    // loop on process threads to block and deschedule all threads in cluster
+    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
+    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
+    {
+        target = process->th_tbl[ltid];
+
+        if( target != NULL )             // thread found
+        {
+            count++;
+
+            // set signal in target thread descriptor
+            thread_set_signal( target , (uint32_t *)sig_rsp_count );
+
+            // set the global blocked bit in target thread descriptor.
+            thread_block( target , THREAD_BLOCKED_GLOBAL );
+
+            // - if the killer thread and the target thread are not on the same core
+            //   we want the scheduler of target thread to acknowlege the signal
+            //   to be sure that the target thread is descheduled
+            // - if the killer thread and the target thread are on the same core
+            //   we simply decrement the response counter.
+            if( killer->core->lid != target->core->lid )
+            {
+                dev_pic_send_ipi( local_cxy , target->core->lid );
+            }
+            else
+            {
+                hal_atomic_add( (void *)&sig_rsp_count , -1 );
+            }
+        }
+    }
+
+    // poll the reponses counter
+    while( 1 )
+    {
+        // exit loop when all responses received
+        if ( sig_rsp_count == 0 ) break;
+
+        // wait 1000 cycles before retry
+        hal_fixed_delay( 1000 );
+    }
+
+    // acknowledge client thread & unblock client thread if last response
+    client_cxy  = GET_CXY( client_xp );
+    client_ptr  = (thread_t *)GET_PTR( client_xp );
+    client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
+    if( hal_remote_atomic_add( rsp_xp , -1 ) == 1 )
+    {
+        thread_unblock( client_xp , THREAD_BLOCKED_RPC);
+        dev_pic_send_ipi( client_cxy , client_core->lid );
+    }
+
+    signal_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
+    __FUNCTION__ , process->pid , local_cxy , count );
+
+}  // end process_block()
+
+//////////////////////////////////////////
+void process_unblock( process_t * process,
+                      xptr_t      rsp_xp,
+                      xptr_t      client_xp )
+{
+    thread_t * target;        // pointer on target thead
+    uint32_t   ltid;          // index in process th_tbl
+    thread_t * killer;        // killer thread pointer
+    uint32_t   req_count;     // requests counter
+    cxy_t      client_cxy;    // client thread cluster identifier
+    thread_t * client_ptr;    // client thread pointer
+    core_t   * client_core;   // client thread core pointer
+
+    // get local killer thread pointer
+    killer = CURRENT_THREAD;
+
+    signal_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
+    __FUNCTION__ , process->pid , local_cxy );
+
+    // get lock protecting process th_tbl[]
+    spinlock_lock( &process->th_lock );
+
+    // loop on process threads to unblock all threads in cluster
+    // we use both "ltid" and "req_count" because it can exist "holes" in th_tbl
+    for( ltid = 0 , req_count = 0 ;
+         req_count < process->th_nr ;
+         ltid++ )
+    {
+        target = process->th_tbl[ltid];
+
+        if( target != NULL )             // thread found
+        {
+            req_count++;
+
+            // reset the global blocked bit in target thread descriptor.
+            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
+        }
+    }
+
+    // acknowledge client thread & unblock client thread if last response
+    client_cxy  = GET_CXY( client_xp );
+    client_ptr  = (thread_t *)GET_PTR( client_xp );
+    client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
+    if( hal_remote_atomic_add( rsp_xp , -1 ) == 1 )
+    {
+        thread_unblock( client_xp , THREAD_BLOCKED_RPC);
+        dev_pic_send_ipi( client_cxy , client_core->lid );
+    }
+
+    signal_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
+    __FUNCTION__ , process->pid , local_cxy , req_count );
+
+}  // end process_unblock()
+
+/////////////////////////////////////////
+void process_delete( process_t * process,
+                     xptr_t      rsp_xp,
+                     xptr_t      client_xp )
+{
+    thread_t * thread;        // pointer on target thread
+    uint32_t   ltid;          // index in process th_tbl
+    uint32_t   count;         // request counter
+    pid_t      pid;           // process PID
+    cxy_t      client_cxy;    // client thread cluster identifier
+    thread_t * client_ptr;    // client thread pointer
+    core_t   * client_core;   // client thread core pointer
+
+    // get process PID
+    pid = process->pid;
+
+    signal_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n",
+    __FUNCTION__ , pid , local_cxy , (uint32_t)hal_get_cycles() );
+
+    // loop on threads to release memory allocated to threads
+    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
+    {
+        thread = process->th_tbl[ltid];
+
+        if( thread != NULL )             // thread found
+        {
+            count++;
+
+            // detach thread from parent if attached
+            if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
+                thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
+
+            // detach thread from process
+            process_remove_thread( thread );
+
+            // remove thread from scheduler
+            sched_remove_thread( thread );
+
+            // release memory allocated to thread
+            thread_destroy( thread );
+        }
+    }
+
+    // release memory allocated to process descriptors
+    // for all clusters other than the owner cluster
+    if( local_cxy != CXY_FROM_PID( process->pid ) ) process_destroy( process );
+
+    // acknowledge client thread & unblock client thread if last response
+    client_cxy  = GET_CXY( client_xp );
+    client_ptr  = (thread_t *)GET_PTR( client_xp );
+    client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
+    if( hal_remote_atomic_add( rsp_xp , -1 ) == 1 )
+    {
+        thread_unblock( client_xp , THREAD_BLOCKED_RPC);
+        dev_pic_send_ipi( client_cxy , client_core->lid );
+    }
+
+    signal_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n",
+    __FUNCTION__ , pid , local_cxy , (uint32_t)hal_get_cycles() );
+
+}  // end process_delete()

 ///////////////////////////////////////////////
…
     return process_ptr;
-}
+
+}  // end process_get_local_copy()

 //////////////////////////////////////////////////////////////////////////////////////////
…
     remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );

-    // loop on all entries in source process fd_array
-    for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
+    // loop on all entries other than
+    // the three first entries: stdin/stdout/stderr
+    for( fd = 3 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
     {
         entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
…
             "parent process must be the reference process\n" );

-    process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
-    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , hal_get_cycles() );
+    fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );

     // allocate a process descriptor
…
     }

-    process_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n",
-    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
+    fork_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );

     // allocate a child PID from local cluster
…
     }

-    process_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n",
-    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , hal_get_cycles() );
+    fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );

     // initializes child process descriptor from parent process descriptor
…
                             parent_process_xp );

-    process_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
+    fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
     __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
…
     }

-    process_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
-    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
+    fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );

     // create child thread descriptor from parent thread descriptor
…
     }

-    process_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n",
-    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
+    fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );

     // update parent process GPT to set Copy_On_Write for shared data vsegs
…
     }

-    process_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
-    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
+    fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );

     // update children list in parent process
…
     *child_pid = new_pid;

+    fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
+
     return 0;

 }  // end process_make_fork()
+
+/* deprecated because we don't wand to destroy the existing process descriptor

 /////////////////////////////////////////////////////
…
     pid  = exec_info->pid;

-    // check local cluster is old process owner
+    // check local cluster is process owner
     assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
             "local cluster %x is not owner for process %x\n", local_cxy, pid );
…
     }

-    exec_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs / process %x / path = %s\n",
-    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path );
+    exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );

     // select a core in local cluster to execute the main thread
…
                  XPTR( local_cxy , &new->brothers_list ) );

-    // FIXME request destruction of old process copies and threads in all clusters
+    // request destruction of old process copies and threads in all clusters
+    process_sigaction( old , SIGKILL );

     // activate new thread
…
 }  // end process_make_exec()

+*/
+
+/////////////////////////////////////////////////////
+error_t process_make_exec( exec_info_t  * exec_info )
+{
+    char         * path;       // pathname to .elf file
+    process_t    * process;    // local pointer on old process
+    pid_t          pid;        // old process identifier
+    thread_t     * thread;     // pointer on new main thread
+    pthread_attr_t attr;       // main thread attributes
+    lid_t          lid;        // selected core local index
+    error_t        error;
+
+    // get .elf pathname and PID from exec_info
+    path = exec_info->path;
+    pid  = exec_info->pid;
+
+    // check local cluster is process owner
+    assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
+            "local cluster %x is not owner for process %x\n", local_cxy, pid );
+
+    exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / path = %s\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , path );
+
+    // get process local pointer
+    process = (process_t *)cluster_get_local_process_from_pid( pid );
+
+    assert( (process != NULL ) , __FUNCTION__ ,
+            "process %x not found in cluster %x\n", pid , local_cxy );
+
+    // reset the existing vmm
+    vmm_destroy( process );
+
+    exec_dmsg("\n[DBG] %s : core[%x,%d] VMM cleared\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
+
+    // block all existing process threads
+    process_sigaction( process , BLOCK_ALL_THREADS );
+
+    // kill all existing threads and process descriptors (other than owner)
+    process_sigaction( process , DELETE_ALL_THREADS );
+
+    // check no threads
+    assert( (process->th_nr == 0) , __FUNCTION__ , "no threads at this point" );
+
+    exec_dmsg("\n[DBG] %s : core[%x,%d] all threads deleted\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
+
+    // re-initialize VMM
+    vmm_init( process );
+
+    // register "code" and "data" vsegs as well as entry-point and vfs_bin_xp
+    // in VMM, using information contained in the elf file.
+    if( elf_load_process( path , process ) )
+    {
+        printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n",
+        __FUNCTION__, pid , path );
+        process_destroy( process );
+        return -1;
+    }
+
+    exec_dmsg("\n[DBG] %s : core[%x,%d] new vsegs registered / path = %s\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
+
+    // @@@
+    vmm_display( process , true );
+    // @@@
+
+    // select a core in local cluster to execute the new main thread
+    lid = cluster_select_local_core();
+
+    // initialize pthread attributes for new main thread
+    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
+    attr.cxy        = local_cxy;
+    attr.lid        = lid;
+
+    // create and initialize thread descriptor
+    error = thread_user_create( pid,
+                                (void *)process->vmm.entry_point,
+                                exec_info->args_pointers,
+                                &attr,
+                                &thread );
+    if( error )
+    {
+        printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n",
+        __FUNCTION__, pid , path );
+        process_destroy( process );
+        return -1;
+    }
+
+    exec_dmsg("\n[DBG] %s : core[%x,%d] created main thread %x for new process %x\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid );
+
+    // activate new thread
+    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
+
+    exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
+
+    return 0;
+
+}  // end process_make_exec()
+
+////////////////////////////////////////////
+void process_make_kill( process_t * process,
+                        uint32_t    sig_id )
+{
+    // this function must be executed by a thread running in owner cluster
+    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
+            "must execute in owner cluster" );
+
+    // analyse signal type
+    switch( sig_id )
+    {
+        case SIGSTOP:     // block all threads
+        {
+            process_sigaction( process , BLOCK_ALL_THREADS );
+        }
+        break;
+        case SIGCONT:     // unblock all threads
+        {
+            process_sigaction( process , UNBLOCK_ALL_THREADS );
+        }
+        break;
+        case SIGKILL:     // block all threads, then delete all threads
+        {
+            process_sigaction( process , BLOCK_ALL_THREADS );
+            process_sigaction( process , DELETE_ALL_THREADS );
+            process_destroy( process );
+        }
+        break;
+    }
+}  // end process_make_kill()
+
+////////////////////////////////////////////
+void process_make_exit( process_t * process,
+                        uint32_t    status )
+{
+    // this function must be executed by a thread running in owner cluster
+    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
+            "must execute in owner cluster" );
+
+    // block all threads in all clusters
+    process_sigaction( process , BLOCK_ALL_THREADS );
+
+    // delete all threads in all clusters
+    process_sigaction( process , DELETE_ALL_THREADS );
+
+    // delete local process descriptor
+    process_destroy( process );
+
+}  // end process_make_exit()

 //////////////////////////
 void process_init_create()
 {
-    exec_info_t   exec_info;     // structure to be passed to process_make_exec()
-    process_t *   process;       // local pointer on process_init descriptor
-    pid_t         pid;           // process_init identifier
-    error_t       error;
-
-    process_dmsg("\n[DBG] %s : enters in cluster %x\n",
-    __FUNCTION__ , local_cxy );
+    process_t    * process;       // local pointer on process_init descriptor
+    pid_t          pid;           // process_init identifier
+    thread_t     * thread;        // local pointer on main thread
+    pthread_attr_t attr;          // main thread attributes
+    lid_t          lid;           // selected core local index for main thread
+    error_t        error;
+
+    kinit_dmsg("\n[DBG] %s : core[%x,%d] enters\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );

     // allocates memory for process descriptor from local cluster
…
     {
         printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n",
         __FUNCTION__, local_cxy );
     }

-    // get new PID from local cluster
+    // get PID from local cluster
     error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid );
     if( error )
…
         printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n",
         __FUNCTION__, local_cxy );
-    }
-
-    // initialise the process desciptor (parent is local kernel process)
-    process_reference_init( process,
+        process_destroy( process );
+    }
+
+    assert( (LPID_FROM_PID(pid) == 1) , __FUNCTION__ , "LPID must be 1 for process_init" );
+
+    // initialize process descriptor / parent is local process_zero
+    process_reference_init( process,
                             pid,
-                            process_zero.pid,
+                            0,
                             XPTR( local_cxy , &process_zero ) );

-    // initialize the exec_info structure
-    exec_info.pid     = pid;
-    exec_info.args_nr = 0;
-    exec_info.envs_nr = 0;
-    strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH );
-
-    // update process descriptor and create thread descriptor
-    error = process_make_exec( &exec_info );
-
+    kinit_dmsg("\n[DBG] %s : core[%x,%d] / process initialised\n",
+    __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
+
+    // register "code" and "data" vsegs as well as entry-point
+    // in process VMM, using information contained in the elf file.
+    if( elf_load_process( CONFIG_PROCESS_INIT_PATH , process ) )
+    {
+        printk("\n[PANIC] in %s : cannot access .elf file / path = %s\n",
+        __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
+        process_destroy( process );
+    }
+
+    kinit_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );
+
+    // select a core in local cluster to execute the main thread
+    lid = cluster_select_local_core();
+
+    // initialize pthread attributes for main thread
+    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
+    attr.cxy        = local_cxy;
+    attr.lid        = lid;
+
+    // create and initialize thread descriptor
+    error = thread_user_create( pid,
+                                (void *)process->vmm.entry_point,
+                                NULL,
+                                &attr,
+                                &thread );
     if( error )
-    {
-        printk("\n[PANIC] in %s : cannot exec %s in cluster %x\n",
-        __FUNCTION__, CONFIG_PROCESS_INIT_PATH , local_cxy );
-    }
-
-    process_dmsg("\n[DBG] %s : exit in cluster %x\n",
-    __FUNCTION__ , local_cxy );
-
+    {
+        printk("\n[PANIC] in %s : cannot create main thread / path = %s\n",
+        __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
+        process_destroy( process );
+    }
+
+    // activate thread
+    thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
+
     hal_fence();

+    kinit_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n",
+    __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread );
+
 }  // end process_init_create()
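The new process_make_kill() above must run in the owner cluster of the target process. As a rough illustration of how a caller might reach it, here is a minimal sketch of a "kill" dispatch path; sys_kill() is only mentioned in the process.h comments, so its exact signature, and the client stub matching rpc_process_make_kill_server (index 5 in the RPC table), are assumptions, not code from this changeset:

    // hypothetical caller-side dispatch: run process_make_kill() in the owner cluster
    int sys_kill_sketch( pid_t pid , uint32_t sig_id )
    {
        cxy_t owner_cxy = CXY_FROM_PID( pid );     // owner cluster is encoded in the PID MSB

        if( owner_cxy == local_cxy )               // already in the owner cluster
        {
            process_t * process = (process_t *)cluster_get_local_process_from_pid( pid );
            if( process == NULL ) return -1;
            process_make_kill( process , sig_id ); // block / unblock / delete all threads
        }
        else                                       // delegate to the owner cluster through an RPC
        {
            // assumed client stub matching rpc_process_make_kill_server (index 5):
            // rpc_process_make_kill_client( owner_cxy , pid , sig_id );
        }
        return 0;
    }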
trunk/kernel/kern/process.h
--- r408
+++ r409

 /*********************************************************************************************
+ * This enum defines the actions that can be executed by the process_signal() function.
+ ********************************************************************************************/
+
+enum process_sigactions
+{
+    BLOCK_ALL_THREADS,
+    UNBLOCK_ALL_THREADS,
+    DELETE_ALL_THREADS,
+};
+
+/*********************************************************************************************
  * This structure defines an array of extended pointers on the open file descriptors
  * for a given process. We use an extended pointer because the open file descriptor
…
  * - The PID 16 LSB bits contain the LPID (Local Process Index)
  * - The PID 16 MSB bits contain the owner cluster CXY.
- * In each cluster, the process manager allocates LPID values for the process that are
- * allocated to this cluster.
- * The process descriptor for a PID process is replicated in all clusters containing
- * at least one thread of the PID process, with the following rules :
- *
+ * In each cluster, the process manager allocates the LPID values for the process that
+ * are owned by this cluster.
+ * The process descriptor is replicated in all clusters containing at least one thread
+ * of the PID process, with the following rules :
  * 1) The <pid>, <ppid>, <ref_xp>, <vfs_root_xp>, <vfs_bin_xp> fields are defined
  *    in all process descriptor copies.
  * 2) The <vfs_cwd_xp> and associated <cwd_lock>, that can be dynamically modified,
  *    are only defined in the reference process descriptor.
- * 2) The <vmm>, containing the list of registered vsegs, and the page table, are only
- *    complete in the reference process cluster, other copies are read-only caches.
+ * 2) The <vmm>, containing the VSL (list of registered vsegs), and the GPT (generic
+ *    page table), are only complete in the reference process cluster, other copies
+ *    are actually use as read-only caches.
  * 3) the <fd_array>, containing extended pointers on the open file descriptors, is only
  *    complete in the reference process cluster, other copies are read-only caches.
…
  * 6) The <brothers_list>, <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
  *    are defined in all process descriptors copies.
- * 7) The <sig_mgr> field is only defined in the reference cluster. TODO
  ********************************************************************************************/
…

     remote_spinlock_t sync_lock;    /*! lock protecting sem,mutex,barrier,condvar lists */
-
-    sig_mgr_t         sig_mgr;      /*! embedded signal manager TODO [AG] */
 }
 process_t;
…
 /*********************************************************************************************
  * This structure defines the information required by the process_make_exec() function
- * to create a new reference process descriptor, and the associated main thread,
- * in the parent process owner cluster.
+ * to create a new reference process descriptor, and the associated main thread.
  ********************************************************************************************/

…
 /*********************************************************************************************
  * This function allocates memory and initializes the "process_init" descriptor and the
- * associated "thread_init" descriptor in the local cluster. It is called once at the end
- * of the kernel initialisation procedure, by the local kernel process.
+ * associated "thread_init" descriptor. It is called once at the end of the kernel
+ * initialisation procedure, by the kernel process in cluster_IO.
  * The "process_init" is the first user process, and all other user processes will be forked
  * from this process. The code executed by "process_init" is stored in a .elf file, whose
- * pathname is defined by the CONFIG_PROCESS_INIT_PATH argument.
- * Practically, it builds the exec_info structure, and calls the process_make_exec()
- * function, that make the real job.
+ * pathname is defined by the CONFIG_PROCESS_INIT_PATH configuration variable.
+ * The process_init streams are defined by the CONFIG_INIT_[STDIN/STDOUT/STDERR] variables.
+ * Its local process identifier is 1, and parent process is the local kernel process_zero.
  ********************************************************************************************/
 void process_init_create();
…
  * descriptor, defined by the <model_xp> argument. The <process> descriptor, the <pid>, and
  * the <ppid> arguments must be previously defined by the caller.
- * It can be called by three functions, depending on the process type:
- * 1) if "process" is the user "process_init", the parent is the kernel process. It is
+ * It can be called by two functions, depending on the process type:
+ * 1) if "process" is the "process_init", the parent is the kernel process. It is
  *    called once, by the process_init_create() function in cluster[xmax-1][ymax-1].
  * 2) if the caller is the process_make_fork() function, the model is generally a remote
  *    process, that is also the parent process.
  * 3) if the caller is the process_make_exec() function, the model is always a local process,
- *    but the parent is the parent of the model process.
- *
+ *    and the parent is the parent of the model process. DEPRECATED [AG]
+
  * The following fields are initialised (for all process but process_zero).
  * - It set the pid / ppid / ref_xp fields.
- * - It initializes an empty VMM (no vsegs registered in VSL and GPT).
+ * - It initializes the VMM (register the kentry, args, envs vsegs in VSL)
  * - It initializes the FDT, defining the three pseudo files STDIN / STDOUT / STDERR.
  * - It set the root_xp, bin_xp, cwd_xp fields.
…

 /*********************************************************************************************
- * This function kills a user process in a given cluster.
- * It can be directly called in the reference cluster, or it can be called through the
- * PROCESS_KILL RPC.
- * - In a first loop, it set the THREAD_SIG_KILL signal to all threads of process.
- * - In a second loop, it wait, for each thread the reset of the THREAD_SIG_KILL signal
- *   by the scheduler, and completes the thread descriptor destruction.
- *********************************************************************************************
- * @ process   : pointer on the process descriptor.
- ********************************************************************************************/
- void process_kill( process_t * process );
+ * This function returns a printable string defining the action for process_signa().
+ *********************************************************************************************
+ * @ action_type   : BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS
+ * @ return a string pointer.
+ ********************************************************************************************/
+char * process_action_str( uint32_t action_type );
+
+/*********************************************************************************************
+ * This function allows any thread running in any cluster to block, unblock or delete
+ * all threads of a given process identified by the <process> argument, dependig on the
+ * <acion_type> argument.
+ * It can be called by the sys_kill() or sys_exit() functions to handle the "kill" & "exit"
+ * system calls, or by the process_make_exec() function to handle the "exec" system call.
+ * It must be executed in the owner cluster for the target process (using the relevant RPC
+ * (RPC_PROCESS_SIGNAL or RPC_PROCESS_EXEC) if the client thread in not running in the
+ * owner cluster.
+ * It uses the multicast, non blocking, RPC_PROCESS_KILL to send the signal to all process
+ * copies in parallel, block & deschedule when all signals have been sent, and finally
+ * returns only when all responses have been received and the operation is completed.
+ *********************************************************************************************
+ * @ process      : pointer on the process descriptor.
+ * @ action_type  : BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS
+ ********************************************************************************************/
+void process_sigaction( process_t * process,
+                        uint32_t    action_type );
+
+/*********************************************************************************************
+ * This function blocks all threads of a given user process in a given cluster.
+ * It is always called by a local RPC thread, through the multicast RPC_PROCESS_KILL.
+ * It loop on all local threads of the process, requesting the relevant schedulers to
+ * block and deschedule these threads, using IPI if required. The threads are not detached
+ * from the scheduler, and not detached from the local process.
+ * It acknowledges the client thread in the owner cluster only when all process threads
+ * are descheduled and blocked on the BLOCKED_GLOBAL condition, using the <rsp_xp> argument.
+ *********************************************************************************************
+ * @ process     : pointer on the target process descriptor.
+ * @ rsp_xp      : extended pointer on the response counter.
+ * # client_xp   : extended pointer on client thread descriptor.
+ ********************************************************************************************/
+void process_block( process_t * process,
+                    xptr_t      rsp_xp,
+                    xptr_t      client_xp );
+
+/*********************************************************************************************
+ * This function unblocks all threads of a given user process in a given cluster.
+ * It is always called by a local RPC thread, through the multicast RPC_PROCESS_KILL.
+ * It loops on local threads of the process, to reset the BLOCKED_GLOBAL bit in all threads.
+ * It acknowledges directly the client thread in the owner cluster when this is done,
+ * using the <rsp_xp> argument.
+ *********************************************************************************************
+ * @ process     : pointer on the process descriptor.
+ * @ rsp_xp      : extended pointer on the response counter.
+ * # client_xp   : extended pointer on client thread descriptor.
+ ********************************************************************************************/
+void process_unblock( process_t * process,
+                      xptr_t      rsp_xp,
+                      xptr_t      client_xp );
+
+/*********************************************************************************************
+ * This function delete all threads descriptors, of given user process in a given cluster.
+ * It is always called by a local RPC thread, through the multicast RPC_PROCESS_KILL.
+ * It detach all process threads from the scheduler, detach the threads from the local
+ * process, and release the local memory allocated to threads descriptors (including the
+ * associated structures such as CPU and FPU context). Finally, it release the memory
+ * allocated to the local process descriptor itself, but only when the local cluster
+ * is NOT the process owner, but only a copy. It acknowledges directly the client thread
+ * in the owner cluster, using ithe <rsp_xp> argument.
+ *********************************************************************************************
+ * @ process     : pointer on the process descriptor.
+ * @ rsp_xp      : extended pointer on the response counter.
+ * # client_xp   : extended pointer on client thread descriptor.
+ ********************************************************************************************/
+void process_delete( process_t * process,
+                     xptr_t      rsp_xp,
+                     xptr_t      client_xp );

 /*********************************************************************************************
…

 /*********************************************************************************************
- * This function implements the exec() system call, and is called by the sys_exec() function.
- * It is also called by the process_init_create() function to build the "init" process.
+ * This function implements the "exec" system call, and is called by the sys_exec() function.
  * The "new" process keep the "old" process PID and PPID, all open files, and env variables,
  * the vfs_root and vfs_cwd, but build a brand new memory image (new VMM from the new .elf).
- * It actually creates a "new" reference process descriptor, saves all relevant information
- * from the "old" reference process descriptor to the "new" process descriptor.
+ * It actually creates a "new" reference process descriptor, and copies all relevant
+ * information from the "old" process descriptor to the "new" process descriptor.
  * It completes the "new" process descriptor, from information found in the <exec_info>
  * structure (defined in the process.h file), that must be built by the caller.
  * It creates and initializes the associated main thread. It finally destroys all copies
- * of the "old" process in all clusters, and all the old associated threads.
+ * of the "old" process in all clusters, and destroys all old associated threads.
  * It is executed in the local cluster, that becomes both the "owner" and the "reference"
  * cluster for the "new" process.
…

 /*********************************************************************************************
- * This function implement the fork() system call, and is called by the sys_fork() function.
+ * This function implements the "fork" system call, and is called by the sys_fork() function.
  * It allocates memory and initializes a new "child" process descriptor, and the
  * associated "child" thread descriptor in the local cluster. This function can involve
  * up to three different clusters :
  * - the local (child) cluster can be any cluster defined by the sys_fork function.
  * - the parent cluster must be the reference cluster for the parent process.
  * - the client cluster containing the thread requesting the fork can be any cluster.
  * The new "child" process descriptor is initialised from informations found in the "parent"
  * reference process descriptor, containing the complete process description.
…
                           pid_t            * child_pid,
                           struct thread_s ** child_thread_ptr );
+
+/*********************************************************************************************
+ * This function implement the "exit" system call, and is called by the sys_exit() function.
+ * It must be executed by a thread running in the calling process owner cluster.
+ * It uses twice the multicast RPC_PROCESS_SIGNAL to first block all process threads
+ * in all clusters, and then delete all thread and process descriptors.
+ *********************************************************************************************
+ * @ process  : pointer on process descriptor in owner cluster.
+ * @ status   : exit return value.
+ ********************************************************************************************/
+void process_make_exit( process_t * process,
+                        uint32_t    status );
+
+/*********************************************************************************************
+ * This function implement the "kill" system call, and is called by the sys_kill() function.
+ * It must be executed by a thread running in the target process owner cluster.
+ * Only the SIGKILL, SIGSTOP, and SIGCONT signals are supported.
+ * User defined handlers are not supported.
+ * It uses once or twice the multicast RPC_PROCESS_SIGNAL to block, unblock or delete
+ * all process threads in all clusters, and then delete process descriptors.
+ *********************************************************************************************
+ * @ process  : pointer on process descriptor in owner cluster.
+ * @ sig_id   : signal type.
+ ********************************************************************************************/
+void process_make_kill( process_t * process,
+                        uint32_t    sig_id );


 /******************** File Management Operations ****************************************/
…

 /*********************************************************************************************
- * This function copies all non-zero entries from a remote <src_xp> fd_array,
- * embedded in a process descriptor, to another remote <dst_xp> fd_array, embedded
- * in another process descriptor. The calling thread can be running in any cluster.
+ * This function copies all non-zero entries (other than the three first stdin/stdout/stderr)
+ * from a remote <src_xp> fd_array, embedded in a process descriptor, to another remote
+ * <dst_xp> fd_array, embedded in another process descriptor.
+ * The calling thread can be running in any cluster.
  * It takes the remote lock protecting the <src_xp> fd_array during the copy.
  * For each involved file descriptor, the refcount is incremented.
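The comment block above documents the PID layout (owner cluster CXY in the 16 MSB, local process index LPID in the 16 LSB) that CXY_FROM_PID() and LPID_FROM_PID() rely on throughout this changeset. As a sketch only, that layout implies macros along these lines; the real definitions live elsewhere in the kernel headers, and PID_FROM_CXY_LPID is a hypothetical name used here for illustration:

    // PID = (owner cluster CXY << 16) | LPID, as described in the process_t comment block
    #define PID_FROM_CXY_LPID( cxy , lpid )  ((((pid_t)(cxy)) << 16) | ((lpid) & 0xFFFF))
    #define CXY_FROM_PID( pid )              ((cxy_t)((pid) >> 16))
    #define LPID_FROM_PID( pid )             ((lpid_t)((pid) & 0xFFFF))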
trunk/kernel/kern/rpc.c
r408 r409 49 49 { 50 50 &rpc_pmem_get_pages_server, // 0 51 &rpc_p rocess_make_exec_server,// 152 &rpc_process_make_ fork_server, // 253 &rpc_process_ kill_server,// 354 &rpc_ thread_user_create_server,// 455 &rpc_ thread_kernel_create_server,// 556 &rpc_ signal_rise_server, // 657 &rpc_ undefined,// 758 &rpc_ undefined, // 859 &rpc_ undefined,// 951 &rpc_pmem_release_pages_server, // 1 52 &rpc_process_make_exec_server, // 2 53 &rpc_process_make_fork_server, // 3 54 &rpc_process_make_exit_server, // 4 55 &rpc_process_make_kill_server, // 5 56 &rpc_thread_user_create_server, // 6 57 &rpc_thread_kernel_create_server, // 7 58 &rpc_thread_kill_server, // 8 59 &rpc_process_sigaction_server, // 9 60 60 61 61 &rpc_vfs_inode_create_server, // 10 … … 88 88 } 89 89 90 ///////////////////////////////////////////////////////////////////////////////////////// 91 // [0] Marshaling functions attached to RPC_PMEM_GET_PAGES 90 /***************************************************************************************/ 91 /************ Generic functions supporting RPCs : client side **************************/ 92 /***************************************************************************************/ 93 94 /////////////////////////////////////// 95 void rpc_send( cxy_t server_cxy, 96 rpc_desc_t * rpc, 97 bool_t block ) 98 { 99 error_t error; 100 101 thread_t * this = CURRENT_THREAD; 102 core_t * core = this->core; 103 104 // register client thread pointer and core lid in RPC descriptor 105 rpc->thread = this; 106 rpc->lid = core->lid; 107 108 // build an extended pointer on the RPC descriptor 109 xptr_t desc_xp = XPTR( local_cxy , rpc ); 110 111 // get local pointer on rpc_fifo in remote cluster, with the 112 // assumption that local pointers are identical in all clusters 113 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 114 115 // try to post an item in remote fifo 116 // deschedule and retry if remote fifo full 117 do 118 { 119 error = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), 120 (uint64_t )desc_xp ); 121 if ( error ) 122 { 123 printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n", 124 __FUNCTION__ , local_cxy , server_cxy ); 125 126 if( thread_can_yield() ) sched_yield("RPC fifo full"); 127 } 128 } 129 while( error ); 130 131 hal_fence(); 132 133 // send IPI to the remote core corresponding to the client core 134 dev_pic_send_ipi( server_cxy , core->lid ); 135 136 // wait RPC completion if blocking 137 // - busy waiting policy during kernel_init, or if threads cannot yield 138 // - block and deschedule in all other cases 139 if ( block ) 140 { 141 if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting 142 { 143 144 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n" 145 " rpc = %d / server = %x / cycle %d\n", 146 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) , 147 rpc->index , server_cxy , hal_time_stamp() ); 148 149 while( rpc->response ) hal_fixed_delay( 100 ); 150 151 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n", 152 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ); 153 154 } 155 else // block & deschedule 156 { 157 158 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n" 159 " rpc = %d / server = %x / cycle %d\n", 160 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) , 161 rpc->index , server_cxy , hal_time_stamp() ); 162 163 thread_block( 
this , THREAD_BLOCKED_RPC ); 164 sched_yield("BLOCKED on RPC"); 165 166 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n", 167 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ); 168 169 } 170 171 // check response available 172 assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" ); 173 174 // acknowledge the IPI sent by the server 175 dev_pic_ack_ipi(); 176 } 177 178 } // end rpc_send() 179 180 181 /***************************************************************************************/ 182 /************ Generic functions supporting RPCs : server side **************************/ 183 /***************************************************************************************/ 184 185 //////////////// 186 void rpc_check() 187 { 188 error_t error; 189 thread_t * thread; 190 uint32_t sr_save; 191 192 bool_t found = false; 193 thread_t * this = CURRENT_THREAD; 194 core_t * core = this->core; 195 scheduler_t * sched = &core->scheduler; 196 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 197 198 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n", 199 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() ); 200 201 // interrupted thread not preemptable during RPC chek 202 hal_disable_irq( &sr_save ); 203 204 // check RPC FIFO not empty and no RPC thread handling it 205 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 206 { 207 // search one non blocked RPC thread 208 list_entry_t * iter; 209 LIST_FOREACH( &sched->k_root , iter ) 210 { 211 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 212 if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) ) 213 { 214 found = true; 215 break; 216 } 217 } 218 219 // create new RPC thread if not found 220 if( found == false ) 221 { 222 error = thread_kernel_create( &thread, 223 THREAD_RPC, 224 &rpc_thread_func, 225 NULL, 226 this->core->lid ); 227 if( error ) 228 { 229 printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n", 230 __FUNCTION__ , local_cxy ); 231 } 232 else 233 { 234 // unblock created RPC thread 235 thread->blocked = 0; 236 237 // update core descriptor counter 238 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 239 240 grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / cycle %d\n", 241 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() ); 242 243 } 244 } 245 } 246 247 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n", 248 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() ); 249 250 // interrupted thread deschedule always 251 sched_yield("IPI received"); 252 253 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n", 254 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() ); 255 256 // interrupted thread restore IRQs after resume 257 hal_restore_irq( sr_save ); 258 259 } // end rpc_check() 260 261 262 ////////////////////// 263 void rpc_thread_func() 264 { 265 uint32_t count; // handled RPC requests counter 266 error_t empty; // local RPC fifo state 267 xptr_t desc_xp; // extended pointer on RPC request 268 cxy_t desc_cxy; // RPC request cluster (client) 269 rpc_desc_t * desc_ptr; // RPC request local pointer 270 uint32_t index; // RPC request index 271 uint32_t responses; // number of responses received by client 272 thread_t * thread_ptr; // local pointer on client thread 273 lid_t core_lid; // 
local index of client core 274 275 // makes RPC thread not preemptable 276 hal_disable_irq( NULL ); 277 278 thread_t * this = CURRENT_THREAD; 279 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 280 281 // two embedded loops: 282 // - external loop : "infinite" RPC thread 283 // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests 284 285 while(1) // external loop 286 { 287 // try to take RPC_FIFO ownership 288 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) ) 289 { 290 // initializes RPC requests counter 291 count = 0; 292 293 // acknowledge local IPI 294 dev_pic_ack_ipi(); 295 296 // exit internal loop in three cases: 297 // - RPC fifo is empty 298 // - ownership has been lost (because descheduling) 299 // - max number of RPCs is reached 300 while( 1 ) // internal loop 301 { 302 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 303 304 if ( empty == 0 ) // one RPC request found 305 { 306 // get client cluster and pointer on RPC descriptor 307 desc_cxy = (cxy_t)GET_CXY( desc_xp ); 308 desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp ); 309 310 // get rpc index from RPC descriptor 311 index = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) ); 312 313 grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n", 314 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() ); 315 316 // call the relevant server function 317 rpc_server[index]( desc_xp ); 318 319 grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n", 320 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() ); 321 322 // increment handled RPC counter 323 count++; 324 325 // decrement response counter in RPC descriptor 326 responses = hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1); 327 328 // unblock client thread and send IPI to client core if last response 329 if( responses == 1 ) 330 { 331 // get pointer on client thread and unblock it 332 thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread)); 333 thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC ); 334 335 hal_fence(); 336 337 // get client core lid and send IPI 338 core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid)); 339 dev_pic_send_ipi( desc_cxy , core_lid ); 340 } 341 } 342 343 // chek exit condition 344 if( local_fifo_is_empty( rpc_fifo ) || 345 (rpc_fifo->owner != this->trdid) || 346 (count >= CONFIG_RPC_PENDING_MAX) ) break; 347 } // end internal loop 348 349 // release rpc_fifo ownership if not lost 350 if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0; 351 } 352 353 // sucide if too many RPC threads in cluster 354 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) 355 { 356 357 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n", 358 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() ); 359 360 // update RPC threads counter 361 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 ); 362 363 // suicide 364 thread_kill( this ); 365 } 366 367 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n", 368 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() ); 369 370 // deschedule without blocking 371 sched_yield("RPC fifo empty or too much work"); 372 373 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n", 374 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() ); 375 376 } // end external loop 377 378 } // end rpc_thread_func() 379 
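For readers following the marshaling functions below, here is a minimal sketch, assuming only the rpc_desc_t fields and the rpc_send() prototype defined above, of the common pattern used by the blocking client-side functions; the wrapper name, the chosen RPC index and the argument layout are illustrative, not part of this changeset.

// minimal sketch (illustrative only) : blocking client-side pattern
static void example_blocking_rpc_client( cxy_t      server_cxy,
                                         uint64_t   input,
                                         uint64_t * output )
{
    // RPC descriptor allocated in the client stack
    rpc_desc_t rpc;

    // initialise header : index selects the server function,
    // response is the number of expected acknowledgements
    rpc.index    = RPC_PMEM_GET_PAGES;
    rpc.response = 1;

    // marshal input argument(s)
    rpc.args[0]  = input;

    // post the request in the remote fifo, then block and deschedule
    // until the server decrements rpc.response and unblocks this thread
    rpc_send( server_cxy , &rpc , true );

    // unmarshal output argument(s)
    *output = rpc.args[1];
}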
380 381 ///////////////////////////////////////////////////////////////////////////////////////// 382 // [0] Marshaling functions attached to RPC_PMEM_GET_PAGES (blocking) 92 383 ///////////////////////////////////////////////////////////////////////////////////////// 93 384 … … 97 388 page_t ** page ) // out 98 389 { 99 100 101 390 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 391 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 392 CURRENT_THREAD->core->lid , hal_time_stamp() ); 102 393 103 394 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 112 403 113 404 // register RPC request in remote RPC fifo (blocking function) 114 rpc_send _sync( cxy , &rpc);405 rpc_send( cxy , &rpc , true ); 115 406 116 407 // get output arguments from RPC descriptor 117 408 *page = (page_t *)(intptr_t)rpc.args[1]; 118 409 119 120 121 410 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 411 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 412 CURRENT_THREAD->core->lid , hal_time_stamp() ); 122 413 } 123 414 … … 125 416 void rpc_pmem_get_pages_server( xptr_t xp ) 126 417 { 127 128 129 418 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 419 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 420 CURRENT_THREAD->core->lid , hal_time_stamp() ); 130 421 131 422 // get client cluster identifier and pointer on RPC descriptor … … 134 425 135 426 // get input arguments from client RPC descriptor 136 uint32_t order = hal_remote_lw( XPTR( cxy , &desc->args[0] ) );427 uint32_t order = (uint32_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) ); 137 428 138 429 // call local pmem allocator … … 142 433 hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page ); 143 434 144 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 145 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 146 CURRENT_THREAD->core->lid , hal_time_stamp() ); 147 } 148 149 ///////////////////////////////////////////////////////////////////////////////////////// 150 // [1] Marshaling functions attached to RPC_PROCESS_MAKE_EXEC 435 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 436 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 437 CURRENT_THREAD->core->lid , hal_time_stamp() ); 438 } 439 440 ///////////////////////////////////////////////////////////////////////////////////////// 441 // [1] Marshaling functions attached to RPC_PMEM_RELEASE_PAGES (blocking) 442 ///////////////////////////////////////////////////////////////////////////////////////// 443 444 ////////////////////////////////////////////////// 445 void rpc_pmem_release_pages_client( cxy_t cxy, 446 page_t * page ) // out 447 { 448 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 449 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 450 CURRENT_THREAD->core->lid , hal_time_stamp() ); 451 452 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 453 454 // initialise RPC descriptor header 455 rpc_desc_t rpc; 456 rpc.index = RPC_PMEM_RELEASE_PAGES; 457 rpc.response = 1; 458 459 // set input arguments in RPC descriptor 460 rpc.args[0] = (uint64_t)(intptr_t)page; 461 462 // register RPC request in remote RPC fifo (blocking function) 463 rpc_send( cxy , &rpc , true ); 464 465 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 466 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 467 CURRENT_THREAD->core->lid , hal_time_stamp() ); 468 } 469 470 
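As a short illustrative pairing (remote_cxy and order are assumed variables, not taken from this changeset), a page frame obtained from a remote cluster with RPC [0] is returned to that same cluster with RPC [1]:

page_t * page;
rpc_pmem_get_pages_client( remote_cxy , order , &page );   // allocate in remote cluster
/* ... use the remote page ... */
rpc_pmem_release_pages_client( remote_cxy , page );        // return it to the same cluster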
/////////////////////////////////////////////// 471 void rpc_pmem_release_pages_server( xptr_t xp ) 472 { 473 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 474 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 475 CURRENT_THREAD->core->lid , hal_time_stamp() ); 476 477 // get client cluster identifier and pointer on RPC descriptor 478 cxy_t cxy = (cxy_t)GET_CXY( xp ); 479 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 480 481 // get input arguments from client RPC descriptor 482 page_t * page = (page_t *)(intptr_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) ); 483 484 // release memory to local pmem 485 kmem_req_t req; 486 req.type = KMEM_PAGE; 487 req.ptr = page; 488 kmem_free( &req ); 489 490 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 491 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 492 CURRENT_THREAD->core->lid , hal_time_stamp() ); 493 } 494 495 ///////////////////////////////////////////////////////////////////////////////////////// 496 // [2] Marshaling functions attached to RPC_PROCESS_MAKE_EXEC (blocking) 151 497 ///////////////////////////////////////////////////////////////////////////////////////// 152 498 … … 156 502 error_t * error ) // out 157 503 { 158 159 160 504 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 505 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 506 CURRENT_THREAD->core->lid , hal_time_stamp() ); 161 507 162 508 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 171 517 172 518 // register RPC request in remote RPC fifo (blocking function) 173 rpc_send _sync( cxy , &rpc);519 rpc_send( cxy , &rpc , true ); 174 520 175 521 // get output arguments from RPC descriptor 176 522 *error = (error_t)rpc.args[1]; 177 523 178 179 180 524 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 525 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 526 CURRENT_THREAD->core->lid , hal_time_stamp() ); 181 527 } 182 528 … … 184 530 void rpc_process_make_exec_server( xptr_t xp ) 185 531 { 532 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 533 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 534 CURRENT_THREAD->core->lid , hal_time_stamp() ); 535 186 536 exec_info_t * ptr; // local pointer on remote exec_info structure 187 537 exec_info_t info; // local copy of exec_info structure 188 538 error_t error; // local error error status 189 190 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",191 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,192 CURRENT_THREAD->core->lid , hal_time_stamp() );193 539 194 540 // get client cluster identifier and pointer on RPC descriptor … … 210 556 hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error ); 211 557 212 213 214 215 } 216 217 ///////////////////////////////////////////////////////////////////////////////////////// 218 // [ 2] Marshaling functions attached to RPC_PROCESS_MAKE_FORK558 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 559 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 560 CURRENT_THREAD->core->lid , hal_time_stamp() ); 561 } 562 563 ///////////////////////////////////////////////////////////////////////////////////////// 564 // [3] Marshaling functions attached to RPC_PROCESS_MAKE_FORK (blocking) 219 565 ///////////////////////////////////////////////////////////////////////////////////////// 220 566 … … 227 573 error_t * error ) // out 228 574 { 229 230 231 575 rpc_dmsg("\n[DBG] %s : enter / thread %x on 
core[%x,%d] / cycle %d\n", 576 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 577 CURRENT_THREAD->core->lid , hal_time_stamp() ); 232 578 233 579 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 243 589 244 590 // register RPC request in remote RPC fifo (blocking function) 245 rpc_send _sync( cxy , &rpc);591 rpc_send( cxy , &rpc , true ); 246 592 247 593 // get output arguments from RPC descriptor … … 250 596 *error = (error_t)rpc.args[4]; 251 597 252 253 254 598 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 599 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 600 CURRENT_THREAD->core->lid , hal_time_stamp() ); 255 601 } 256 602 … … 258 604 void rpc_process_make_fork_server( xptr_t xp ) 259 605 { 606 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 607 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 608 CURRENT_THREAD->core->lid , hal_time_stamp() ); 609 260 610 xptr_t ref_process_xp; // extended pointer on reference parent process 261 611 xptr_t parent_thread_xp; // extended pointer on parent thread … … 264 614 error_t error; // local error status 265 615 266 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",267 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,268 CURRENT_THREAD->core->lid , hal_time_stamp() );269 270 616 // get client cluster identifier and pointer on RPC descriptor 271 617 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); … … 287 633 hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error ); 288 634 289 290 291 292 } 293 294 ///////////////////////////////////////////////////////////////////////////////////////// 295 // [ 3] Marshaling functions attached to RPC_PROCESS_KILL635 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 636 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 637 CURRENT_THREAD->core->lid , hal_time_stamp() ); 638 } 639 640 ///////////////////////////////////////////////////////////////////////////////////////// 641 // [4] Marshaling functions attached to RPC_PROCESS_MAKE_EXIT (blocking) 296 642 ///////////////////////////////////////////////////////////////////////////////////////// 297 643 298 644 /////////////////////////////////////////////////// 299 void rpc_process_kill_client( process_t * process ) 300 { 301 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 302 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 303 CURRENT_THREAD->core->lid , hal_time_stamp() ); 304 305 // only reference cluster can send this RPC 306 assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ , 307 "caller must be reference process cluster\n"); 308 309 // get local process index in reference cluster 310 lpid_t lpid = LPID_FROM_PID( process->pid ); 311 312 // get local process manager pointer 313 pmgr_t * pmgr = &LOCAL_CLUSTER->pmgr; 314 315 // get number of copies 316 uint32_t copies = pmgr->copies_nr[lpid]; 317 318 // initialise RPC descriptor 319 rpc_desc_t rpc; 320 rpc.index = RPC_PROCESS_KILL; 321 rpc.response = copies; 322 rpc.args[0] = (uint64_t)process->pid; 323 324 // loop on list of copies to send RPC 325 xptr_t iter; 326 XLIST_FOREACH( XPTR( local_cxy , &pmgr->copies_root[lpid] ) , iter ) 327 { 328 // get cluster_identifier for current copy 329 cxy_t target_cxy = GET_CXY( iter ); 330 331 // register RPC request in remote RPC fifo ... 
but the reference 332 if( target_cxy != local_cxy ) rpc_send_sync( target_cxy , &rpc ); 333 } 334 335 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 336 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 337 CURRENT_THREAD->core->lid , hal_time_stamp() ); 645 void rpc_process_make_exit_client( cxy_t cxy, 646 process_t * process, 647 uint32_t status ) 648 { 649 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 650 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 651 CURRENT_THREAD->core->lid , hal_time_stamp() ); 652 653 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 654 655 // initialise RPC descriptor header 656 rpc_desc_t rpc; 657 rpc.index = RPC_PROCESS_MAKE_EXIT; 658 rpc.response = 1; 659 660 // set input arguments in RPC descriptor 661 rpc.args[0] = (uint64_t)(intptr_t)process; 662 rpc.args[1] = (uint64_t)status; 663 664 // register RPC request in remote RPC fifo (blocking function) 665 rpc_send( cxy , &rpc , true ); 666 667 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 668 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 669 CURRENT_THREAD->core->lid , hal_time_stamp() ); 338 670 } 339 671 340 ///////////////////////////////////////// 341 void rpc_process_ kill_server( xptr_t xp )342 { 343 pid_t pid; 344 process_t * process;345 346 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 347 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,348 CURRENT_THREAD->core->lid , hal_time_stamp() );672 ////////////////////////////////////////////// 673 void rpc_process_make_exit_server( xptr_t xp ) 674 { 675 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 676 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 677 CURRENT_THREAD->core->lid , hal_time_stamp() ); 678 679 process_t * process; 680 uint32_t status; 349 681 350 682 // get client cluster identifier and pointer on RPC descriptor … … 352 684 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 353 685 354 // get pid argument from RPC descriptor 355 pid = (pid_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 356 357 // get process pointer to call local kernel function 358 process = cluster_get_local_process_from_pid( pid ); 359 360 if( process == NULL ) // process not found => do nothing 361 { 362 printk("\n[WARNING] in %s : process %x not found in cluster %x\n", 363 __FUNCTION__ , pid , local_cxy ); 364 } 365 else // destroy process 366 { 367 process_kill( process ); 368 } 369 370 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 371 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 372 CURRENT_THREAD->core->lid , hal_time_stamp() ); 686 // get arguments from RPC descriptor 687 process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 688 status = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 689 690 // call local kernel function 691 process_make_exit( process , status ); 692 693 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 694 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 695 CURRENT_THREAD->core->lid , hal_time_stamp() ); 373 696 } 374 697 375 376 ///////////////////////////////////////////////////////////////////////////////////////// 377 // [4] Marshaling functions attached to RPC_THREAD_USER_CREATE 698 ///////////////////////////////////////////////////////////////////////////////////////// 699 // [5] Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking) 700 
///////////////////////////////////////////////////////////////////////////////////////// 701 702 /////////////////////////////////////////////////// 703 void rpc_process_make_kill_client( cxy_t cxy, 704 process_t * process, 705 uint32_t sig_id ) 706 { 707 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 708 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 709 CURRENT_THREAD->core->lid , hal_time_stamp() ); 710 711 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 712 713 // initialise RPC descriptor header 714 rpc_desc_t rpc; 715 rpc.index = RPC_PROCESS_MAKE_KILL; 716 rpc.response = 1; 717 718 // set input arguments in RPC descriptor 719 rpc.args[0] = (uint64_t)(intptr_t)process; 720 rpc.args[1] = (uint64_t)sig_id; 721 722 // register RPC request in remote RPC fifo (blocking function) 723 rpc_send( cxy , &rpc , true ); 724 725 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 726 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 727 CURRENT_THREAD->core->lid , hal_time_stamp() ); 728 } 729 730 ////////////////////////////////////////////// 731 void rpc_process_make_kill_server( xptr_t xp ) 732 { 733 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 734 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 735 CURRENT_THREAD->core->lid , hal_time_stamp() ); 736 737 process_t * process; 738 uint32_t sig_id; 739 740 // get client cluster identifier and pointer on RPC descriptor 741 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 742 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 743 744 // get arguments from RPC descriptor 745 process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 746 sig_id = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 747 748 // call local kernel function 749 process_make_exit( process , sig_id ); 750 751 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 752 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 753 CURRENT_THREAD->core->lid , hal_time_stamp() ); 754 } 755 756 ///////////////////////////////////////////////////////////////////////////////////////// 757 // [6] Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking) 378 758 ///////////////////////////////////////////////////////////////////////////////////////// 379 759 … … 387 767 error_t * error ) // out 388 768 { 389 390 391 769 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 770 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 771 CURRENT_THREAD->core->lid , hal_time_stamp() ); 392 772 393 773 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 404 784 rpc.args[3] = (uint64_t)(intptr_t)attr; 405 785 406 // register RPC request in remote RPC fifo 407 rpc_send _sync( cxy , &rpc);786 // register RPC request in remote RPC fifo (blocking function) 787 rpc_send( cxy , &rpc , true ); 408 788 409 789 // get output arguments from RPC descriptor … … 411 791 *error = (error_t)rpc.args[5]; 412 792 413 414 415 793 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 794 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 795 CURRENT_THREAD->core->lid , hal_time_stamp() ); 416 796 } 417 797 … … 419 799 void rpc_thread_user_create_server( xptr_t xp ) 420 800 { 801 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 802 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 803 CURRENT_THREAD->core->lid , hal_time_stamp() ); 804 421 805 pthread_attr_t * attr_ptr; // pointer 
on attributes structure in client cluster 422 806 pthread_attr_t attr_copy; // attributes structure copy in server cluster … … 428 812 void * start_arg; 429 813 error_t error; 430 431 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",432 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,433 CURRENT_THREAD->core->lid , hal_time_stamp() );434 814 435 815 // get client cluster identifier and pointer on RPC descriptor … … 462 842 hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error ); 463 843 464 465 466 467 } 468 469 ///////////////////////////////////////////////////////////////////////////////////////// 470 // [ 5] Marshaling functions attached to RPC_THREAD_KERNEL_CREATE844 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 845 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 846 CURRENT_THREAD->core->lid , hal_time_stamp() ); 847 } 848 849 ///////////////////////////////////////////////////////////////////////////////////////// 850 // [7] Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking) 471 851 ///////////////////////////////////////////////////////////////////////////////////////// 472 852 … … 479 859 error_t * error ) // out 480 860 { 481 482 483 861 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 862 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 863 CURRENT_THREAD->core->lid , hal_time_stamp() ); 484 864 485 865 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); … … 495 875 rpc.args[2] = (uint64_t)(intptr_t)args; 496 876 497 // register RPC request in remote RPC fifo 498 rpc_send _sync( cxy , &rpc);877 // register RPC request in remote RPC fifo (blocking function) 878 rpc_send( cxy , &rpc , true ); 499 879 500 880 // get output arguments from RPC descriptor … … 502 882 *error = (error_t)rpc.args[4]; 503 883 504 505 506 884 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 885 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 886 CURRENT_THREAD->core->lid , hal_time_stamp() ); 507 887 } 508 888 … … 510 890 void rpc_thread_kernel_create_server( xptr_t xp ) 511 891 { 892 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 893 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 894 CURRENT_THREAD->core->lid , hal_time_stamp() ); 895 512 896 thread_t * thread_ptr; // local pointer on thread descriptor 513 897 xptr_t thread_xp; // extended pointer on thread descriptor … … 515 899 error_t error; 516 900 517 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",518 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,519 CURRENT_THREAD->core->lid , hal_time_stamp() );520 521 901 // get client cluster identifier and pointer on RPC descriptor 522 902 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); … … 539 919 hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp ); 540 920 541 542 543 544 } 545 546 ///////////////////////////////////////////////////////////////////////////////////////// 547 // [ 6] Marshaling functions attached to RPC_SIGNAL_RISE921 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 922 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 923 CURRENT_THREAD->core->lid , hal_time_stamp() ); 924 } 925 926 ///////////////////////////////////////////////////////////////////////////////////////// 927 // [8] Marshaling functions attached to RPC_THREAD_KILL (blocking) 548 928 ///////////////////////////////////////////////////////////////////////////////////////// 549 929 550 930 
///////////////////////////////////////////// 551 void rpc_signal_rise_client( cxy_t cxy, 552 process_t * process, // in 553 uint32_t sig_id ) // in 554 { 555 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 556 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 557 CURRENT_THREAD->core->lid , hal_time_stamp() ); 558 559 assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n"); 560 561 // initialise RPC descriptor header 562 rpc_desc_t rpc; 563 rpc.index = RPC_SIGNAL_RISE; 931 void rpc_thread_kill_client( cxy_t cxy, 932 thread_t * thread ) // in 933 { 934 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 935 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 936 CURRENT_THREAD->core->lid , hal_time_stamp() ); 937 938 // this RPC can be called in local cluster 939 940 // initialise RPC descriptor header 941 rpc_desc_t rpc; 942 rpc.index = RPC_THREAD_KILL; 564 943 rpc.response = 1; 565 944 566 945 // set input arguments in RPC descriptor 567 rpc.args[0] = (uint64_t)(intptr_t)process; 568 rpc.args[1] = (uint64_t)sig_id; 946 rpc.args[0] = (uint64_t)(intptr_t)thread; 569 947 570 // register RPC request in remote RPC fifo 571 rpc_send _sync( cxy , &rpc);572 573 574 575 948 // register RPC request in remote RPC fifo (blocking function) 949 rpc_send( cxy , &rpc , true ); 950 951 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 952 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 953 CURRENT_THREAD->core->lid , hal_time_stamp() ); 576 954 } 577 955 578 956 //////////////////////////////////////// 579 void rpc_signal_rise_server( xptr_t xp ) 580 { 581 process_t * process; // local pointer on process descriptor 582 uint32_t sig_id; // signal index 583 584 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 585 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 586 CURRENT_THREAD->core->lid , hal_time_stamp() ); 957 void rpc_thread_kill_server( xptr_t xp ) 958 { 959 rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n", 960 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 961 CURRENT_THREAD->core->lid , hal_time_stamp() ); 962 963 thread_t * thread; // local pointer on process descriptor 587 964 588 965 // get client cluster identifier and pointer on RPC descriptor … … 591 968 592 969 // get attributes from RPC descriptor 593 process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 594 sig_id = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 970 thread = (thread_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 595 971 596 972 // call local kernel function 597 signal_rise( process , sig_id ); 598 599 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 600 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 601 CURRENT_THREAD->core->lid , hal_time_stamp() ); 602 } 603 604 ///////////////////////////////////////////////////////////////////////////////////////// 605 // [10] Marshaling functions attached to RPC_VFS_INODE_CREATE 973 thread_kill( thread ); 974 975 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", 976 __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy, 977 CURRENT_THREAD->core->lid , hal_time_stamp() ); 978 } 979 980 981 ///////////////////////////////////////////////////////////////////////////////////////// 982 // [9] Marshaling functions attached to RPC_PROCESS_KILL (multicast / non blocking) 983 
///////////////////////////////////////////////////////////////////////////////////////// 984 985 /////////////////////////////////////////////////// 986 void rpc_process_sigaction_client( cxy_t cxy, 987 process_t * process, // in 988 uint32_t sigaction, // in 989 xptr_t rsp_xp, // in 990 xptr_t client_xp ) // in 991 { 992 signal_dmsg("\n[DBG] %s : enter for %s / thread %x on core[%x,%d] / cycle %d\n", 993 __FUNCTION__ , process_action_str( sigaction ) , CURRENT_THREAD , 994 local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() ); 995 996 // initialise RPC descriptor header 997 rpc_desc_t rpc; 998 rpc.index = RPC_PROCESS_SIGACTION; 999 1000 // set input arguments in RPC descriptor 1001 rpc.args[0] = (uint64_t)(intptr_t)process; 1002 rpc.args[1] = (uint64_t)sigaction; 1003 rpc.args[2] = (uint64_t)rsp_xp; 1004 rpc.args[3] = (uint64_t)client_xp; 1005 1006 // register RPC request in remote RPC fifo (non blocking) 1007 rpc_send( cxy , &rpc , false ); 1008 1009 signal_dmsg("\n[DBG] %s : exit for %s / thread %x on core[%x,%d] / cycle %d\n", 1010 __FUNCTION__ , process_action_str( sigaction ) , CURRENT_THREAD , 1011 local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() ); 1012 } 1013 1014 ////////////////////////////////////////////// 1015 void rpc_process_sigaction_server( xptr_t xp ) 1016 { 1017 process_t * process; 1018 uint32_t action; 1019 xptr_t rsp_xp; 1020 xptr_t client_xp; 1021 1022 // get client cluster identifier and pointer on RPC descriptor 1023 cxy_t client_cxy = (cxy_t)GET_CXY( xp ); 1024 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp ); 1025 1026 // get arguments from RPC descriptor 1027 process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) ); 1028 action = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) ); 1029 rsp_xp = (xptr_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) ); 1030 client_xp = (xptr_t) hal_remote_lwd( XPTR( client_cxy , &desc->args[3] ) ); 1031 1032 signal_dmsg("\n[DBG] %s : enter for %s / thread %x on core[%x,%d] / cycle %d\n", 1033 __FUNCTION__ , process_action_str( action ) , CURRENT_THREAD , 1034 local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() ); 1035 1036 // call relevant kernel function 1037 if (action == DELETE_ALL_THREADS ) process_delete ( process , rsp_xp , client_xp ); 1038 else if (action == BLOCK_ALL_THREADS ) process_block ( process , rsp_xp , client_xp ); 1039 else if (action == UNBLOCK_ALL_THREADS ) process_unblock( process , rsp_xp , client_xp ); 1040 1041 signal_dmsg("\n[DBG] %s : exit for %s / thread %x on core[%x,%d] / cycle %d\n", 1042 __FUNCTION__ , process_action_str( action ) , CURRENT_THREAD , 1043 local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() ); 1044 } 1045 1046 ///////////////////////////////////////////////////////////////////////////////////////// 1047 // [10] Marshaling functions attached to RPC_VFS_INODE_CREATE (blocking) 606 1048 ///////////////////////////////////////////////////////////////////////////////////////// 607 1049 … … 641 1083 642 1084 // register RPC request in remote RPC fifo (blocking function) 643 rpc_send _sync( cxy , &rpc);1085 rpc_send( cxy , &rpc , true ); 644 1086 645 1087 // get output values from RPC descriptor … … 705 1147 706 1148 ///////////////////////////////////////////////////////////////////////////////////////// 707 // [11] Marshaling functions attached to RPC_VFS_INODE_DESTROY 1149 // [11] Marshaling functions attached to RPC_VFS_INODE_DESTROY (blocking) 708 1150 
///////////////////////////////////////////////////////////////////////////////////////// 709 1151 … … 727 1169 728 1170 // register RPC request in remote RPC fifo (blocking function) 729 rpc_send _sync( cxy , &rpc);1171 rpc_send( cxy , &rpc , true ); 730 1172 731 1173 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 759 1201 760 1202 ///////////////////////////////////////////////////////////////////////////////////////// 761 // [12] Marshaling functions attached to RPC_VFS_DENTRY_CREATE 1203 // [12] Marshaling functions attached to RPC_VFS_DENTRY_CREATE (blocking) 762 1204 ///////////////////////////////////////////////////////////////////////////////////////// 763 1205 … … 787 1229 788 1230 // register RPC request in remote RPC fifo (blocking function) 789 rpc_send _sync( cxy , &rpc);1231 rpc_send( cxy , &rpc , true ); 790 1232 791 1233 // get output values from RPC descriptor … … 841 1283 842 1284 ///////////////////////////////////////////////////////////////////////////////////////// 843 // [13] Marshaling functions attached to RPC_VFS_DENTRY_DESTROY 1285 // [13] Marshaling functions attached to RPC_VFS_DENTRY_DESTROY (blocking) 844 1286 ///////////////////////////////////////////////////////////////////////////////////////// 845 1287 … … 864 1306 865 1307 // register RPC request in remote RPC fifo (blocking function) 866 rpc_send _sync( cxy , &rpc);1308 rpc_send( cxy , &rpc , true ); 867 1309 868 1310 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 897 1339 898 1340 ///////////////////////////////////////////////////////////////////////////////////////// 899 // [14] Marshaling functions attached to RPC_VFS_FILE_CREATE 1341 // [14] Marshaling functions attached to RPC_VFS_FILE_CREATE (blocking) 900 1342 ///////////////////////////////////////////////////////////////////////////////////////// 901 1343 … … 923 1365 924 1366 // register RPC request in remote RPC fifo (blocking function) 925 rpc_send _sync( cxy , &rpc);1367 rpc_send( cxy , &rpc , true ); 926 1368 927 1369 // get output values from RPC descriptor … … 969 1411 970 1412 ///////////////////////////////////////////////////////////////////////////////////////// 971 // [15] Marshaling functions attached to RPC_VFS_FILE_DESTROY 1413 // [15] Marshaling functions attached to RPC_VFS_FILE_DESTROY (blocking) 972 1414 ///////////////////////////////////////////////////////////////////////////////////////// 973 1415 … … 991 1433 992 1434 // register RPC request in remote RPC fifo (blocking function) 993 rpc_send _sync( cxy , &rpc);1435 rpc_send( cxy , &rpc , true ); 994 1436 995 1437 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 1023 1465 1024 1466 ///////////////////////////////////////////////////////////////////////////////////////// 1025 // [16] Marshaling functions attached to RPC_VFS_INODE_LOAD 1467 // [16] Marshaling functions attached to RPC_VFS_INODE_LOAD (blocking) 1026 1468 ///////////////////////////////////////////////////////////////////////////////////////// 1027 1469 … … 1050 1492 1051 1493 // register RPC request in remote RPC fifo (blocking function) 1052 rpc_send _sync( cxy , &rpc);1494 rpc_send( cxy , &rpc , true ); 1053 1495 1054 1496 // get output values from RPC descriptor … … 1099 1541 1100 1542 ///////////////////////////////////////////////////////////////////////////////////////// 1101 // [17] Marshaling functions attached to RPC_VFS_MAPPER_LOAD_ALL 1543 // [17] Marshaling functions attached to RPC_VFS_MAPPER_LOAD_ALL (blocking) 
1102 1544 ///////////////////////////////////////////////////////////////////////////////////////// 1103 1545 … … 1122 1564 1123 1565 // register RPC request in remote RPC fifo (blocking function) 1124 rpc_send _sync( cxy , &rpc);1566 rpc_send( cxy , &rpc , true ); 1125 1567 1126 1568 // get output values from RPC descriptor … … 1161 1603 1162 1604 ///////////////////////////////////////////////////////////////////////////////////////// 1163 // [18] Marshaling functions attached to RPC_FATFS_GET_CLUSTER 1605 // [18] Marshaling functions attached to RPC_FATFS_GET_CLUSTER (blocking) 1164 1606 ///////////////////////////////////////////////////////////////////////////////////////// 1165 1607 … … 1189 1631 1190 1632 // register RPC request in remote RPC fifo 1191 rpc_send _sync( cxy , &rpc);1633 rpc_send( cxy , &rpc , true ); 1192 1634 1193 1635 // get output argument from rpc descriptor … … 1235 1677 1236 1678 ///////////////////////////////////////////////////////////////////////////////////////// 1237 // [20] Marshaling functions attached to RPC_VMM_GET_VSEG 1679 // [20] Marshaling functions attached to RPC_VMM_GET_VSEG (blocking) 1238 1680 ///////////////////////////////////////////////////////////////////////////////////////// 1239 1681 … … 1261 1703 1262 1704 // register RPC request in remote RPC fifo (blocking function) 1263 rpc_send _sync( cxy , &rpc);1705 rpc_send( cxy , &rpc , true ); 1264 1706 1265 1707 // get output argument from rpc descriptor … … 1308 1750 1309 1751 ///////////////////////////////////////////////////////////////////////////////////////// 1310 // [21] Marshaling functions attached to RPC_VMM_GET_PTE 1752 // [21] Marshaling functions attached to RPC_VMM_GET_PTE (blocking) 1311 1753 ///////////////////////////////////////////////////////////////////////////////////////// 1312 1754 … … 1337 1779 1338 1780 // register RPC request in remote RPC fifo (blocking function) 1339 rpc_send _sync( cxy , &rpc);1781 rpc_send( cxy , &rpc , true ); 1340 1782 1341 1783 // get output argument from rpc descriptor … … 1386 1828 1387 1829 ///////////////////////////////////////////////////////////////////////////////////////// 1388 // [22] Marshaling functions attached to RPC_KCM_ALLOC 1830 // [22] Marshaling functions attached to RPC_KCM_ALLOC (blocking) 1389 1831 ///////////////////////////////////////////////////////////////////////////////////////// 1390 1832 … … 1408 1850 rpc.args[0] = (uint64_t)kmem_type; 1409 1851 1410 // register RPC request in remote RPC fifo 1411 rpc_send _sync( cxy , &rpc);1852 // register RPC request in remote RPC fifo (blocking function) 1853 rpc_send( cxy , &rpc , true ); 1412 1854 1413 1855 // get output arguments from RPC descriptor … … 1449 1891 1450 1892 ///////////////////////////////////////////////////////////////////////////////////////// 1451 // [23] Marshaling functions attached to RPC_KCM_FREE 1893 // [23] Marshaling functions attached to RPC_KCM_FREE (blocking) 1452 1894 ///////////////////////////////////////////////////////////////////////////////////////// 1453 1895 … … 1472 1914 rpc.args[1] = (uint64_t)kmem_type; 1473 1915 1474 // register RPC request in remote RPC fifo 1475 rpc_send _sync( cxy , &rpc);1916 // register RPC request in remote RPC fifo (blocking function) 1917 rpc_send( cxy , &rpc , true ); 1476 1918 1477 1919 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 1540 1982 1541 1983 // register RPC request in remote RPC fifo (blocking function) 1542 rpc_send _sync( cxy , &rpc);1984 rpc_send( cxy , &rpc , 
true ); 1543 1985 1544 1986 // get output values from RPC descriptor … … 1608 2050 1609 2051 ///////////////////////////////////////////////////////////////////////////////////////// 1610 // [25] Marshaling functions attached to RPC_MAPPER_GET_PAGE 2052 // [25] Marshaling functions attached to RPC_MAPPER_GET_PAGE (blocking) 1611 2053 ///////////////////////////////////////////////////////////////////////////////////////// 1612 2054 … … 1633 2075 1634 2076 // register RPC request in remote RPC fifo (blocking function) 1635 rpc_send _sync( cxy , &rpc);2077 rpc_send( cxy , &rpc , true ); 1636 2078 1637 2079 // get output values from RPC descriptor … … 1670 2112 1671 2113 ///////////////////////////////////////////////////////////////////////////////////////// 1672 // [26] Marshaling functions attached to RPC_VMM_CREATE_VSEG 2114 // [26] Marshaling functions attached to RPC_VMM_CREATE_VSEG (blocking) 1673 2115 ///////////////////////////////////////////////////////////////////////////////////////// 1674 2116 … … 1707 2149 1708 2150 // register RPC request in remote RPC fifo (blocking function) 1709 rpc_send _sync( cxy , &rpc);2151 rpc_send( cxy , &rpc , true ); 1710 2152 1711 2153 // get output values from RPC descriptor … … 1757 2199 1758 2200 ///////////////////////////////////////////////////////////////////////////////////////// 1759 // [27] Marshaling functions attached to RPC_SCHED_DISPLAY 2201 // [27] Marshaling functions attached to RPC_SCHED_DISPLAY (blocking) 1760 2202 ///////////////////////////////////////////////////////////////////////////////////////// 1761 2203 … … 1779 2221 1780 2222 // register RPC request in remote RPC fifo (blocking function) 1781 rpc_send _sync( cxy , &rpc);2223 rpc_send( cxy , &rpc , true ); 1782 2224 1783 2225 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 1809 2251 1810 2252 ///////////////////////////////////////////////////////////////////////////////////////// 1811 // [28] Marshaling functions attached to RPC_VMM_SET_COW 2253 // [28] Marshaling functions attached to RPC_VMM_SET_COW (blocking) 1812 2254 ///////////////////////////////////////////////////////////////////////////////////////// 1813 2255 … … 1831 2273 1832 2274 // register RPC request in remote RPC fifo (blocking function) 1833 rpc_send _sync( cxy , &rpc);2275 rpc_send( cxy , &rpc , true ); 1834 2276 1835 2277 rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n", … … 1862 2304 } 1863 2305 1864 /***************************************************************************************/ 1865 /************ Generic functions supporting RPCs : client side **************************/ 1866 /***************************************************************************************/ 1867 1868 //////////////////////////////////////////// 1869 void rpc_send_sync( cxy_t server_cxy, 1870 rpc_desc_t * rpc ) 1871 { 1872 error_t error; 1873 1874 thread_t * this = CURRENT_THREAD; 1875 core_t * core = this->core; 1876 1877 // register client thread pointer and core lid in RPC descriptor 1878 rpc->thread = this; 1879 rpc->lid = core->lid; 1880 1881 // build an extended pointer on the RPC descriptor 1882 xptr_t desc_xp = XPTR( local_cxy , rpc ); 1883 1884 // get local pointer on rpc_fifo in remote cluster, with the 1885 // assumption that local pointers are identical in all clusters 1886 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 1887 1888 // try to post an item in remote fifo 1889 // deschedule and retry if remote fifo full 1890 do 1891 { 1892 error = 
remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), 1893 (uint64_t )desc_xp ); 1894 if ( error ) 1895 { 1896 printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n", 1897 __FUNCTION__ , local_cxy , server_cxy ); 1898 1899 if( thread_can_yield() ) sched_yield("RPC fifo full"); 1900 } 1901 } 1902 while( error ); 1903 1904 hal_fence(); 1905 1906 // send IPI to the remote core corresponding to the client core 1907 dev_pic_send_ipi( server_cxy , core->lid ); 1908 1909 // wait RPC completion: 1910 // - busy waiting policy during kernel_init, or if threads cannot yield 1911 // - block and deschedule in all other cases 1912 1913 if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting 1914 { 1915 1916 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n" 1917 " rpc = %d / server = %x / cycle %d\n", 1918 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) , 1919 rpc->index , server_cxy , hal_time_stamp() ); 1920 1921 while( rpc->response ) hal_fixed_delay( 100 ); 1922 1923 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n", 1924 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ); 1925 1926 } 1927 else // block & deschedule 1928 { 1929 1930 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n" 1931 " rpc = %d / server = %x / cycle %d\n", 1932 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) , 1933 rpc->index , server_cxy , hal_time_stamp() ); 1934 1935 thread_block( this , THREAD_BLOCKED_RPC ); 1936 sched_yield("client blocked on RPC"); 1937 1938 grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n", 1939 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ); 1940 1941 } 1942 1943 // check response available 1944 assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" ); 1945 1946 // acknowledge the IPI sent by the server 1947 dev_pic_ack_ipi(); 1948 1949 } // end rpc_send_sync() 1950 1951 1952 1953 /***************************************************************************************/ 1954 /************ Generic functions supporting RPCs : server side **************************/ 1955 /***************************************************************************************/ 1956 1957 //////////////// 1958 void rpc_check() 1959 { 1960 error_t error; 1961 thread_t * thread; 1962 uint32_t sr_save; 1963 1964 bool_t found = false; 1965 thread_t * this = CURRENT_THREAD; 1966 core_t * core = this->core; 1967 scheduler_t * sched = &core->scheduler; 1968 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 1969 1970 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n", 1971 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() ); 1972 1973 // interrupted thread not preemptable during RPC chek 1974 hal_disable_irq( &sr_save ); 1975 1976 // check RPC FIFO not empty and no RPC thread handling it 1977 if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) ) 1978 { 1979 // search one non blocked RPC thread 1980 list_entry_t * iter; 1981 LIST_FOREACH( &sched->k_root , iter ) 1982 { 1983 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 1984 if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) ) 1985 { 1986 found = true; 1987 break; 1988 } 1989 } 1990 1991 // create new RPC thread if not found 1992 if( found == false ) 1993 { 1994 error = 
thread_kernel_create( &thread, 1995 THREAD_RPC, 1996 &rpc_thread_func, 1997 NULL, 1998 this->core->lid ); 1999 if( error ) 2000 { 2001 printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n", 2002 __FUNCTION__ , local_cxy ); 2003 } 2004 else 2005 { 2006 // unblock created RPC thread 2007 thread->blocked = 0; 2008 2009 // update core descriptor counter 2010 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 ); 2011 2012 grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / cycle %d\n", 2013 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() ); 2014 2015 } 2016 } 2017 } 2018 2019 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n", 2020 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() ); 2021 2022 // interrupted thread deschedule always 2023 sched_yield("IPI received"); 2024 2025 grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n", 2026 __FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() ); 2027 2028 // interrupted thread restore IRQs after resume 2029 hal_restore_irq( sr_save ); 2030 2031 } // end rpc_check() 2032 2033 2034 ////////////////////// 2035 void rpc_thread_func() 2036 { 2037 uint32_t count; // handled RPC requests counter 2038 error_t empty; // local RPC fifo state 2039 xptr_t desc_xp; // extended pointer on RPC request 2040 cxy_t desc_cxy; // RPC request cluster (client) 2041 rpc_desc_t * desc_ptr; // RPC request local pointer 2042 uint32_t index; // RPC request index 2043 uint32_t responses; // number of responses received by client 2044 thread_t * thread_ptr; // local pointer on client thread 2045 lid_t core_lid; // local index of client core 2046 2047 // makes RPC thread not preemptable 2048 hal_disable_irq( NULL ); 2049 2050 thread_t * this = CURRENT_THREAD; 2051 remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo; 2052 2053 // two embedded loops: 2054 // - external loop : "infinite" RPC thread 2055 // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests 2056 2057 while(1) // external loop 2058 { 2059 // try to take RPC_FIFO ownership 2060 if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) ) 2061 { 2062 // initializes RPC requests counter 2063 count = 0; 2064 2065 // acknowledge local IPI 2066 dev_pic_ack_ipi(); 2067 2068 // exit internal loop in three cases: 2069 // - RPC fifo is empty 2070 // - ownership has been lost (because descheduling) 2071 // - max number of RPCs is reached 2072 while( 1 ) // internal loop 2073 { 2074 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp ); 2075 2076 if ( empty == 0 ) // one RPC request found 2077 { 2078 // get client cluster and pointer on RPC descriptor 2079 desc_cxy = (cxy_t)GET_CXY( desc_xp ); 2080 desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp ); 2081 2082 // get rpc index from RPC descriptor 2083 index = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) ); 2084 2085 grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n", 2086 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() ); 2087 2088 // call the relevant server function 2089 rpc_server[index]( desc_xp ); 2090 2091 grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n", 2092 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() ); 2093 2094 // increment handled RPC counter 2095 count++; 2096 2097 // decrement response counter in RPC descriptor 2098 responses = 
hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1); 2099 2100 // unblock client thread and send IPI to client core if last response 2101 if( responses == 1 ) 2102 { 2103 // get pointer on client thread and unblock it 2104 thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread)); 2105 thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC ); 2106 2107 hal_fence(); 2108 2109 // get client core lid and send IPI 2110 core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid)); 2111 dev_pic_send_ipi( desc_cxy , core_lid ); 2112 } 2113 } 2114 2115 // chek exit condition 2116 if( local_fifo_is_empty( rpc_fifo ) || 2117 (rpc_fifo->owner != this->trdid) || 2118 (count >= CONFIG_RPC_PENDING_MAX) ) break; 2119 } // end internal loop 2120 2121 // release rpc_fifo ownership if not lost 2122 if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0; 2123 } 2124 2125 // sucide if too many RPC threads in cluster 2126 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX ) 2127 { 2128 2129 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n", 2130 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() ); 2131 2132 // update RPC threads counter 2133 hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 ); 2134 2135 // suicide 2136 thread_exit(); 2137 } 2138 2139 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n", 2140 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() ); 2141 2142 // deschedule without blocking 2143 sched_yield("RPC fifo empty or too much work"); 2144 2145 grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n", 2146 __FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() ); 2147 2148 } // end external loop 2149 2150 } // end rpc_thread_func() 2151 2152 2306 -
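All rpc_*_server() functions above share the same skeleton; the sketch below is illustrative only (the function name and the placeholder service are assumptions) and simply summarises how a server unmarshals its arguments from the client cluster and writes back its results:

// illustrative server-side skeleton (not part of this changeset)
void example_rpc_server( xptr_t xp )
{
    // get client cluster identifier and local pointer on the RPC descriptor
    cxy_t        client_cxy = (cxy_t)GET_CXY( xp );
    rpc_desc_t * desc       = (rpc_desc_t *)GET_PTR( xp );

    // unmarshal input argument(s) with remote accesses to the client cluster
    uint64_t arg = hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );

    // call the relevant local kernel service (placeholder here)
    uint64_t result = arg;

    // marshal output argument(s) back into the client-side descriptor
    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , result );

    // the generic rpc_thread_func() then decrements desc->response and
    // unblocks the client thread when the last response has arrived
}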
trunk/kernel/kern/rpc.h
r408 r409 32 32 #include <vseg.h> 33 33 #include <remote_fifo.h> 34 #include <signal.h> 34 35 35 36 /**** Forward declarations ****/ … … 60 61 { 61 62 RPC_PMEM_GET_PAGES = 0, 62 RPC_PROCESS_MAKE_EXEC = 1, 63 RPC_PROCESS_MAKE_FORK = 2, 64 RPC_PROCESS_KILL = 3, 65 RPC_THREAD_USER_CREATE = 4, 66 RPC_THREAD_KERNEL_CREATE = 5, 67 RPC_SIGNAL_RISE = 6, 63 RPC_PMEM_RELEASE_PAGES = 1, 64 RPC_PROCESS_MAKE_EXEC = 2, 65 RPC_PROCESS_MAKE_FORK = 3, 66 RPC_PROCESS_MAKE_EXIT = 4, 67 RPC_PROCESS_MAKE_KILL = 5, 68 RPC_THREAD_USER_CREATE = 6, 69 RPC_THREAD_KERNEL_CREATE = 7, 70 RPC_THREAD_KILL = 8, 71 RPC_PROCESS_SIGACTION = 9, 68 72 69 73 RPC_VFS_INODE_CREATE = 10, … … 86 90 RPC_SCHED_DISPLAY = 27, 87 91 RPC_VMM_SET_COW = 28, 92 88 93 RPC_MAX_INDEX = 30, 89 94 } … … 116 121 117 122 /*********************************************************************************** 118 * This blocking function executes on the client core. 119 * It puts one RPC extended pointer in the remote fifo. 120 * It sends an IPI if fifo is empty, and waits until RPC response available. 121 * The RPC descriptor must be allocated in the caller's stack 122 * and initialised by the caller. Exit with a Panic message if remote fifo 123 * is still full after (CONFIG_RPC_PUT_MAX_ITERATIONS) retries. 123 * This function is executed by the client thread in the client cluster. 124 * It puts one RPC descriptor defined by the <desc> argument in the remote fifo 125 * defined by the <cxy> argument. It sends an IPI to the server if fifo is empty. 126 * The RPC descriptor must be allocated in the caller's stack, and initialised by 127 * the caller. It exit with a Panic message if remote fifo is still full after 128 * (CONFIG_RPC_PUT_MAX_ITERATIONS) retries. 129 * - When the <block> argument is true, this function blocks and deschedule. 130 * It returns only when the server acknowledges the RPC by writing in the RPC 131 * "response" field, and unblocks the client. 132 * - When the <block> argument is false, this function returns as soon as the RPC 133 * has been registered in the FIFO, and the server thread must directly signal 134 * completion to the client thread. 124 135 *********************************************************************************** 125 136 * @ cxy : server cluster identifier 126 137 * @ desc : local pointer on RPC descriptor in client cluster 127 **********************************************************************************/ 128 void rpc_send_sync( cxy_t cxy, 129 rpc_desc_t * desc ); 138 * @ block : boolean true when blocking behaviour is required. 139 **********************************************************************************/ 140 void rpc_send( cxy_t cxy, 141 rpc_desc_t * desc, 142 bool_t block ); 130 143 131 144 … … 186 199 187 200 /*********************************************************************************** 188 * [1] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing 201 * [1] The RPC_PMEM_RELEASE_PAGES release one or several pages to a remote cluster. 202 *********************************************************************************** 203 * @ cxy : server cluster identifier 204 * @ page : [in] local pointer on page descriptor to release. 
205 **********************************************************************************/ 206 void rpc_pmem_release_pages_client( cxy_t cxy, 207 struct page_s * page ); 208 209 void rpc_pmem_release_pages_server( xptr_t xp ); 210 211 /*********************************************************************************** 212 * [2] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing 189 213 * process descriptor in a remote server cluster. This server cluster must be 190 214 * the owner cluster for the existing process. The new process descriptor is … … 204 228 205 229 /*********************************************************************************** 206 * [ 2] The RPC_PROCESS_MAKE_FORK creates a "child" process descriptor, and the230 * [3] The RPC_PROCESS_MAKE_FORK creates a "child" process descriptor, and the 207 231 * associated "child" thread descriptor in a target remote cluster that can be 208 232 * any cluster. The child process is initialized from informations found in the … … 227 251 228 252 /*********************************************************************************** 229 * [3] The RPC_PROCESS_KILL is actually a multicast RPC sent by the reference cluster 230 * to other clusters containing a process descriptor copy, to destroy these copies. 231 *********************************************************************************** 232 * @ process : local pointer on target process. 233 **********************************************************************************/ 234 void rpc_process_kill_client( struct process_s * process ); 235 236 void rpc_process_kill_server( xptr_t xp ); 237 238 /*********************************************************************************** 239 * [4] The RPC_THREAD_USER_CREATE creates an user thread in the server cluster, 253 * [4] The RPC_PROCESS_MAKE_EXIT can be called by any thread to request the owner 254 * cluster to execute the process_make_exit() function for a calling process. 255 *********************************************************************************** 256 * @ cxy : server cluster identifier. 257 * @ process : local pointer on calling process in owner cluster. 258 * @ status : calling process exit status. 259 **********************************************************************************/ 260 void rpc_process_make_exit_client( cxy_t cxy, 261 struct process_s * process, 262 uint32_t status ); 263 264 void rpc_process_make_exit_server( xptr_t xp ); 265 266 /*********************************************************************************** 267 * [5] The RPC_PROCESS_MAKE_KILL can be called by any thread to request the owner 268 * cluster to execute the process_make_kill() function for a target process. 269 *********************************************************************************** 270 * @ cxy : server cluster identifier. 271 * @ process : local pointer on target process in owner cluster. 272 * @ seg_id : signal type (only SIGKILL / SIGSTOP / SIGCONT are supported). 273 **********************************************************************************/ 274 void rpc_process_make_kill_client( cxy_t cxy, 275 struct process_s * process, 276 uint32_t seg_id ); 277 278 void rpc_process_make_kill_server( xptr_t xp ); 279 280 /*********************************************************************************** 281 * [6] The RPC_THREAD_USER_CREATE creates an user thread in the server cluster, 240 282 * as specified by the arguments. 
It returns an extended pointer on the new 241 283 * thread descriptor in server cluster, and an error code. … … 258 300 259 301 /*********************************************************************************** 260 * [ 5] The RPC_THREAD_KERNEL_CREATE creates a kernel thread in the server cluster,302 * [7] The RPC_THREAD_KERNEL_CREATE creates a kernel thread in the server cluster, 261 303 * as specified by the type, func and args arguments. It returns the local pointer 262 304 * on the thread descriptor in server cluster and an error code. … … 280 322 281 323 /*********************************************************************************** 282 * [6] The RPC_SIGNAL_RISE ask a target cluster to register a given signal in 283 * all threads descriptors of a given process. 284 * It is used by the sys_kill() function. 324 * [8] The RPC_THREAD_KILL ask a target cluster to kill a given thread descriptor. 325 * It is called by the sys_thread_cancel() function for a remote thread. 285 326 *********************************************************************************** 286 327 * @ cxy : server cluster identifier. 287 * @ process : [in] local pointer on target process descriptor in server. 288 * @ sig_id : [in] signal index. 289 **********************************************************************************/ 290 void rpc_signal_rise_client( cxy_t cxy, 291 struct process_s * process, 292 uint32_t sig_id ); 328 * @ thread : [in] local pointer on target process descriptor in server. 329 **********************************************************************************/ 330 void rpc_thread_kill_client( cxy_t cxy, 331 struct thread_s * thread ); 293 332 294 void rpc_signal_rise_server( xptr_t xp ); 333 void rpc_thread_kill_server( xptr_t xp ); 334 335 /*********************************************************************************** 336 * [9] The RPC_PROCESS_SIGACTION allows the owner cluster to request any other 337 * cluster to execute a given sigaction (BLOCK / UNBLOCK / DELETE) for all threads 338 * of a given process. 339 * 340 * WARNING : It is implemented as a NON BLOCKING multicast RPC, that can be sent 341 * in parallel to all process copies. The various server threads must decrement the 342 * responses counter defined by the <rsp_xp> argument, and the last server thread 343 * must unblock the <client_xp> thread. 344 *********************************************************************************** 345 * @ cxy : server cluster identifier. 346 * @ process : [in] local pointer on target process in server cluster. 347 * @ sigaction : [in] action type (BLOCK / UNBLOCK / DELETE). 348 * @ rsp_xp : [in] extended pointer on response counter. 349 * @ client_xp : [in] extended pointer on client thread. 350 **********************************************************************************/ 351 void rpc_process_sigaction_client( cxy_t cxy, 352 struct process_s * process, 353 uint32_t sigaction, 354 xptr_t rsp_xp, 355 xptr_t client_xp ); 356 357 void rpc_process_sigaction_server( xptr_t xp ); 295 358 296 359 /*********************************************************************************** -
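The non-blocking multicast behaviour documented for RPC_PROCESS_SIGACTION can be pictured with a short client-side sketch. This is only an illustration built on the prototypes above: the two target clusters (cxy_1, cxy_2), the process copy pointers (copy_1, copy_2) and the SIGACTION_BLOCK action constant are hypothetical placeholder names, while XPTR, CURRENT_THREAD, thread_block(), sched_yield() and THREAD_BLOCKED_RPC are used as they appear elsewhere in this changeset. The client blocks itself before registering the RPCs, so that the unblock performed by the last server thread cannot be lost.

    // hedged sketch (not part of the changeset) : owner cluster side
    uint32_t  responses = 2;                                   // one response per target cluster
    xptr_t    rsp_xp    = XPTR( local_cxy , &responses );      // extended pointer on the counter
    xptr_t    client_xp = XPTR( local_cxy , CURRENT_THREAD );  // extended pointer on client thread

    // block first, so that a fast server cannot unblock a not-yet-blocked client
    thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );

    // register one non-blocking RPC per process copy
    rpc_process_sigaction_client( cxy_1 , copy_1 , SIGACTION_BLOCK , rsp_xp , client_xp );
    rpc_process_sigaction_client( cxy_2 , copy_2 , SIGACTION_BLOCK , rsp_xp , client_xp );

    // deschedule : the last server decrements <responses> to zero and unblocks this thread
    sched_yield( "wait sigaction responses" );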
trunk/kernel/kern/scheduler.c
r408 r409 58 58 list_root_init( &sched->k_root ); 59 59 60 sched->sig_pending = false; // no pending signal 61 60 62 } // end sched_init() 61 63 … … 72 74 if( type == THREAD_USER ) 73 75 { 74 // register thread in scheduler user list75 76 list_add_last( &sched->u_root , &thread->sched_list ); 76 77 sched->u_threads_nr++; 77 78 // initialize u_last field if first user thread79 78 if( sched->u_last == NULL ) sched->u_last = &thread->sched_list; 80 79 } 81 80 else // kernel thread 82 81 { 83 // register thread in scheduler kernel list84 82 list_add_last( &sched->k_root , &thread->sched_list ); 85 83 sched->k_threads_nr++; 86 87 // initialize k_last field if first kernel thread88 84 if( sched->k_last == NULL ) sched->k_last = &thread->sched_list; 89 85 } … … 92 88 spinlock_unlock( &sched->lock ); 93 89 94 } // end sched_register ()90 } // end sched_register_thread() 95 91 96 92 ///////////////////////////////////////////// 97 93 void sched_remove_thread( thread_t * thread ) 98 94 { 99 core_t * core = thread->core; 100 scheduler_t * sched = &core->scheduler; 101 thread_type_t type = thread->type; 95 scheduler_t * sched = &thread->core->scheduler; 96 thread_type_t type = thread->type; 102 97 103 98 // take lock protecting sheduler lists … … 106 101 if( type == THREAD_USER ) 107 102 { 108 // remove thread from user list109 103 list_unlink( &thread->sched_list ); 110 104 sched->u_threads_nr--; 111 112 // reset the u_last field if list empty113 105 if( sched->u_threads_nr == 0 ) sched->u_last = NULL; 114 106 } 115 else // kernel thread 116 { 117 // remove thread from kernel list 107 else // kernel thread 108 { 118 109 list_unlink( &thread->sched_list ); 119 110 sched->k_threads_nr--; 120 121 // reset the k_last field if list empty122 111 if( sched->k_threads_nr == 0 ) sched->k_last = NULL; 123 112 } 124 113 125 // release lock 114 // release lock 126 115 spinlock_unlock( &sched->lock ); 127 116 128 } // end sched_remove ()117 } // end sched_remove_thread() 129 118 130 119 ////////////////////////////////////////////// … … 214 203 } // end sched_select() 215 204 216 ///////////////////////////////////////////217 void sched_kill_thread( thread_t * thread )218 {219 // check locks220 if( thread_can_yield() == false )221 {222 panic("locks not released for thread %x in process %x on core[%x][%d]",223 thread->trdid , thread->process->pid, local_cxy , thread->core->lid );224 }225 226 // remove thread from scheduler227 sched_remove_thread( thread );228 229 // reset the THREAD_SIG_KILL signal230 thread_reset_signal( thread , THREAD_SIG_KILL );231 232 // detached thread can suicide233 if( thread->signals & THREAD_SIG_SUICIDE )234 {235 assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,236 "thread must be detached in case of suicide\n" );237 238 // remove thread from process239 process_remove_thread( thread );240 241 // release memory for thread descriptor242 thread_destroy( thread );243 }244 } // end sched_kill_thread()245 246 205 ////////////////////////////////////////// 247 206 void sched_handle_signals( core_t * core ) … … 249 208 list_entry_t * iter; 250 209 thread_t * thread; 210 251 211 scheduler_t * sched = &core->scheduler; 252 253 // signal_dmsg("\n@@@ %s enter at cycle %d\n",254 // __FUNCTION__ , hal_time_stamp() );255 212 256 213 // take lock protecting threads lists … … 261 218 { 262 219 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 263 if( thread->signals ) // sched_kill_thread( thread ); 264 { 265 printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n", 266 
__FUNCTION__, thread, thread->signals, hal_time_stamp() ); 267 } 268 } 269 270 // handle kernel threads 271 LIST_FOREACH( &sched->k_root , iter ) 272 { 273 thread = LIST_ELEMENT( iter , thread_t , sched_list ); 274 if( thread->signals ) // sched_kill_thread( thread ); 275 { 276 printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n", 277 __FUNCTION__, thread, thread->signals, hal_time_stamp() ); 278 220 221 if( thread->flags & THREAD_FLAG_SIGNAL ) // thread has signal 222 { 223 // decrement response counter to acknowledge signal 224 hal_atomic_add( thread->sig_rsp_count , -1 ); 225 226 // reset signal 227 thread_reset_signal( thread ); 279 228 } 280 229 } … … 283 232 spinlock_unlock( &sched->lock ); 284 233 285 // signal_dmsg("\n@@@ %s exit at cycle %d\n",286 // __FUNCTION__ , hal_time_stamp() );287 288 234 } // end sched_handle_signals() 289 235 … … 293 239 thread_t * next; 294 240 thread_t * current = CURRENT_THREAD; 295 scheduler_t * sched = ¤t->core->scheduler; 241 core_t * core = current->core; 242 scheduler_t * sched = &core->scheduler; 296 243 297 244 #if( CONFIG_SCHED_DEBUG & 0x1 ) 298 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( c urrent->core->lid );245 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( core->lid ); 299 246 #endif 300 247 … … 319 266 assert( (next->blocked == 0) || (next->type = THREAD_IDLE) , __FUNCTION__ , 320 267 "next thread %x (%s) is blocked on core[%x,%d]\n", 321 next->trdid , thread_type_str(next->type) , local_cxy , c urrent->core->lid );268 next->trdid , thread_type_str(next->type) , local_cxy , core->lid ); 322 269 323 270 // switch contexts and update scheduler state if next != current … … 327 274 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 328 275 " thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n", 329 __FUNCTION__, local_cxy, c urrent->core->lid, cause,276 __FUNCTION__, local_cxy, core->lid, cause, 330 277 current, thread_type_str(current->type), current->process->pid, current->trdid, 331 278 next , thread_type_str(next->type) , next->process->pid , next->trdid, … … 352 299 sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n" 353 300 " thread %x (%s) (%x,%x) continue / cycle %d\n", 354 __FUNCTION__, local_cxy, c urrent->core->lid, cause,301 __FUNCTION__, local_cxy, core->lid, cause, 355 302 current, thread_type_str(current->type), current->process->pid, current->trdid, 356 303 (uint32_t)hal_get_cycles() ); 357 304 358 305 } 306 307 // handle signals for all threads executing on this core. 308 sched_handle_signals( core ); 359 309 360 310 // exit critical section / restore SR from next thread context -
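The handshake implemented by sched_handle_signals() above (a killer raises a flag and a response counter, the target scheduler decrements the counter and resets the flag) can be modelled outside the kernel. The following standalone C11 program is only a host-side analogue for illustration: the names sig_pending and sig_rsp_count mirror the fields added by this changeset, but C11 atomics and pthreads replace the kernel primitives (hal_atomic_add, hal_disable_irq, IPIs).

    /* hedged host-side model, not kernel code : compile with cc -std=c11 -pthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool           sig_pending = false;   // models scheduler->sig_pending
    static atomic_int * volatile rsp_count   = NULL;    // models thread->sig_rsp_count

    /* models the acknowledge loop run by the target core in sched_handle_signals() */
    static void * scheduler_loop( void * arg )
    {
        (void)arg;
        for( ;; )
        {
            if( atomic_load( &sig_pending ) )
            {
                atomic_fetch_sub( rsp_count , 1 );       // acknowledge the killer
                atomic_store( &sig_pending , false );    // reset the flip-flop
                return NULL;
            }
        }
    }

    int main( void )
    {
        pthread_t  sched;
        atomic_int responses = 1;                        // one expected acknowledge

        pthread_create( &sched , NULL , scheduler_loop , NULL );

        /* killer side : publish the counter, then raise the signal */
        rsp_count = &responses;
        atomic_store( &sig_pending , true );

        /* poll the response, as thread_kill() does with hal_fixed_delay() */
        while( atomic_load( &responses ) != 0 ) { /* spin */ }

        printf( "target acknowledged : safe to destroy it\n" );
        pthread_join( sched , NULL );
        return 0;
    }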
trunk/kernel/kern/scheduler.h
r408 r409 49 49 struct thread_s * idle; /*! pointer on idle thread */ 50 50 struct thread_s * current; /*! pointer on current running thread */ 51 bool_t sig_pending; /*! signal_handler must be called when true */ 51 52 } 52 53 scheduler_t; … 66 67 struct thread_s * thread ); 67 68 68 /********************************************************************************************* 69 * This function removes a thread from the set of threads attached to a given core.69 /********************************************************************************************* 70 * This function removes a thread from its core scheduler. 70 71 ********************************************************************************************* 71 72 * @ thread : local pointer on the thread descriptor. … 87 88 /********************************************************************************************* 88 89 * This function scans all threads attached to a given core scheduler, and executes 89 * the relevant actions for pending signals, such as the THREAD_SIG_KILL signal. 90 * the relevant actions for pending KILL or EXIT signals. 91 * It is called by the sched_yield() function, with IRQ disabled. 90 92 ********************************************************************************************* 91 93 * @ core : local pointer on the core descriptor. 92 94 ********************************************************************************************/ 93 95 void sched_handle_signals( struct core_s * core ); 94 95 /********************************************************************************************* 96 * This function is used by the scheduler of a given core to actually kill a thread that has 97 * the SIG_KILL / SIG_SUICIDE signal set (following a thread_exit() or a thread_kill() event). 98 * - It checks that the thread has released all locks => panic otherwise... 99 * - It removes the thread from the scheduler. 100 * - It resets the SIG_KILL signal to acknowledge the killer. 101 * - In case of SIG_SUICIDE, it removes the detached thread from its process, and destroys it. 102 ********************************************************************************************* 103 * @ thread : local pointer on the thread descriptor. 104 ********************************************************************************************/ 105 void sched_kill_thread( struct thread_s * thread ); 106 96 107 97 /********************************************************************************************* … 123 113 124 114 /********************************************************************************************* 115 * This function unlinks a thread identified by the <thread> pointer from its process. 116 * It is called by the sched_handle_signals() function when one EXIT or KILL signal is set, 117 * and it implements the first step of a thread destruction, but it can also be directly called by a local killer thread. 118 * - It detaches the thread from the scheduler. 119 * - It detaches the thread from the process. 120 * - It detaches the thread from the parent thread when the thread is attached. 121 * - It destroys the thread descriptor. 122 * - It acknowledges the killer thread in case of kill signal. 123 ********************************************************************************************* 124 * @ thread : pointer on thread to be killed.
125 ********************************************************************************************/ 126 void sched_kill_thread( struct thread_s * thread ); 127 128 /********************************************************************************************* 125 129 * This function displays the internal state of the local core identified by its <lid>. 126 130 ********************************************************************************************* -
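Because sched_handle_signals() is now invoked only from sched_yield(), a kernel thread that busy-waits without yielding delays signal handling on its core until the scheduler next runs. A minimal sketch of the expected waiting pattern, using only sched_yield() as declared here; the condition test is a hypothetical placeholder:

    while( event_is_ready() == false )      // event_is_ready() : hypothetical condition
    {
        // descheduling lets the core run other threads, and lets its scheduler
        // process pending KILL / EXIT signals at each yield
        sched_yield( "wait event" );
    }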
trunk/kernel/kern/signal.c
r407 r409 2 2 * signal.c - signal-management related operations implementation 3 3 * 4 * Author Alain Greiner (2016,2017) 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017) 5 7 * 6 8 * Copyright (c) UPMC Sorbonne Universites … … 23 25 24 26 #include <hal_types.h> 25 #include <hal_atomic.h>26 27 #include <printk.h> 27 #include <thread.h>28 #include <spinlock.h>29 28 #include <signal.h> 30 31 //////////////////////////////////////32 void signal_rise( process_t * process,33 uint32_t sig_id )34 {35 // get the lock protecting the set of local threads36 spinlock_lock( &process->th_lock );37 38 // loop on local threads39 thread_t * thread;40 uint32_t i;41 for( i = 0 ; i < process->th_nr ; i++ )42 {43 thread = process->th_tbl[i];44 hal_atomic_or( &thread->signals , (1 << sig_id) );45 46 signal_dmsg("\n[DBG] %s : thread %x in process %x received signal %d\n",47 __FUNCTION__, thread->trdid , process->pid , sig_id );48 }49 50 // release the lock51 spinlock_unlock( &process->th_lock );52 53 } // end signal_rise()54 29 55 30 /* -
trunk/kernel/kern/signal.h
r23 r409 70 70 71 71 #define SIG_DEFAULT_MASK 0xFFEEFFFF 72 #define SIG_DEFAULT_STACK_SIZE 2048 72 73 73 74 74 /**** Forward declarations ****/ -
trunk/kernel/kern/thread.c
r408 r409 112 112 ///////////////////////////////////////////////////////////////////////////////////// 113 113 // This static function initializes a thread descriptor (kernel or user). 114 // It can be called by the fourfunctions:114 // It can be called by the three functions: 115 115 // - thread_user_create() 116 116 // - thread_user_fork() … … 164 164 165 165 thread->local_locks = 0; 166 list_root_init( &thread->locks_root );167 168 166 thread->remote_locks = 0; 167 168 #if CONFIG_LOCKS_DEBUG 169 list_root_init( &thread->locks_root ); 169 170 xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) ); 171 #endif 170 172 171 173 thread->u_stack_base = u_stack_base; … … 177 179 thread->entry_args = args; // thread function arguments 178 180 thread->flags = 0; // all flags reset 179 thread->signals = 0; // no pending signal180 181 thread->errno = 0; // no error detected 181 182 thread->fork_user = 0; // no user defined placement for fork 182 183 thread->fork_cxy = 0; // user defined target cluster for fork 183 184 // thread blocked 185 thread->blocked = THREAD_BLOCKED_GLOBAL; 184 thread->blocked = THREAD_BLOCKED_GLOBAL; 186 185 187 186 // reset children list … … 195 194 // reset thread info 196 195 memset( &thread->info , 0 , sizeof(thread_info_t) ); 196 197 // initializes join_lock 198 remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) ); 197 199 198 200 // initialise signature … … 296 298 return EINVAL; 297 299 } 298 299 // set LOADABLE flag300 thread->flags = THREAD_FLAG_LOADABLE;301 300 302 301 // set DETACHED flag if required … … 593 592 uint32_t tm_start; 594 593 uint32_t tm_end; 595 reg_t s tate;594 reg_t save_sr; 596 595 597 596 process_t * process = thread->process; … … 614 613 // release memory allocated for CPU context and FPU context 615 614 hal_cpu_context_destroy( thread ); 616 hal_fpu_context_destroy( thread );615 if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread ); 617 616 618 617 // release FPU if required 619 618 // TODO This should be done before calling thread_destroy() 620 hal_disable_irq( &s tate);619 hal_disable_irq( &save_sr ); 621 620 if( core->fpu_owner == thread ) 622 621 { … … 624 623 hal_fpu_disable(); 625 624 } 626 hal_restore_irq( s tate);625 hal_restore_irq( save_sr ); 627 626 628 627 // remove thread from process th_tbl[] … … 668 667 xlist_add_first( root , entry ); 669 668 hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 ); 670 } 669 670 } // end thread_child_parent_link() 671 671 672 672 /////////////////////////////////////////////////// … … 693 693 // release the lock 694 694 remote_spinlock_unlock( lock ); 695 } 695 696 } // thread_child_parent_unlink() 696 697 697 698 ///////////////////////////////////////////////// 698 699 inline void thread_set_signal( thread_t * thread, 699 uint32_t mask ) 700 { 701 hal_atomic_or( &thread->signals , mask ); 700 uint32_t * sig_rsp_count ) 701 { 702 reg_t save_sr; // for critical section 703 704 // get pointer on thread thread scheduler 705 scheduler_t * thread_sched = &thread->core->scheduler; 706 707 // wait scheduler ready to handle a new signal 708 while( thread_sched->sig_pending ) asm volatile( "nop" ); 709 710 // enter critical section 711 hal_disable_irq( &save_sr ); 712 713 // set signal in thread scheduler 714 thread_sched->sig_pending = true; 715 716 // set signal in thread thread "flags" 717 hal_atomic_or( &thread->flags , THREAD_FLAG_SIGNAL ); 718 719 // set pointer on responses counter in thread thread 720 thread->sig_rsp_count = sig_rsp_count; 721 
722 // exit critical section 723 hal_restore_irq( save_sr ); 724 702 725 hal_fence(); 703 } 704 705 /////////////////////////////////////////////////// 706 inline void thread_reset_signal( thread_t * thread, 707 uint32_t mask ) 708 { 709 hal_atomic_and( &thread->signals , ~mask ); 726 727 } // thread_set_signal() 728 729 //////////////////////////////////////////////////// 730 inline void thread_reset_signal( thread_t * thread ) 731 { 732 reg_t save_sr; // for critical section 733 734 // get pointer on target thread scheduler 735 scheduler_t * sched = &thread->core->scheduler; 736 737 // check signal pending in scheduler 738 assert( sched->sig_pending , __FUNCTION__ , "no pending signal" ); 739 740 // enter critical section 741 hal_disable_irq( &save_sr ); 742 743 // reset signal in scheduler 744 sched->sig_pending = false; 745 746 // reset signal in thread "flags" 747 hal_atomic_and( &thread->flags , ~THREAD_FLAG_SIGNAL ); 748 749 // reset pointer on responses counter 750 thread->sig_rsp_count = NULL; 751 752 // exit critical section 753 hal_restore_irq( save_sr ); 754 710 755 hal_fence(); 711 } 756 757 } // thread_reset_signal() 712 758 713 759 //////////////////////////////// … … 760 806 } // end thread_unblock() 761 807 762 /////////////////////763 error_t thread_exit()764 {765 reg_t sr_save;766 767 thread_t * this = CURRENT_THREAD;768 769 // test if this thread can be descheduled770 if( !thread_can_yield() )771 {772 printk("ERROR in %s : locks not released for thread %x in process %x on core[%x,%d]\n",773 __FUNCTION__, this->trdid, this->process->pid, local_cxy, this->core->lid );774 return EINVAL;775 }776 777 if( this->flags & THREAD_FLAG_DETACHED )778 {779 // if detached set signal and set blocking cause atomically780 hal_disable_irq( &sr_save );781 thread_set_signal( this , THREAD_SIG_KILL );782 thread_block( this , THREAD_BLOCKED_EXIT );783 hal_restore_irq( sr_save );784 }785 else786 {787 // if attached, set blocking cause788 thread_block( this , THREAD_BLOCKED_EXIT );789 }790 791 // deschedule792 sched_yield( "exit" );793 return 0;794 795 } // end thread_exit()796 797 808 ///////////////////////////////////// 798 809 void thread_kill( thread_t * target ) 799 810 { 800 // set SIG_KILL signal in target thread descriptor 801 thread_set_signal( target , THREAD_SIG_KILL ); 811 volatile uint32_t sig_rsp_count = 1; // responses counter 812 813 thread_t * killer = CURRENT_THREAD; 814 815 kill_dmsg("\n[DBG] %s : killer thread %x enter for target thread %x\n", 816 __FUNCTION__, local_cxy, killer->trdid , target trdid ); 802 817 803 818 // set the global blocked bit in target thread descriptor. 804 819 thread_block( target , THREAD_BLOCKED_GLOBAL ); 805 820 806 // send an IPI to schedule the target thread core. 807 dev_pic_send_ipi( local_cxy , target->core->lid ); 821 // request target scheduler to deschedule the target thread 822 // when killer thread is not running on same core as target thread 823 if( killer->core->lid != target->core->lid ) 824 { 825 // set signal in target thread descriptor and in target scheduler 826 thread_set_signal( target , (uint32_t *)(&sig_rsp_count) ); 827 828 // send an IPI to the target thread core. 
829 dev_pic_send_ipi( local_cxy , target->core->lid ); 830 831 // poll the response 832 while( 1 ) 833 { 834 // exit when response received from scheduler 835 if( sig_rsp_count == 0 ) break; 836 837 // deschedule without blocking 838 hal_fixed_delay( 1000 ); 839 } 840 } 841 842 // release FPU if required 843 if( target->core->fpu_owner == target ) target->core->fpu_owner = NULL; 844 845 // detach thread from parent if attached 846 if( (target->flags & THREAD_FLAG_DETACHED) == 0 ) 847 thread_child_parent_unlink( target->parent , XPTR( local_cxy , target ) ); 848 849 // detach thread from process 850 process_remove_thread( target ); 851 852 // remove thread from scheduler 853 sched_remove_thread( target ); 854 855 // release memory allocated to target thread 856 thread_destroy( target ); 857 858 kill_dmsg("\n[DBG] %s : killer thread %x exit for target thread %x\n", 859 __FUNCTION__, local_cxy, killer->trdid , target->trdid ); 808 860 809 861 } // end thread_kill() -
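A caller-side sketch of how the local and remote cases could be dispatched around thread_kill(), as the thread.h comments below require. This is illustration only: how the target cluster (target_cxy) and the local pointer (target_ptr) are obtained from the TRDID is left to the caller, and both identifiers are hypothetical names; thread_kill() and rpc_thread_kill_client() are used as declared in this changeset.

    if( target_cxy == local_cxy )                           // target thread is in this cluster
    {
        thread_kill( target_ptr );                          // direct local kill
    }
    else                                                    // target thread is in a remote cluster
    {
        rpc_thread_kill_client( target_cxy , target_ptr );  // delegate to the remote kernel
    }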
trunk/kernel/kern/thread.h
r408 r409 44 44 45 45 /*************************************************************************************** 46 * These macros are used to compose or decompose global thread identifier (TRDID)46 * These macros are used to compose or decompose the global thread identifier (TRDID) 47 47 * to or from cluster identifier / local thread index (CXY , LTID) 48 48 **************************************************************************************/ … … 69 69 **************************************************************************************/ 70 70 71 #define THREAD_FLAG_LOADABLE 0x0001 /*! This thread has not been executed yet */ 72 #define THREAD_FLAG_DETACHED 0x0002 /*! This thread is detached from parent */ 73 #define THREAD_FLAG_JOIN 0x0004 /*! Parent thread made a join */ 74 #define THREAD_FLAG_EXIT 0x0008 /*! This thread made an exit */ 75 #define THREAD_FLAG_SCHED 0x0010 /*! Scheduling required for this thread */ 76 77 /*************************************************************************************** 78 * This defines the masks associated to the thread signals. 79 **************************************************************************************/ 80 81 #define THREAD_SIG_KILL 0x0001 /*! This thread killed by another thread */ 82 #define THREAD_SIG_SUICIDE 0x0002 /*! This thread required exit */ 71 #define THREAD_FLAG_DETACHED 0x0001 /*! This thread is detached from parent */ 72 #define THREAD_FLAG_JOIN_DONE 0x0002 /*! Parent thread made a join */ 73 #define THREAD_FLAG_SCHED 0x0004 /*! Scheduling required for this thread */ 74 #define THREAD_FLAG_SIGNAL 0x0004 /*! Acknowledge of descheduling required */ 83 75 84 76 /*************************************************************************************** … … 89 81 #define THREAD_BLOCKED_IO 0x0002 /*! thread wait IO operation completion */ 90 82 #define THREAD_BLOCKED_MAPPER 0x0004 /*! thread wait mapper */ 91 #define THREAD_BLOCKED_JOIN 0x0008 /*! thread blocked in join / wait exit */ 92 #define THREAD_BLOCKED_EXIT 0x0010 /*! thread blocked in exit / wait join */ 93 #define THREAD_BLOCKED_KILL 0x0020 /*! thread received kill signal */ 94 #define THREAD_BLOCKED_SEM 0x0040 /*! thread wait semaphore */ 95 #define THREAD_BLOCKED_PAGE 0x0080 /*! thread wait page access */ 96 #define THREAD_BLOCKED_USERSYNC 0x0100 /*! thread wait POSIX (cond/mutex/barrier) */ 83 #define THREAD_BLOCKED_EXIT 0x0008 /*! thread blocked in join / wait exit */ 84 #define THREAD_BLOCKED_JOIN 0x0010 /*! thread blocked in exit / wait join */ 85 #define THREAD_BLOCKED_SEM 0x0020 /*! thread wait semaphore */ 86 #define THREAD_BLOCKED_PAGE 0x0040 /*! thread wait page access */ 87 #define THREAD_BLOCKED_USERSYNC 0x0100 /*! thread wait (cond/mutex/barrier) */ 97 88 #define THREAD_BLOCKED_RPC 0x0200 /*! thread wait RPC completion */ 98 99 #define THREAD_BLOCKED_DEV_ISR 0x4000 /*! thread DEV wait ISR */ 89 #define THREAD_BLOCKED_DEV_ISR 0x0400 /*! thread DEV wait ISR */ 100 90 101 91 /*************************************************************************************** … … 156 146 xptr_t parent; /*! extended pointer on parent thread */ 157 147 158 void * exit_value; /*! exit_value used in case of join */159 160 148 uint32_t local_locks; /*! number of local locks owned by thread */ 161 list_entry_t locks_root; /*! root of local locks list */ 162 163 remote_spinlock_t * flags_lock; /*! lock protecting the flags */ 164 165 uint32_t remote_locks; /*! number of local locks owned by thread */ 166 xlist_entry_t xlocks_root; /*! 
root of remote locks list */ 149 uint32_t remote_locks; /*! number of remote locks owned by thread */ 150 151 remote_spinlock_t * join_lock; /*! lock protecting the join/exit */ 152 void * join_value; /*! exit_value used in case of join */ 153 xptr_t join_xp; /*! extended pointer on joining thread */ 154 155 uint32_t * sig_rsp_count; /*! pointer on signal response counter */ 167 156 168 157 intptr_t u_stack_base; /*! user stack base address */ … … 173 162 174 163 uint32_t flags; /*! bit vector of flags */ 175 uint32_t signals; /*! bit vector of (KILL / SUICIDE) signals */176 164 uint32_t blocked; /*! bit vector of blocking causes */ 177 165 … … 203 191 204 192 xlist_entry_t wait_list; /*! member of threads blocked on same cond */ 193 194 #if CONFIG_LOCKS_DEBUG 195 list_entry_t locks_root; /*! root of list of locks taken */ 196 xlist_entry_t xlocks_root; /*! root of xlist of remote locks taken */ 197 #endif 205 198 206 199 thread_info_t info; /*! embedded thread_info_t */ … … 311 304 312 305 /*************************************************************************************** 313 * This function releases the physical memory allocated for a thread descriptor 314 * in the local cluster. It can be used for both an user and a kernel thread. 315 * The physical memory dynamically allocated in the HEAP or MMAP zones by an user 316 * thread will be released when the process is killed, and the page table flushed. 306 * This function releases the physical memory allocated for a thread in a given cluster. 307 * This include the thread descriptor itself, the associated CPU and FPU context, and 308 * the physical memory allocated for an user thread local stack. 317 309 *************************************************************************************** 318 310 * @ thread : pointer on the thread descriptor to release. … … 353 345 354 346 /*************************************************************************************** 355 * This function atomically sets a signal in a thread descriptor. 347 * This function is used by a killer thread running in the same cluster as a target 348 * thread to request the scheduler of the target to call the thread_handle_signal() 349 * at the next context switch, to confirm that the target thread is blocked and 350 * not currently running. This function executes atomically the following actions : 351 * - it set the sig_pending flag in the target scheduler descriptor. 352 * - it set the SIG flag in the "flags" field of the target thread descriptor. 353 * - It registers the responses counter pointer in the target thread descriptor. 354 * The sig_pending flag is handled as a set/reset flip-flop by the killer thread 355 * and by the target scheduler. 356 *************************************************************************************** 357 * @ target : local pointer on target thread. 358 * @ sig_rsp_count : local pointer on responses counter. 359 **************************************************************************************/ 360 void thread_set_signal( thread_t * thread, 361 uint32_t * sig_rsp_count ); 362 363 /*************************************************************************************** 364 * This function is used by the sched_handle_signal() function executed by a scheduler 365 * to reset a pending signal in both a target <thread> descriptor, and in the target 366 * thread scheduler. 356 367 *************************************************************************************** 357 368 * @ thread : local pointer on target thread. 
358 * @ mask : mask on selected signal. 359 **************************************************************************************/ 360 inline void thread_set_signal( thread_t * thread, 361 uint32_t mask ); 362 363 /*************************************************************************************** 364 * This function resets a signal in a thread descriptor. 365 *************************************************************************************** 366 * @ thread : local pointer on target thread. 367 * @ mask : mask on selected signal. 368 **************************************************************************************/ 369 inline void thread_reset_signal( thread_t * thread, 370 uint32_t mask ); 369 **************************************************************************************/ 370 void thread_reset_signal( thread_t * thread ); 371 371 372 372 /*************************************************************************************** … 385 385 386 386 /*************************************************************************************** 387 * This function is used by the calling thread to suicide. 388 * All locks must be previously released. The scenario depends on the DETACHED flag. 389 * if detached : 390 * 1) the calling thread sets the SIG_SUICIDE bit in the "signals" bit_vector, 391 * registers the BLOCKED_GLOBAL bit in the "blocked" bit_vector, and deschedules. 392 * 2) the scheduler, detecting the SIG_SUICIDE bit, removes the thread from the 393 * scheduler list, removes the thread from its process, and destroys the thread. 394 * if attached : 395 * 1) the calling thread simply sets the BLOCKED_EXIT bit in the "blocked" bit vector 396 * and deschedules. 397 * 2) The SIG_KILL bit and BLOCKED_SIGNAL bits are set by the parent thread when 398 * executing the pthread_join(), and detecting the BLOCKED_EXIT bit. 399 * The scenario is a standard kill as described below. 400 *************************************************************************************** 401 * @ returns 0 if success / returns EINVAL if locks_count is not zero. 402 **************************************************************************************/ 403 error_t thread_exit(); 404 405 /*************************************************************************************** 406 * This function requests to kill a local target thread, with the following scenario: 407 * 1. This function sets the BLOCKED_GLOBAL bit in target thread "blocked" bit_vector, 408 * sets the SIG_KILL bit in target thread "signals" bit_vector, and sends an IPI 409 * to the target thread core to force scheduling. 410 * 2. The scheduler, detecting the SIG_KILL set, removes the thread from the scheduler 411 * list, and resets the SIG_KILL bit to acknowledge the killer. 412 * 3. The caller of this function, (such as the process_kill() function), must poll 413 * SIG_KILL bit until reset, detach the thread from its parent if the thread is 414 * attached, remove the thread from its process, and destroy the thread. 415 * 416 * NOTE: The third step must be done by the caller to allow the process_kill() 417 * function to parallelize the work on all schedulers in a given cluster. 387 * This function is called to handle the "pthread_cancel" system call. 388 * It allows a killer thread to kill one single target thread. 389 * The killer thread must be running in the same cluster as the target thread. 390 * If not, the client thread must use the RPC_THREAD_KILL.
391 * - When the killer thread is running on the same core as the target thread, 392 * this function simply detaches the target thread from the scheduler, 393 * detaches it from the parent thread if it is attached, detaches it from the 394 * local process descriptor, and releases all memory allocated to the thread. 395 * - When the killer thread is running on a different core than the target thread, 396 * the killer sends a signal to the target thread scheduler, requesting this 397 * scheduler to confirm that the target thread is blocked and not running. 398 * Then, it executes the same actions as described above. 418 399 *************************************************************************************** 419 400 * @ thread : local pointer on the target thread.
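The TRDID compose / decompose macros mentioned at the top of thread.h can be pictured with a small example. The 16-bit / 16-bit split and the macro names below are assumptions made for illustration only; the actual layout is the one defined by the real macros in thread.h, which this excerpt does not reproduce.

    // hypothetical illustration of a (CXY , LTID) <-> TRDID packing
    #define EX_TRDID( cxy , ltid )       ((uint32_t)(((cxy) << 16) | ((ltid) & 0xFFFF)))
    #define EX_CXY_FROM_TRDID( trdid )   ((uint32_t)((trdid) >> 16))
    #define EX_LTID_FROM_TRDID( trdid )  ((uint32_t)((trdid) & 0xFFFF))

    // example : local thread index 3 in cluster 0x25 gives TRDID 0x00250003 ;
    // EX_CXY_FROM_TRDID( 0x00250003 ) == 0x25 and EX_LTID_FROM_TRDID( 0x00250003 ) == 3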