Changeset 436 for trunk/kernel/kern
Timestamp: Mar 7, 2018, 9:02:03 AM
Location:  trunk/kernel/kern
Files:     11 edited
trunk/kernel/kern/chdev.c (r435 → r436)

@@ -179 +179 @@
 
     // block current thread
-    thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );
+    thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IO );
 
     // register client thread in waiting queue
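Note — this one-line change is part of a changeset-wide interface change: thread_block() now takes an extended pointer (xptr_t) instead of a local thread_t pointer, so the blocked thread can live in any cluster. A minimal sketch of the new calling pattern (thread_block(), XPTR(), local_cxy and THREAD_BLOCKED_GLOBAL all appear in the diffs of this changeset; remote_cxy and thread_ptr are hypothetical caller variables):

    // build an extended pointer = (cluster identifier , local pointer)
    xptr_t thread_xp = XPTR( remote_cxy , thread_ptr );

    // the same call now works whether the target thread is local or remote
    thread_block( thread_xp , THREAD_BLOCKED_GLOBAL );

A purely local caller, as in chdev.c above, simply wraps its pointer with XPTR( local_cxy , ... ).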
trunk/kernel/kern/cluster.c (r433 → r436)

@@ -237 +237 @@  cluster_get_owner_process_from_pid()
 {
     xptr_t      root_xp;      // xptr on root of list of processes in owner cluster
-    xptr_t      lock_xp;      // xptr ron lock protecting this list
+    xptr_t      lock_xp;      // xptr on lock protecting this list
     xptr_t      iter_xp;      // iterator
     xptr_t      current_xp;   // xptr on current process descriptor

@@ -277 +277 @@
     if( found ) return current_xp;
     else        return XPTR_NULL;
-}
+}  // end cluster_get_owner_process_from_pid()

@@ -440 +441 @@  cluster_process_copies_link()
 void cluster_process_copies_link( process_t * process )
 {
-    uint32_t irq_state;
+    reg_t    irq_state;
     pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
+
+#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
+printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
+__FUNCTION__ , local_cxy , process , cycle );
+#endif
 
     // get owner cluster identifier CXY and process LPID

@@ -460 +468 @@
     remote_spinlock_lock_busy( copies_lock , &irq_state );
 
+    // add copy to copies_list
     xlist_add_first( copies_root , copies_entry );
     hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , 1 );
 
     // release lock protecting copies_list[lpid]
     remote_spinlock_unlock_busy( copies_lock , irq_state );
-}
+
+#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
+printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
+__FUNCTION__ , local_cxy , process , cycle );
+#endif
+
+}  // end cluster_process_copies_link()

@@ -472 +489 @@  cluster_process_copies_unlink()
 {
     uint32_t irq_state;
     pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
+
+#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
+printk("\n[DBG] %s enters / cluster %x / process %x / cycle %d\n",
+__FUNCTION__ , local_cxy , process , cycle );
+#endif
 
     // get owner cluster identifier CXY and process LPID

@@ -481 +505 @@
     // get extended pointer on lock protecting copies_list[lpid]
-    xptr_t copies_lock = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );
+    xptr_t copies_lock = XPTR( owner_cxy , &pm->copies_lock[lpid] );
 
     // get extended pointer on the local copies_list entry

@@ -487 +511 @@
     remote_spinlock_lock_busy( copies_lock , &irq_state );
 
+    // remove copy from copies_list
     xlist_unlink( copies_entry );
     hal_remote_atomic_add( XPTR( owner_cxy , &pm->copies_nr[lpid] ) , -1 );
 
     // release lock protecting copies_list[lpid]
     remote_spinlock_unlock_busy( copies_lock , irq_state );
-}
+
+#if CONFIG_DEBUG_CLUSTER_PROCESS_COPIES
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_CLUSTER_PROCESS_COPIES < cycle )
+printk("\n[DBG] %s exit / cluster %x / process %x / cycle %d\n",
+__FUNCTION__ , local_cxy , process , cycle );
+#endif
+
+}  // end cluster_process_copies_unlink()
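Note — the copies_lock fix above is easy to miss: an xptr_t is itself the (cluster , address) pair naming a remote object, so it must be built with XPTR(), not fetched through hal_remote_lwd(), which reads 64 bits of remote data at that address. A sketch of the two forms, using only names from the diff:

    // r433 (wrong) : reads the lock's *content* and uses it as a pointer
    xptr_t copies_lock = hal_remote_lwd( XPTR( owner_cxy , &pm->copies_lock[lpid] ) );

    // r436 (fixed) : names the remote lock object itself
    xptr_t copies_lock = XPTR( owner_cxy , &pm->copies_lock[lpid] );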
trunk/kernel/kern/kernel_init.c (r435 → r436)

@@ -125 +125 @@
     // these debug variables are used to analyse the sys_read() syscall timing
 
-#if CONFIG_READ_DEBUG
+#if CONFIG_DEBUG_SYS_READ
 uint32_t   enter_sys_read;
 uint32_t   exit_sys_read;

@@ -150 +150 @@
     // these debug variables are used to analyse the sys_write() syscall timing
 
-#if CONFIG_WRITE_DEBUG
+#if CONFIG_DEBUG_SYS_WRITE
 uint32_t   enter_sys_write;
 uint32_t   exit_sys_write;
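Note — the renamed symbols align kernel_init.c with the CONFIG_DEBUG_* convention used throughout this changeset: the macro value is a cycle threshold, so a zero value compiles the trace out and a non-zero value enables it only after that cycle. A minimal sketch of the pattern (as it appears in cluster.c and process.c above, shown here with the renamed CONFIG_DEBUG_SYS_READ symbol purely for illustration):

    #if CONFIG_DEBUG_SYS_READ
    uint32_t cycle = (uint32_t)hal_get_cycles();
    if( CONFIG_DEBUG_SYS_READ < cycle )
    printk("\n[DBG] %s : enter / cycle %d\n", __FUNCTION__ , cycle );
    #endif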
trunk/kernel/kern/process.c (r435 → r436)

@@ -365 +365 @@  process_destroy()
     cxy_t       parent_cxy;
     xptr_t      children_lock_xp;
-    xptr_t      copies_lock_xp;
 
     assert( (process->th_nr == 0) , __FUNCTION__ ,

@@ -377 +376 @@
 #endif
 
-    // get local process manager pointer
-    pmgr_t * pmgr = &LOCAL_CLUSTER->pmgr;
-
-    // remove process from local_list in cluster manager
-    remote_spinlock_lock( XPTR( local_cxy , &pmgr->local_lock ) );
-    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
-    remote_spinlock_unlock( XPTR( local_cxy , &pmgr->local_lock ) );
-
-    // get extended pointer on copies_lock in owner cluster manager
-    cxy_t  owner_cxy = CXY_FROM_PID( process->pid );
-    lpid_t lpid      = LPID_FROM_PID( process->pid );
-    copies_lock_xp   = XPTR( owner_cxy , &pmgr->copies_lock[lpid] );
-
-    // remove local process from copies_list
-    remote_spinlock_lock( copies_lock_xp );
-    xlist_unlink( XPTR( local_cxy , &process->copies_list ) );
-    remote_spinlock_unlock( copies_lock_xp );
-
-    // for reference process only
-    if( XPTR( local_cxy , process ) == process->ref_xp )
-    {
-        // remove reference process from txt_list
-        process_txt_detach( process );
-
+    // remove process from local_list in local cluster manager
+    cluster_process_local_unlink( process );
+
+    // remove process from copies_list in owner cluster manager
+    cluster_process_copies_unlink( process );
+
+    // remove process from children_list if process is in owner cluster
+    if( CXY_FROM_PID( process->pid ) == local_cxy )
+    {
         // get pointers on parent process
         parent_xp = process->parent_xp;

@@ -461 +445 @@  process_sigaction()
     xptr_t       process_xp;    // extended pointer on process copy
     cxy_t        process_cxy;   // process copy cluster identifier
-    process_t  * process_ptr;   // local pointer on process copy
-    uint32_t     responses;     // number of remote process copies
-    uint32_t     rsp_count;     // used to assert number of copies
-    rpc_desc_t   rpc;           // rpc descriptor allocated in stack
+    reg_t        save_sr;       // for critical section
+    rpc_desc_t   rpc;           // shared RPC descriptor
 
     thread_t * client = CURRENT_THREAD;

@@ -477 +459 @@
 #endif
 
-    // get local pointer on local cluster manager
+    // get pointer on local cluster manager
     cluster = LOCAL_CLUSTER;

@@ -484 +465 @@
     // get root of list of copies, lock, and number of copies from owner cluster
-    responses = hal_remote_lw ( XPTR( owner_cxy , &cluster->pmgr.copies_nr[lpid] ) );
-    root_xp   = hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] ) );
-    lock_xp   = hal_remote_lwd( XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] ) );
-
-    rsp_count = 0;
+    root_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
+    lock_xp   = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
 
     // check action type

@@ -494 +473 @@
     (action_type == UNBLOCK_ALL_THREADS )), __FUNCTION__ , "illegal action type" );
 
-    // initialise rpc descriptor
-    rpc.index    = RPC_PROCESS_SIGACTION;
-    rpc.response = responses;
-    rpc.blocking = false;
-    rpc.thread   = client;
+    // allocate a - shared - RPC descriptor in client thread stack
+    // it can be shared because all parallel, non-blocking, server threads
+    // use the same input arguments, and use the shared RPC response field
+    // but use
+
+    // the client thread makes the following sequence:
+    // 1. mask interrupts
+    // 2. block itself
+    // 3. send RPC requests to all copies
+    // 4. unmask interrupts
+    // 5. deschedule
+
+    // mask IRQs
+    hal_disable_irq( &save_sr);
+
+    // client register blocking condition for itself
+    thread_block( XPTR( local_cxy , client ) , THREAD_BLOCKED_RPC );
 
     // take the lock protecting the copies
     remote_spinlock_lock( lock_xp );
 
-    // send RPCs to remote clusters
+    // initialize shared RPC descriptor
+    rpc.response = 0;
+    rpc.blocking = false;
+    rpc.index    = RPC_PROCESS_SIGACTION;
+    rpc.thread   = client;
+    rpc.lid      = client->core->lid;
+    rpc.args[0]  = action_type;
+    rpc.args[1]  = pid;
+
+    // send RPCs to all clusters containing process copiess
     XLIST_FOREACH( root_xp , iter_xp )
     {
+
+#if CONFIG_DEBUG_PROCESS_SIGACTION
+if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+printk("\n[DBG] %s : send RPC to %s process %x in cluster %x\n",
+__FUNCTION__ , process_action_str( action_type ) , pid , process_cxy );
+#endif
+        // atomically increment responses counter
+        hal_atomic_add( (void *)&rpc.response , 1 );
+
         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
         process_cxy = GET_CXY( process_xp );
-        process_ptr = GET_PTR( process_xp );
-
-#if CONFIG_DEBUG_PROCESS_SIGACTION
-if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
-printk("\n[DBG] %s : send RPC to cluster %x\n", __FUNCTION__ , process_cxy );
-#endif
-
-        // check PID
-        assert( (hal_remote_lw( XPTR( process_cxy , &process_ptr->pid) ) == pid),
-        __FUNCTION__ , "unconsistent PID value\n" );
-
-        rpc.args[0] = (uint64_t)action_type;
-        rpc.args[1] = (uint64_t)pid;
+
+        // call RPC in target cluster
         rpc_process_sigaction_client( process_cxy , &rpc );
-        rsp_count++;
     }

@@ -528 +525 @@
     remote_spinlock_unlock( lock_xp );
 
-    // check number of copies...
-    assert( (rsp_count == responses) , __FUNCTION__ ,
-    "unconsistent number of process copies : rsp_count = %d / responses = %d",
-    rsp_count , responses );
-
-    // block and deschedule to wait RPC responses
-    thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
-    sched_yield("BLOCKED on RPC_PROCESS_SIGACTION");
+    // restore IRQs
+    hal_restore_irq( save_sr);
+
+    // client deschedule : will be unblocked by the last RPC server thread
+    sched_yield("blocked on rpc_process_sigaction");
 
 #if CONFIG_DEBUG_PROCESS_SIGACTION

@@ -541 +535 @@
 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
 printk("\n[DBG] %s : thread %x exit after %s process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , client, process_action_str( action_type ) ,
-process->pid , local_cxy , cycle );
+__FUNCTION__ , client, process_action_str( action_type ) , pid , local_cxy , cycle );
 #endif

@@ -553 +546 @@  process_block_threads()
     thread_t          * this;          // pointer on calling thread
     uint32_t            ltid;          // index in process th_tbl
+    cxy_t               owner_cxy;     // target process owner cluster
     uint32_t            count;         // requests counter
-    volatile uint32_t   rsp_count;     // responses counter
+    volatile uint32_t   ack_count;     // scheduler acknowledge counter
 
     // get calling thread pointer
     this = CURRENT_THREAD;
+
+    // get target process owner cluster
+    owner_cxy = CXY_FROM_PID( process->pid );
 
 #if CONFIG_DEBUG_PROCESS_SIGACTION

@@ -569 +566 @@
     spinlock_lock( &process->th_lock );
 
-    // initialize local responses counter
-    rsp_count = process->th_nr;
-
-    // loop on process threads to block and deschedule all threads in cluster
+    // loop to block all threads but the main thread
     // we use both "ltid" and "count" because it can exist "holes" in th_tbl
-    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
+    for( ltid = 0 , count = 0 , ack_count = 0 ; count < process->th_nr ; ltid++ )
     {
         target = process->th_tbl[ltid];
 
-        assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
-
-        if( target != NULL )             // thread found
+        if( target != NULL )             // thread exist
         {
             count++;
 
-            // - if the calling thread and the target thread are on the same core,
-            //   we block the target thread, we don't need confirmation from scheduler,
-            //   and we simply decrement the responses counter.
-            // - if the calling thread and the target thread are not running on the same
-            //   core, we ask the target scheduler to acknowlege the blocking
-            //   to be sure that the target thread is not running.
-
-            if( this->core->lid == target->core->lid )
+            // main thread should not be deleted
+            if( (ltid != 0) || (owner_cxy != local_cxy) )
             {
                 // set the global blocked bit in target thread descriptor.
-                thread_block( target , THREAD_BLOCKED_GLOBAL );
-
-                // decrement responses counter
-                hal_atomic_add( (void *)&rsp_count , -1 );
-            }
-            else
-            {
-                // set the global blocked bit in target thread descriptor.
-                thread_block( target , THREAD_BLOCKED_GLOBAL );
-
-                // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
-                thread_set_req_ack( target , (void *)&rsp_count );
-
-                // force scheduling on target thread
-                dev_pic_send_ipi( local_cxy , target->core->lid );
+                thread_block( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
+
+                // - if the calling thread and the target thread are on the same core,
+                //   we don't need confirmation from scheduler,
+                // - if the calling thread and the target thread are not running on the same
+                //   core, we ask the target scheduler to acknowlege the blocking
+                //   to be sure that the target thread is not running.
+
+                if( this->core->lid != target->core->lid )
+                {
+                    // increment responses counter
+                    hal_atomic_add( (void*)&ack_count , 1 );
+
+                    // set FLAG_REQ_ACK and &ack_rsp_count in target descriptor
+                    thread_set_req_ack( target , (uint32_t *)&ack_count );
+
+                    // force scheduling on target thread
+                    dev_pic_send_ipi( local_cxy , target->core->lid );
+                }
             }
         }
     }

@@ -616 +606 @@
     spinlock_unlock( &process->th_lock );
 
-    // wait all responses from schedulers
+    // wait acknowledges
     while( 1 )
     {
-        // exit loop when all local responses received
-        if ( rsp_count == 0 ) break;
+        // exit when all scheduler acknoledges received
+        if ( ack_count == 0 ) break;
 
         // wait 1000 cycles before retry

@@ -656 +646 @@  process_unblock_threads()
     spinlock_lock( &process->th_lock );
 
-    // loop on process threads to unblock all threads in cluster
+    // loop on process threads to unblock all threads
     // we use both "ltid" and "count" because it can exist "holes" in th_tbl
     for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
     {
         target = process->th_tbl[ltid];
-
-        assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
 
         if( target != NULL )             // thread found

@@ -689 +677 @@  process_delete_threads()
 {
     thread_t          * target;        // pointer on target thread
-    thread_t          * this;          // pointer on calling thread
     uint32_t            ltid;          // index in process th_tbl
-    uint32_t            count;         // request counter
-    cxy_t               owner_cxy;     // owner cluster identifier
-
-    // get calling thread pointer
-    this = CURRENT_THREAD;
-    owner_cxy = CXY_FROM_PID( process->pid );
+    uint32_t            count;         // threads counter
 
 #if CONFIG_DEBUG_PROCESS_SIGACTION

@@ -702 +684 @@
 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , this , process->pid , local_cxy , cycle );
+__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
 #endif
 
     spinlock_lock( &process->th_lock );
 
-    // loop on threads to set the REQ_DELETE flag
+    // loop to set the REQ_DELETE flag on all threads but the main
     // we use both "ltid" and "count" because it can exist "holes" in th_tbl
     for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
     {
         target = process->th_tbl[ltid];
 
-        assert( (target != this) , __FUNCTION__ , "calling thread cannot be a target\n" );
-
-        if( target != NULL )             // thread found
+        if( target != NULL )
         {
             count++;
 
-            // the main thread should not be deleted
-            if( (owner_cxy != local_cxy) || (ltid != 0) )
-            {
-                hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE );
-            }
+            thread_kill( XPTR( local_cxy , target ),
+                         false,    // is_exit
+                         true );   // is_forced
         }
     }

@@ -735 +713 @@
 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , this , process->pid , local_cxy , cycle );
+__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy , cycle );
 #endif

@@ -790 +768 @@
 } // end process_get_local_copy()
+
+////////////////////////////////////////////
+pid_t process_get_ppid( xptr_t  process_xp )
+{
+    cxy_t       process_cxy;
+    process_t * process_ptr;
+    xptr_t      parent_xp;
+    cxy_t       parent_cxy;
+    process_t * parent_ptr;
+
+    // get process cluster and local pointer
+    process_cxy = GET_CXY( process_xp );
+    process_ptr = GET_PTR( process_xp );
+
+    // get pointers on parent process
+    parent_xp  = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->parent_xp ) );
+    parent_cxy = GET_CXY( parent_xp );
+    parent_ptr = GET_PTR( parent_xp );
+
+    return hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
+}

@@ -1069 +1068 @@  process_make_fork()
                            parent_process_xp );
 
-#if CONFIG_DEBUG_PROCESS_MAKE_FORK
+#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )

@@ -1088 +1087 @@
     }
 
-#if CONFIG_DEBUG_PROCESS_MAKE_FORK
+#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )

@@ -1114 +1113 @@
     assert( (thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
 
-#if CONFIG_DEBUG_PROCESS_MAKE_FORK
+#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )

@@ -1136 +1135 @@
     vmm_set_cow( process );
 
-#if CONFIG_DEBUG_PROCESS_MAKE_FORK
+#if( CONFIG_DEBUG_PROCESS_MAKE_FORK & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_FORK < cycle )

@@ -1238 +1237 @@  process_make_exec()
     // give TXT ownership to new_process
-    process_txt_set_ownership( XPTR( local_cxy , new_process ) );
+    process_txt_set_ownership( XPTR( local_cxy , new_process) );
 
-#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
+#if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )

@@ -1257 +1256 @@
     }
 
-#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
+#if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )

@@ -1289 +1288 @@
     assert( (new_thread->trdid == 0) , __FUNCTION__ , "main thread must have index 0\n" );
 
-#if CONFIG_DEBUG_PROCESS_MAKE_EXEC
+#if( CONFIG_DEBUG_PROCESS_MAKE_EXEC & 1 )
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_MAKE_EXEC < cycle )

@@ -1312 +1311 @@
     thread_unblock( XPTR( local_cxy , new_thread ) , THREAD_BLOCKED_GLOBAL );
 
+    // detach old_process from TXT
+    process_txt_detach( XPTR( local_cxy , old_process ) );
+
     // request old_thread destruction => old_process destruction
-    thread_block( old_thread , THREAD_BLOCKED_GLOBAL );
+    thread_block( XPTR( local_cxy , old_thread ) , THREAD_BLOCKED_GLOBAL );
     hal_atomic_or( &old_thread->flags , THREAD_FLAG_REQ_DELETE );

@@ -1600 +1602 @@  process_txt_attach()
 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / txt_id = %d / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, process , txt_id, cycle );
+__FUNCTION__, CURRENT_THREAD, process->pid, txt_id, cycle );
 #endif
 
-    // check process is reference
-    assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__ ,
-    "process is not the reference descriptor" );
+    // check process is in owner cluster
+    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
+    "process descriptor not in owner cluster" );
 
     // check terminal index

@@ -1629 +1631 @@
 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x / txt_id = %d / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, process , txt_id , cycle );
+__FUNCTION__, CURRENT_THREAD, process->pid, txt_id , cycle );
 #endif
 
 } // end process_txt_attach()

@@ -1636 +1638 @@  process_txt_detach()
-//////////////////////////////////////////////
-void process_txt_detach( process_t * process )
-{
+/////////////////////////////////////////////
+void process_txt_detach( xptr_t  process_xp )
+{
+    process_t * process_ptr;  // local pointer on process in owner cluster
+    cxy_t       process_cxy;  // process owner cluster
+    pid_t       process_pid;  // process identifier
+    xptr_t      file_xp;      // extended pointer on stdin file
     xptr_t      chdev_xp;     // extended pointer on TXT_RX chdev
     cxy_t       chdev_cxy;    // TXT_RX chdev cluster

@@ -1642 +1648 @@
     xptr_t      lock_xp;      // extended pointer on list lock in chdev
 
+    // get process cluster, local pointer, and PID
+    process_cxy = GET_CXY( process_xp );
+    process_ptr = GET_PTR( process_xp );
+    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
+
+    // check process descriptor in owner cluster
+    assert( (CXY_FROM_PID( process_pid ) == process_cxy ) , __FUNCTION__ ,
+    "process descriptor not in owner cluster" );
+
 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
 uint32_t cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, process, cycle );
-#endif
-
-    // check process is reference
-    assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__ ,
-    "process is not the reference descriptor" );
-
-    // get extended pointer on TXT_RX chdev
-    chdev_xp  = chdev_from_file( process->fd_array.array[0] );
+__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
+#endif
+
+    // release TXT ownership (does nothing if not TXT owner)
+    process_txt_transfer_ownership( process_xp );
+
+    // get extended pointer on process stdin file
+    file_xp = (xptr_t)hal_remote_lwd( XPTR( process_cxy , &process_ptr->fd_array.array[0] ) );
+
+    // get pointers on TXT_RX chdev
+    chdev_xp  = chdev_from_file( file_xp );
     chdev_cxy = GET_CXY( chdev_xp );
     chdev_ptr = (chdev_t *)GET_PTR( chdev_xp );
 
-    // get extended pointer on lock of attached process list
+    // get extended pointer on lock protecting attached process list
     lock_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.lock );
 
     // unlink process from attached process list
     remote_spinlock_lock( lock_xp );
-    xlist_unlink( XPTR( local_cxy , &process->txt_list ) );
+    xlist_unlink( XPTR( process_cxy , &process_ptr->txt_list ) );
     remote_spinlock_unlock( lock_xp );
 
+#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+{
+    xptr_t root_xp = XPTR( chdev_cxy , &chdev_ptr->ext.txt.root );
+    xptr_t iter_xp;
+    XLIST_FOREACH( root_xp , iter_xp )
+    {
+        xptr_t      current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
+        process_t * current_ptr = GET_PTR( current_xp );
+
+        printk("\n[DBG] %s : attached_process %x (pid = %x)\n",
+        __FUNCTION__, current_ptr, current_ptr->pid );
+    }
+}
+#endif
+
 #if CONFIG_DEBUG_PROCESS_TXT_ATTACH
 cycle = (uint32_t)hal_get_cycles();
 if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
-printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, process , cycle );
+printk("\n[DBG] %s : thread %x exit / process %x detached from TXT / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process->pid, cycle );
 #endif

@@ -1680 +1713 @@  process_txt_set_ownership()
     process_t * process_ptr;
     cxy_t       process_cxy;
+    pid_t       process_pid;
     xptr_t      file_xp;
     xptr_t      txt_xp;

@@ -1685 +1719 @@
     cxy_t       txt_cxy;
 
-    // get cluster and local pointer on process
+    // get pointers on process in owner cluster
     process_cxy = GET_CXY( process_xp );
     process_ptr = GET_PTR( process_xp );
+
+    // get process PID
+    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
+
+    // check owner cluster
+    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
+    "process descriptor not in owner cluster\n" );
+
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
+#endif
 
     // get extended pointer on stdin pseudo file

@@ -1700 +1748 @@
     hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , process_xp );
 
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
+#endif
+
 } // end process_txt_set ownership()

@@ -1704 +1759 @@  process_txt_reset_ownership() rewritten as process_txt_transfer_ownership()
-/////////////////////////////////////////////////////
-void process_txt_reset_ownership( xptr_t process_xp )
-{
-    process_t * process_ptr;
-    cxy_t       process_cxy;
-    xptr_t      parent_xp;       // extended pointer on parent process
-    process_t * parent_ptr;
-    cxy_t       parent_cxy;
+////////////////////////////////////////////////////////
+void process_txt_transfer_ownership( xptr_t process_xp )
+{
+    process_t * process_ptr;     // local pointer on process releasing ownership
+    cxy_t       process_cxy;     // process cluster
+    pid_t       process_pid;     // process identifier
     xptr_t      file_xp;         // extended pointer on TXT_RX pseudo file
     xptr_t      txt_xp;          // extended pointer on TXT_RX chdev

@@ -1717 +1770 @@
     xptr_t      owner_xp;        // extended pointer on current TXT_RX owner
     xptr_t      root_xp;         // extended pointer on root of attached process list
+    xptr_t      lock_xp;         // extended pointer on lock protecting attached process list
     xptr_t      iter_xp;         // iterator for xlist
     xptr_t      current_xp;      // extended pointer on current process
     process_t * current_ptr;     // local pointer on current process
     cxy_t       current_cxy;     // cluster for current process
-    pid_t       ppid;            // parent process identifier for current process
-
-    // get cluster and local pointer on process
+
+    // get pointers on process in owner cluster
     process_cxy = GET_CXY( process_xp );
     process_ptr = GET_PTR( process_xp );
+
+    // get process PID
+    process_pid = hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
+
+    // check owner cluster
+    assert( (process_cxy == CXY_FROM_PID( process_pid )) , __FUNCTION__,
+    "process descriptor not in owner cluster\n" );
+
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x enter / process %x / pid %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_ptr, process_pid, cycle );
+#endif
 
     // get extended pointer on stdin pseudo file

@@ -1739 +1806 @@
     txt_id = hal_remote_lw ( XPTR( txt_cxy , &txt_ptr->channel ) );
 
-    // transfer ownership to KSH if required
-    if( (owner_xp == process_xp) && (txt_id > 0) )
-    {
-        // get extended pointer on root of list of attached processes
-        root_xp = hal_remote_lwd( XPTR( txt_cxy , &txt_ptr->ext.txt.root ) );
-
-        // scan attached process list to find KSH process
-        XLIST_FOREACH( root_xp , iter_xp )
+#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : file_ptr %x / txt_ptr %x / txt_id %d / owner_ptr = %x\n",
+__FUNCTION__, GET_PTR(file_xp), txt_ptr, txt_id, GET_PTR(owner_xp) );
+#endif
+
+    // transfer ownership only if process is the TXT owner
+    if( (owner_xp == process_xp) && (txt_id > 0) )
+    {
+        // get extended pointers on root and lock of attached processes list
+        root_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.root );
+        lock_xp = XPTR( txt_cxy , &txt_ptr->ext.txt.lock );
+
+        // get lock
+        remote_spinlock_lock( lock_xp );
+
+        if( process_get_ppid( process_xp ) != 1 )       // process is not KSH
         {
-            current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
-            current_cxy = GET_CXY( current_xp );
-            current_ptr = GET_PTR( current_xp );
-            parent_xp   = hal_remote_lwd( XPTR( current_cxy , &current_ptr->parent_xp ) );
-            parent_cxy  = GET_CXY( parent_xp );
-            parent_ptr  = GET_PTR( parent_xp );
-            ppid        = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
-
-printk("\n@@@ %s : pid = %x / process = %x\n", __FUNCTION__ , current_ptr->pid, current_ptr );
-
-            if( ppid == 1 )  // current is KSH
+
+#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : process is not the KSH process => search the KSH\n", __FUNCTION__ );
+#endif
+            // scan attached process list to find KSH process
+            XLIST_FOREACH( root_xp , iter_xp )
             {
-                // set owner field in TXT chdev
-                hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
-                return;
+                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
+                current_cxy = GET_CXY( current_xp );
+                current_ptr = GET_PTR( current_xp );
+
+                if( process_get_ppid( current_xp ) == 1 )  // current is KSH
+                {
+                    // release lock
+                    remote_spinlock_unlock( lock_xp );
+
+                    // set owner field in TXT chdev
+                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
+
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x exit / process %x to KSH process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_pid,
+hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
+#endif
+                    return;
+                }
             }
+
+            // release lock
+            remote_spinlock_unlock( lock_xp );
+
+            // PANIC if KSH not found
+            assert( false , __FUNCTION__ , "KSH process not found for TXT %d" );
+
+            return;
         }
-
-        assert( false , __FUNCTION__ , "KSH process not found" );
-    }
-}  // end process_txt_reset_ownership()
-
-
-//////////////////////////////////////////////////////
-inline pid_t process_get_txt_owner( uint32_t channel )
+        else                                            // process is KSH
+        {
+
+#if( CONFIG_DEBUG_PROCESS_TXT_ATTACH & 1 )
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : process is the KSH process => search another\n", __FUNCTION__ );
+#endif
+
+            // scan attached process list to find another process
+            XLIST_FOREACH( root_xp , iter_xp )
+            {
+                current_xp  = XLIST_ELEMENT( iter_xp , process_t , txt_list );
+                current_cxy = GET_CXY( current_xp );
+                current_ptr = GET_PTR( current_xp );
+
+                if( current_xp != process_xp )          // current is not KSH
+                {
+                    // release lock
+                    remote_spinlock_unlock( lock_xp );
+
+                    // set owner field in TXT chdev
+                    hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , current_xp );
+
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x exit / KSH process %x to process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_pid,
+hal_remote_lw( XPTR( current_cxy , &current_ptr->pid ) ), cycle );
+#endif
+                    return;
+                }
+            }
+
+            // release lock
+            remote_spinlock_unlock( lock_xp );
+
+            // no more owner for TXT if no other process found
+            hal_remote_swd( XPTR( txt_cxy , &txt_ptr->ext.txt.owner_xp ) , XPTR_NULL );
+
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x exit / KSH process %x to nobody / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
+#endif
+            return;
+        }
+    }
+    else
+    {
+
+#if CONFIG_DEBUG_PROCESS_TXT_ATTACH
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_TXT_ATTACH < cycle )
+printk("\n[DBG] %s : thread %x exit / process %x is not TXT owner / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, process_pid, cycle );
+#endif
+
+    }
+}  // end process_txt_transfer_ownership()
+
+
+////////////////////////////////////////////////
+xptr_t process_txt_get_owner( uint32_t channel )
 {
     xptr_t      txt_rx_xp  = chdev_dir.txt_rx[channel];
     cxy_t       txt_rx_cxy = GET_CXY( txt_rx_xp );
     chdev_t *   txt_rx_ptr = GET_PTR( txt_rx_xp );
 
-    xptr_t process_xp  = (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy,
-                                                 &txt_rx_ptr->ext.txt.owner_xp ) );
-
-    cxy_t       process_cxy = GET_CXY( process_xp );
-    process_t * process_ptr = GET_PTR( process_xp );
-
-    return (pid_t)hal_remote_lw( XPTR( process_cxy , &process_ptr->pid ) );
+    return (xptr_t)hal_remote_lwd( XPTR( txt_rx_cxy , &txt_rx_ptr->ext.txt.owner_xp ) );
 }

@@ -1817 +1966 @@
     remote_spinlock_lock( lock_xp );
 
-    // scan attached process list to find KSH process
+    // scan attached process list
     XLIST_FOREACH( root_xp , iter_xp )
     {
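Note — the rewritten process_sigaction() replaces the old count-then-send protocol with a shared RPC descriptor and a strict ordering that closes the race between the last RPC response and the client going to sleep. A condensed sketch of the client side, reconstructed from the diff above (all identifiers appear in the diff; debug code and declarations of lock_xp / root_xp omitted):

    reg_t      save_sr;
    rpc_desc_t rpc;                                   // shared, in client stack

    hal_disable_irq( &save_sr );                      // 1. mask interrupts
    thread_block( XPTR( local_cxy , client ) ,        // 2. block itself first
                  THREAD_BLOCKED_RPC );

    remote_spinlock_lock( lock_xp );
    rpc.response = 0;                                 // servers decrement this
    rpc.blocking = false;
    rpc.index    = RPC_PROCESS_SIGACTION;
    rpc.args[0]  = action_type;
    rpc.args[1]  = pid;
    XLIST_FOREACH( root_xp , iter_xp )                // 3. one request per copy
    {
        hal_atomic_add( (void *)&rpc.response , 1 );
        process_xp = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        rpc_process_sigaction_client( GET_CXY( process_xp ) , &rpc );
    }
    remote_spinlock_unlock( lock_xp );

    hal_restore_irq( save_sr );                       // 4. unmask interrupts
    sched_yield("blocked on rpc_process_sigaction");  // 5. deschedule

Blocking before sending guarantees that a fast server thread decrementing rpc.response to zero finds the THREAD_BLOCKED_RPC bit already set and can safely unblock the client.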
trunk/kernel/kern/process.h (r435 → r436)

@@ -58 +58 @@
 enum process_sigactions
 {
-    BLOCK_ALL_THREADS    = 11,
-    UNBLOCK_ALL_THREADS  = 22,
-    DELETE_ALL_THREADS   = 33,
+    BLOCK_ALL_THREADS    = 0x11,
+    UNBLOCK_ALL_THREADS  = 0x22,
+    DELETE_ALL_THREADS   = 0x33,
 };

@@ -281 +281 @@
  * This function allows a client thread running in any cluster to block, unblock or delete
  * all threads of a process identified by the <pid> argument, depending on the
- * <action_type> argument. The scenario is the following:
- * - It uses the multicast, non blocking rpc_process_sigaction_client() function to send
- *   parallel requests to all remote clusters containing a process copy. Then it blocks
- $   and deschedule to wait completion of these parrallel requests.
- * - In each remote cluster, the rpc_process_sigaction_server() function, calls directly
- *   the relevant process_block(), process_unblock(), or process_delete() function, and
- *   decrement the responses counter to signal completion. The last server unblock
- *   the client thread.
- * - Finally, the client thread calls directly the process_block(), process_unblock(), or
- *   process_delete() function in the owner cluster.
+ * <action_type> argument.
+ * WARNING : the DELETE action is NOT executed on the target process main thread
+ * (thread 0 in process owner cluster).
+ * It uses the multicast, non blocking rpc_process_sigaction_client() function to send
+ * parallel requests to all remote clusters containing a process copy.
+ * Then it blocks and deschedule to wait completion of these parallel requests.
+ *
  * It is used by the sys_kill() & sys_exit() functions to handle the "kill" & "exit" syscalls.
  * It is also used by the process_make_exec() function to handle the "exec" syscall.
- * It is also called by the TXT device to execute the ctrl C & ctrl Z commands.
- * WARNING : the DELETE action is NOT executed on the main thread (thread 0 in owner cluster).
+ * It is also called by the TXT device ISR to execute the ctrl C & ctrl Z commands.
+ *
+ * Implementation note:
+ * This function allocates a - shared - RPC descriptor in client thread stack,
+ * and initializes it. This RPC descriptor can be shared because all parallel,
+ * non-blocking, RPC server threads use the same input arguments, including the
+ * RPC responses counter field.
 *********************************************************************************************
  * @ pid          : target process identifier.

@@ -305 +307 @@
 /*********************************************************************************************
- * This function blocks all threads for a given <process> in a given cluster.
- * The calling thread cannot be a target thread.
- * It loops on all local threads of the process, set the THREAD_BLOCKED_GLOBAL bit,
+ * This function blocks all threads - but the main thread - for a given <process>
+ * in a given cluster. It sets the THREAD_BLOCKED_GLOBAL bit in the thread descriptor,
  * and request the relevant schedulers to acknowledge the blocking, using IPI if required.
  * The threads are not detached from the scheduler, and not detached from the local process.

@@ -324 +325 @@
 /*********************************************************************************************
- * This function marks for deletion all threads - but one _ for a given <process>
- * in a given cluster. The main thread in owner cluster is NOT marked.
- * It will be marked for deletion by the parent process sys_wait().
- * The calling thread cannot be a target thread.
- * It loops on all local threads of the process, and set the THREAD_FLAG_REQ_DELETE bit.
- * For each marked thread, the following actions will be done by the scheduler at the next
- * scheduling point:
+ * This function marks for deletion all threads - but the main thread - for a given <process>
+ * in a given cluster. It sets the THREAD_FLAG_REQ_DELETE bit. For each marked thread,
+ * the following actions will be done by the scheduler at the next scheduling point:
  * - the thread will be detached from the scheduler.
  * - the thread will be detached from the local process descriptor.

@@ -349 +346 @@
 ********************************************************************************************/
 process_t * process_get_local_copy( pid_t pid );
+
+/*********************************************************************************************
+ * This function returns the parent process identifier for a remote process descriptor
+ * identified by an extended pointer.
+ *********************************************************************************************
+ * @ process_xp   : extended pointer on remote process descriptor.
+ * @ returns parent process dentifier.
+ ********************************************************************************************/
+pid_t process_get_ppid( xptr_t process_xp );

@@ -510 +516 @@
 /*********************************************************************************************
- * This function attach a reference process descriptor, identified by the <process>
+ * This function attach a process descriptor in owner cluster, identified by the <process>
  * argument to a TXT terminal, identified by its <txt_id> channel index argument.
  * It insert the process descriptor in the xlist rooted in the TXT_RX device.

@@ -522 +528 @@
 /*********************************************************************************************
- * This function detach a reference process descriptor, identified by the <process_xp>
- * argument, from the list of process attached to a given TXT terminal.
- * It is called when the process is killed.
- *********************************************************************************************
- * @ process  : local pointer on process descriptor.
- ********************************************************************************************/
-void process_txt_detach( process_t * process );
-
-/*********************************************************************************************
- * This function gives to a process identified by the <process_xp> argument, and attached
+ * This function detach a process, identified by the <process_xp> argument,
+ * from the list of process attached to a given TXT terminal.
+ * The target process descriptor must be in the owner cluster, but the calling thread can
+ * be running in any cluster.
+ *********************************************************************************************
+ * @ process_xp  : extended pointer on process descriptor.
+ ********************************************************************************************/
+void process_txt_detach( xptr_t  process_xp );
+
+/*********************************************************************************************
+ * This function gives to a process identified by the <owner_xp> argument, and attached
  * to terminal TXT[i] the exclusive ownership of the TXT_RX[i] terminal.
- *********************************************************************************************
- * @ process_xp  : extended pointer on reference process descriptor.
- ********************************************************************************************/
-void process_txt_set_ownership( xptr_t process_xp );
-
-/*********************************************************************************************
- * When the process identified by the <process_xp> argument has the exclusive ownership
- * of the TXT_RX[i] terminal, this function gives this ownership to the KSH[i] process.
- * It does nothing if the process is not the owner.
- *********************************************************************************************
- * @ process_xp  : extended pointer on reference process descriptor.
- ********************************************************************************************/
-void process_txt_reset_ownership( xptr_t process_xp );
-
-/*********************************************************************************************
- * This function returns the terminal owner process (foreground process)
+ * The process descriptor must be in the process owner cluster.
+ *********************************************************************************************
+ * @ owner_xp  : extended pointer on process descriptor in owner cluster.
+ ********************************************************************************************/
+void process_txt_set_ownership( xptr_t owner_xp );
+
+/*********************************************************************************************
+ * When the process dentified by the <owner_xp> argument has the exclusive ownership of
+ * the TXT_RX terminal, this function transfer this ownership to another attached process.
+ * The process descriptor must be in the process owner cluster.
+ * This function does nothing if the <pid> process is not the owner.
+ * - If the current owner is not the KSH process, the new owner is the KSH process.
+ * - If the <pid> process is the the KSH process, the new owner is another attached process.
+ * - If there is no other attached process, the TXT has no more defined owner.
+ *********************************************************************************************
+ * @ owner_xp  : extended pointer on process descriptor in owner cluster.
+ ********************************************************************************************/
+void process_txt_transfer_ownership( xptr_t owner_xp );
+
+/*********************************************************************************************
+ * This function returns the TXT owner process (foreground process)
  * for a given TXT terminal identified by its <channel> index.
 *********************************************************************************************
  * @ channel  : TXT terminal channel.
- * @ return owner process identifier.
- ********************************************************************************************/
-pid_t process_get_txt_owner( uint32_t channel );
+ * @ return extentded pointer on TXT owner process in owner cluster.
+ ********************************************************************************************/
+xptr_t process_txt_get_owner( uint32_t channel );
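Note — the new process_get_ppid() helper is what lets process_txt_transfer_ownership() classify processes without walking remote parent descriptors by hand: per the comments in process.c above, a process whose parent has PID 1 is treated as a KSH process. A hypothetical caller sketch (channel stands for any valid TXT channel index; XPTR_NULL is the null extended pointer used elsewhere in this changeset):

    xptr_t owner_xp = process_txt_get_owner( channel );

    if( (owner_xp != XPTR_NULL) && (process_get_ppid( owner_xp ) == 1) )
    {
        // the foreground process on this terminal is a KSH process
    }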
trunk/kernel/kern/rpc.c (r435 → r436)

@@ -82 +82 @@  rpc_server[] table
     &rpc_thread_user_create_server,     // 6
     &rpc_thread_kernel_create_server,   // 7
-    &rpc_thread_kill_server,            // 8
+    &rpc_undefined,                     // 8    unused slot
     &rpc_process_sigaction_server,      // 9

@@ -122 +122 @@  rpc_send()
                rpc_desc_t * rpc )
 {
-    error_t    error;
-
-    thread_t * this = CURRENT_THREAD;
-    core_t   * core = this->core;
+    volatile error_t   full = 0;
+    thread_t         * this = CURRENT_THREAD;
+    core_t           * core = this->core;
+
+#if CONFIG_DEBUG_RPC_SEND
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SEND < cycle )
+printk("\n[DBG] %s : thread %x enter for rpc[%d] / rpc_ptr %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, rpc->index, rpc, cycle );
+#endif
 
     // register client thread pointer and core lid in RPC descriptor
-    rpc->thread    = this;
-    rpc->lid       = core->lid;
+    rpc->thread = this;
+    rpc->lid    = core->lid;
 
     // build an extended pointer on the RPC descriptor
     xptr_t   desc_xp = XPTR( local_cxy , rpc );
 
-    // get local pointer on rpc_fifo in remote cluster, with the
-    // assumption that local pointers are identical in all clusters
+    // get local pointer on rpc_fifo in remote cluster,
     remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
 
-    // try to post an item in remote fifo
-    // deschedule and retry if remote fifo full
+    // post RPC in remote fifo / deschedule and retry if fifo full
     do
     {
-        error = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ),
-                                      (uint64_t )desc_xp );
-        if ( error )
+        full = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ), (uint64_t )desc_xp );
+        if ( full )
         {
             printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
             __FUNCTION__ , local_cxy , server_cxy );
 
-            if( thread_can_yield() ) sched_yield("RPC fifo full");
+            // deschedule without blocking
+            sched_yield("RPC fifo full");
         }
     }
-    while( error );
+    while( full );
 
     hal_fence();

@@ -167 +171 @@
     {
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n"
-"    rpc = %d / server = %x / cycle %d\n",
-__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
-rpc->index , server_cxy , hal_time_stamp() );
+#if CONFIG_DEBUG_RPC_SEND
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SEND < cycle )
+printk("\n[DBG] %s : thread %x busy waiting / rpc[%d] / server = %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, rpc->index , server_cxy , cycle );
+#endif
 
         while( rpc->response ) hal_fixed_delay( 100 );
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n",
-__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );
-
+#if CONFIG_DEBUG_RPC_SEND
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SEND < cycle )
+printk("\n[DBG] %s : thread % resume / rpc[%d] / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, rpc->index, cycle );
+#endif
     }
-    else
+    else         // block & deschedule
     {
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n"
-"    rpc = %d / server = %x / cycle %d\n",
-__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
-rpc->index , server_cxy , hal_time_stamp() );
-
-        thread_block( this , THREAD_BLOCKED_RPC );
-        sched_yield("BLOCKED on RPC");
-
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n",
-__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );
-
+#if CONFIG_DEBUG_RPC_SEND
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SEND < cycle )
+printk("\n[DBG] %s : thread %x block & deschedule / rpc[%d] / server = %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, rpc->index , server_cxy , cycle );
+#endif
+        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );
+        sched_yield("blocked on RPC");
+
+#if CONFIG_DEBUG_RPC_SEND
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SEND < cycle )
+printk("\n[DBG] %s : thread % resume / rpcr[%d] / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD, rpc->index, cycle );
+#endif
     }

@@ -199 +212 @@
         // acknowledge the IPI sent by the server
         dev_pic_ack_ipi();
     }
+    else
+    {
+
+#if CONFIG_DEBUG_RPC_SEND
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SEND < cycle )
+printk("\n[DBG] %s : non blocking rpc[%d] => thread return / cycle %d\n",
+__FUNCTION__, rpc->index, CURRENT_THREAD, cycle );
+#endif
+
+    }
 }  // end rpc_send()

@@ -220 +244 @@  rpc_check()
     remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n",
-__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
+#if CONFIG_DEBUG_RPC_SERVER
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : thread %x interrupted in cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
 
     // interrupted thread not preemptable during RPC chek

@@ -262 +290 @@
             hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
 
-grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / trdid %x / cycle %d\n",
-__FUNCTION__ , local_cxy , core->lid , thread , thread->trdid , hal_time_stamp() );
-
+#if CONFIG_DEBUG_RPC_SERVER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : create a new RPC thread %x in cluster %x / cycle %d\n",
+__FUNCTION__, thread, local_cxy, cycle );
+#endif
             }
         }
     }
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n",
-__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
+#if CONFIG_DEBUG_RPC_SERVER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : interrupted thread %x deschedules in cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
 
     // interrupted thread deschedule always
     sched_yield("IPI received");
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n",
-__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
+#if CONFIG_DEBUG_RPC_SERVER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : interrupted thread %x resumes in cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
 
     // interrupted thread restore IRQs after resume

@@ -312 +351 @@  rpc_thread_func()
     if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
     {
+
+#if CONFIG_DEBUG_RPC_SERVER
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : RPC thread %x takes RPC fifo ownership / cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
         // initializes RPC requests counter
         count = 0;

@@ -324 +370 @@
         while( 1 )  //  internal loop
         {
-
             empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
 
             if ( empty == 0 ) // one RPC request found
             {
                 // get client cluster and pointer on RPC descriptor
-                desc_cxy = (cxy_t)GET_CXY( desc_xp );
-                desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp );
-
-                // get RPC <index> & <blocking> fields from RPC descriptor
-                index    = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
-                blocking = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->blocking ) );
-
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n",
-__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , (uint32_t)hal_get_cycles() );
-
+                desc_cxy = GET_CXY( desc_xp );
+                desc_ptr = GET_PTR( desc_xp );
+
+                index    = desc_ptr->index;
+                blocking = desc_ptr->blocking;
+
+#if CONFIG_DEBUG_RPC_SERVER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : RPC thread %x got rpc[%d] / rpc_ptr %x / cycle %d\n",
+__FUNCTION__, this, index, desc_ptr, cycle );
+#endif
                 // call the relevant server function
                 rpc_server[index]( desc_xp );
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n",
-__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );
-
+#if CONFIG_DEBUG_RPC_SERVER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : RPC thread %x completes rpc %d in cluster %x / cycle %d\n",
+__FUNCTION__, this, index, local_cxy, cycle );
+#endif
                 // increment handled RPCs counter
                 count++;

@@ -382 +432 @@
     {
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n",
-__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
-
+#if CONFIG_DEBUG_RPC_SERVER
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : RPC thread %x suicides in cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
         // update RPC threads counter
         hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
 
         // suicide
-        thread_kill( this );
+        thread_kill( XPTR( local_cxy , this ),
+                     true,      // is_exit
+                     true );    // is forced
     }
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n",
-__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
+#if CONFIG_DEBUG_RPC_SERVER
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : RPC thread %x deschedules in cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
 
     // deschedule without blocking
     sched_yield("RPC fifo empty or too much work");
 
-grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n",
-__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
+#if CONFIG_DEBUG_RPC_SERVER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_RPC_SERVER < cycle )
+printk("\n[DBG] %s : RPC thread %x resumes in cluster %x / cycle %d\n",
+__FUNCTION__, this, local_cxy, cycle );
+#endif
 
     } // end external loop

@@ -430 +493 @@  rpc_pmem_get_pages_client()
     rpc.args[0] = (uint64_t)order;
 
-    // register RPC request in remote RPC fifo (blocking function)
+    // register RPC request in remote RPC fifo
     rpc_send( cxy , &rpc );

@@ -449 +512 @@  rpc_pmem_get_pages_server()
     // get client cluster identifier and pointer on RPC descriptor
-    cxy_t        cxy  = (cxy_t)GET_CXY( xp );
-    rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
+    cxy_t        cxy  = GET_CXY( xp );
+    rpc_desc_t * desc = GET_PTR( xp );
 
     // get input arguments from client RPC descriptor

@@ -489 +552 @@
     rpc.args[0] = (uint64_t)(intptr_t)page;
 
-    // register RPC request in remote RPC fifo (blocking function)
+    // register RPC request in remote RPC fifo
     rpc_send( cxy , &rpc );

@@ -505 +568 @@
     // get client cluster identifier and pointer on RPC descriptor
-    cxy_t        cxy  = (cxy_t)GET_CXY( xp );
-    rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
+    cxy_t        cxy  = GET_CXY( xp );
+    rpc_desc_t * desc = GET_PTR( xp );
 
     // get input arguments from client RPC descriptor

@@ -554 +617 @@
     rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp;
 
-    // register RPC request in remote RPC fifo (blocking function)
+    // register RPC request in remote RPC fifo
     rpc_send( cxy , &rpc );

@@ -581 +644 @@
     // get client cluster identifier and pointer on RPC descriptor
-    cxy_t        client_cxy = (cxy_t)GET_CXY( xp );
-    rpc_desc_t * desc       = (rpc_desc_t *)GET_PTR( xp );
+    cxy_t        client_cxy = GET_CXY( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );
 
     // get input arguments from cient RPC descriptor

@@ -615 +678 @@
 /////////////////////////////////////////////////////////////////////////////////////////
-// [6]
+// [6]     Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking)
 /////////////////////////////////////////////////////////////////////////////////////////

@@ -633 +696 @@
     // initialise RPC descriptor header
     rpc_desc_t  rpc;
-    rpc.index     = RPC_THREAD_USER_CREATE;
-    rpc.response  = 1;
+    rpc.index    = RPC_THREAD_USER_CREATE;
+    rpc.response = 1;
     rpc.blocking = true;

@@ -643 +706 @@
     rpc.args[3] = (uint64_t)(intptr_t)attr;
 
-    // register RPC request in remote RPC fifo (blocking function)
+    // register RPC request in remote RPC fifo
     rpc_send( cxy , &rpc );

@@ -673 +736 @@
     // get client cluster identifier and pointer on RPC descriptor
-    cxy_t        client_cxy = (cxy_t)GET_CXY( xp );
-    rpc_desc_t * desc       = (rpc_desc_t *)GET_PTR( xp );
+    cxy_t        client_cxy = GET_CXY( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );
 
     // get pointer on attributes structure in client cluster from RPC descriptor

@@ -709 +772 @@
 /////////////////////////////////////////////////////////////////////////////////////////
-// [7]
+// [7]     Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)
 /////////////////////////////////////////////////////////////////////////////////////////

@@ -735 +798 @@
     rpc.args[2] = (uint64_t)(intptr_t)args;
 
-    // register RPC request in remote RPC fifo (blocking function)
+    // register RPC request in remote RPC fifo
     rpc_send( cxy , &rpc );

@@ -760 +823 @@
     // get client cluster identifier and pointer on RPC descriptor
-    cxy_t        client_cxy = (cxy_t)GET_CXY( xp );
-    rpc_desc_t * desc       = (rpc_desc_t *)GET_PTR( xp );
+    cxy_t        client_cxy = GET_CXY( xp );
+    rpc_desc_t * desc       = GET_PTR( xp );
 
     // get attributes from RPC descriptor

@@ -787 +850 @@
 /////////////////////////////////////////////////////////////////////////////////////////
-// [8]   Marshaling functions attached to RPC_THREAD_KILL (blocking)
-/////////////////////////////////////////////////////////////////////////////////////////
-
-/////////////////////////////////////////////
-void rpc_thread_kill_client( cxy_t       cxy,
-                             thread_t  * thread )    // in
-{
-rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
-CURRENT_THREAD->core->lid , hal_time_stamp() );
-
-    // this RPC can be called in local cluster
-
-    // initialise RPC descriptor header
-    rpc_desc_t  rpc;
-    rpc.index    = RPC_THREAD_KILL;
-    rpc.response = 1;
-    rpc.blocking = true;
-
-    // set input arguments in RPC descriptor
-    rpc.args[0] = (uint64_t)(intptr_t)thread;
-
-    // register RPC request in remote RPC fifo (blocking function)
-    rpc_send( cxy , &rpc );
-
-rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
-CURRENT_THREAD->core->lid , hal_time_stamp() );
-}
-
-////////////////////////////////////////
-void rpc_thread_kill_server( xptr_t xp )
-{
-rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
-CURRENT_THREAD->core->lid , hal_time_stamp() );
-
-    thread_t * thread;  // local pointer on process descriptor
-
-    // get client cluster identifier and pointer on RPC descriptor
-    cxy_t        client_cxy = (cxy_t)GET_CXY( xp );
-    rpc_desc_t * desc       = (rpc_desc_t *)GET_PTR( xp );
-
-    // get attributes from RPC descriptor
-    thread = (thread_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
-
-    // call local kernel function
-    thread_kill( thread );
-
-rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
-CURRENT_THREAD->core->lid , hal_time_stamp() );
-}
+// [8]   undefined slot
 /////////////////////////////////////////////////////////////////////////////////////////

@@ -846 +858 @@  rpc_process_sigaction_client()
 ////////////////////////////////////////////////////
 void rpc_process_sigaction_client( cxy_t        cxy,
-                                   rpc_desc_t * rpc_ptr )
-{
-rpc_dmsg("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , process_action_str( (uint32_t)rpc_ptr->args[0] ) ,
-((process_t *)(intptr_t)rpc_ptr->args[1])->pid , cxy , (uint32_t)hal_get_cycles() );
-
-    // register RPC request in remote RPC fifo
-    rpc_send( cxy , rpc_ptr );
-
-rpc_dmsg("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , process_action_str( (uint32_t)rpc_ptr->args[0] ) ,
-((process_t *)(intptr_t)rpc_ptr->args[1])->pid , cxy , (uint32_t)hal_get_cycles() );
-}
+                                   rpc_desc_t * rpc )
+{
+
+#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
+uint32_t  cycle  = (uint32_t)hal_get_cycles();
+uint32_t  action = rpc->args[0];
+pid_t     pid    = rpc->args[1];
+if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
+__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
+#endif
+
+    // check some RPC arguments
+    assert( (rpc->blocking == false) , __FUNCTION__ , "must be non-blocking\n");
+    assert( (rpc->index == RPC_PROCESS_SIGACTION ) , __FUNCTION__ , "bad RPC index\n" );
+
+    // register RPC request in remote RPC fifo and return
+    rpc_send( cxy , rpc );
+
+#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+printk("\n[DBG] %s : exit after requesting to %s process %x in cluster %x / cycle %d\n",
+__FUNCTION__ , process_action_str( action ) , pid , cxy , cycle );
+#endif
+
+}  // end rpc_process_sigaction_client()

@@ -862 +888 @@  rpc_process_sigaction_server()
 //////////////////////////////////////////////
 void rpc_process_sigaction_server( xptr_t xp )
 {
     pid_t        pid;             // target process identifier
-    process_t  * process;         // pointer on local process descriptor
+    process_t  * process;         // pointer on local target process descriptor
     uint32_t     action;          // sigaction index
-    thread_t   * client_ptr;      // local pointer on client thread in client cluster
+    thread_t   * client_thread;   // pointer on client thread in client cluster
     cxy_t        client_cxy;      // client cluster identifier
-    xptr_t       client_xp;       // extended pointer on client thread
-    core_t     * client_core;     // local pointer on core running the client thread
-    rpc_desc_t * rpc;             // local pointer on rpc descriptor in client cluster
-
-    // get client cluster identifier and pointer on RPC descriptor
-    client_cxy = (cxy_t)GET_CXY( xp );
-    rpc        = (rpc_desc_t *)GET_PTR( xp );
+    rpc_desc_t * rpc;             // pointer on rpc descriptor in client cluster
+    xptr_t       count_xp;        // extended pointer on response counter
+    lid_t        client_lid;      // client core local index
+
+    // get client cluster identifier and pointer on RPC descriptor
+    client_cxy = GET_CXY( xp );
+    rpc        = GET_PTR( xp );
 
     // get arguments from RPC descriptor
-    action     = (uint32_t) hal_remote_lwd( XPTR( client_cxy , &rpc->args[0] ) );
-    pid        = (pid_t)    hal_remote_lwd( XPTR( client_cxy , &rpc->args[1] ) );
-    client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
-
-rpc_dmsg("\n[DBG] %s : enter to %s process %x / cycle %d\n",
-__FUNCTION__ , process_action_str( action ) , pid , (uint32_t)hal_get_cycles() );
+    action = (uint32_t)hal_remote_lwd( XPTR(client_cxy , &rpc->args[0]) );
+    pid    = (pid_t)   hal_remote_lwd( XPTR(client_cxy , &rpc->args[1]) );
+
+#if (CONFIG_DEBUG_PROCESS_SIGACTION & 1)
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle )
+printk("\n[DBG] %s : enter to %s process %x in cluster %x / cycle %d\n",
+__FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle );
+#endif
 
     // get local process descriptor
-    process = process_get_local_copy( pid );
-
-    // build extended pointer on client thread
-    client_xp = XPTR( client_cxy , client_ptr );
+    process = cluster_get_local_process_from_pid( pid );
 
     // call relevant kernel function
-    if      (action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
-    else if (action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
-    else if (action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
+    if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
+    else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
+    else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
+
+    // build extended pointer on response counter in RPC
+    count_xp = XPTR( client_cxy , &rpc->response );
 
     // decrement the responses counter in RPC descriptor,
     // unblock the client thread only if it is the last response.
899 if( hal_remote_atomic_add( XPTR( client_cxy , &rpc->response ), -1 ) == 1 )928 if( hal_remote_atomic_add( count_xp , -1 ) == 1 ) 900 929 { 901 client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) ); 902 thread_unblock( client_xp , THREAD_BLOCKED_RPC ); 903 dev_pic_send_ipi( client_cxy , client_core->lid ); 930 // get client thread pointer and client core lid 931 client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) ); 932 client_lid = (lid_t) hal_remote_lw ( XPTR( client_cxy , &rpc->lid ) ); 933 934 thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC ); 935 dev_pic_send_ipi( client_cxy , client_lid ); 904 936 } 905 937 906 rpc_dmsg("\n[DBG] %s : exit after %s process %x / cycle %d\n", 907 __FUNCTION__ , process_action_str( action ) , pid , (uint32_t)hal_get_cycles() ); 908 } 909 910 ///////////////////////////////////////////////////////////////////////////////////////// 911 // [10] Marshaling functions attached to RPC_VFS_INODE_CREATE (blocking) 938 #if (CONFIG_DEBUG_PROCESS_SIGACTION & 1) 939 cycle = (uint32_t)hal_get_cycles(); 940 if( CONFIG_DEBUG_PROCESS_SIGACTION < cycle ) 941 printk("\n[DBG] %s : exit after %s process %x in cluster %x / cycle %d\n", 942 __FUNCTION__ , process_action_str( action ) , pid , local_cxy , cycle ); 943 #endif 944 945 } // end rpc_process_sigaction_server() 946 947 ///////////////////////////////////////////////////////////////////////////////////////// 948 // [10] Marshaling functions attached to RPC_VFS_INODE_CREATE (blocking) 912 949 ///////////////////////////////////////////////////////////////////////////////////////// 913 950 … … 947 984 rpc.args[7] = (uint64_t)gid; 948 985 949 // register RPC request in remote RPC fifo (blocking function)986 // register RPC request in remote RPC fifo 950 987 rpc_send( cxy , &rpc ); 951 988 … … 978 1015 979 1016 // get client cluster identifier and pointer on RPC descriptor 980 cxy_t client_cxy = (cxy_t)GET_CXY( xp );981 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1017 cxy_t client_cxy = GET_CXY( xp ); 1018 rpc_desc_t * desc = GET_PTR( xp ); 982 1019 983 1020 // get input arguments from client rpc descriptor … … 1034 1071 rpc.args[0] = (uint64_t)(intptr_t)inode; 1035 1072 1036 // register RPC request in remote RPC fifo (blocking function)1073 // register RPC request in remote RPC fifo 1037 1074 rpc_send( cxy , &rpc ); 1038 1075 … … 1052 1089 1053 1090 // get client cluster identifier and pointer on RPC descriptor 1054 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1055 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1091 cxy_t client_cxy = GET_CXY( xp ); 1092 rpc_desc_t * desc = GET_PTR( xp ); 1056 1093 1057 1094 // get arguments "inode" from client RPC descriptor … … 1095 1132 rpc.args[2] = (uint64_t)(intptr_t)parent; 1096 1133 1097 // register RPC request in remote RPC fifo (blocking function)1134 // register RPC request in remote RPC fifo 1098 1135 rpc_send( cxy , &rpc ); 1099 1136 … … 1123 1160 1124 1161 // get client cluster identifier and pointer on RPC descriptor 1125 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1126 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1162 cxy_t client_cxy = GET_CXY( xp ); 1163 rpc_desc_t * desc = GET_PTR( xp ); 1127 1164 1128 1165 // get arguments "name", "type", and "parent" from client RPC descriptor … … 1173 1210 rpc.args[0] = (uint64_t)(intptr_t)dentry; 1174 1211 1175 // register RPC request in remote RPC fifo (blocking function)1212 // register RPC request in remote RPC fifo 1176 1213 rpc_send( cxy , &rpc ); 
1177 1214 … … 1191 1228 1192 1229 // get client cluster identifier and pointer on RPC descriptor 1193 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1194 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1230 cxy_t client_cxy = GET_CXY( xp ); 1231 rpc_desc_t * desc = GET_PTR( xp ); 1195 1232 1196 1233 // get arguments "dentry" from client RPC descriptor … … 1233 1270 rpc.args[1] = (uint64_t)file_attr; 1234 1271 1235 // register RPC request in remote RPC fifo (blocking function)1272 // register RPC request in remote RPC fifo 1236 1273 rpc_send( cxy , &rpc ); 1237 1274 … … 1258 1295 1259 1296 // get client cluster identifier and pointer on RPC descriptor 1260 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1261 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1297 cxy_t client_cxy = GET_CXY( xp ); 1298 rpc_desc_t * desc = GET_PTR( xp ); 1262 1299 1263 1300 // get arguments "file_attr" and "inode" from client RPC descriptor … … 1302 1339 rpc.args[0] = (uint64_t)(intptr_t)file; 1303 1340 1304 // register RPC request in remote RPC fifo (blocking function)1341 // register RPC request in remote RPC fifo 1305 1342 rpc_send( cxy , &rpc ); 1306 1343 … … 1320 1357 1321 1358 // get client cluster identifier and pointer on RPC descriptor 1322 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1323 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1359 cxy_t client_cxy = GET_CXY( xp ); 1360 rpc_desc_t * desc = GET_PTR( xp ); 1324 1361 1325 1362 // get arguments "dentry" from client RPC descriptor … … 1362 1399 rpc.args[2] = (uint64_t)child_inode_xp; 1363 1400 1364 // register RPC request in remote RPC fifo (blocking function)1401 // register RPC request in remote RPC fifo 1365 1402 rpc_send( cxy , &rpc ); 1366 1403 … … 1388 1425 1389 1426 // get client cluster identifier and pointer on RPC descriptor 1390 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1391 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1427 cxy_t client_cxy = GET_CXY( xp ); 1428 rpc_desc_t * desc = GET_PTR( xp ); 1392 1429 1393 1430 // get arguments "parent", "name", and "child_xp" … … 1435 1472 rpc.args[0] = (uint64_t)(intptr_t)inode; 1436 1473 1437 // register RPC request in remote RPC fifo (blocking function)1474 // register RPC request in remote RPC fifo 1438 1475 rpc_send( cxy , &rpc ); 1439 1476 … … 1457 1494 1458 1495 // get client cluster identifier and pointer on RPC descriptor 1459 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1460 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1496 cxy_t client_cxy = GET_CXY( xp ); 1497 rpc_desc_t * desc = GET_PTR( xp ); 1461 1498 1462 1499 // get arguments "parent", "name", and "child_xp" … … 1529 1566 1530 1567 // get client cluster identifier and pointer on RPC descriptor 1531 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1532 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1568 cxy_t client_cxy = GET_CXY( xp ); 1569 rpc_desc_t * desc = GET_PTR( xp ); 1533 1570 1534 1571 // get input arguments … … 1576 1613 rpc.args[1] = (uint64_t)vaddr; 1577 1614 1578 // register RPC request in remote RPC fifo (blocking function)1615 // register RPC request in remote RPC fifo 1579 1616 rpc_send( cxy , &rpc ); 1580 1617 … … 1602 1639 1603 1640 // get client cluster identifier and pointer on RPC descriptor 1604 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1605 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1641 cxy_t client_cxy = GET_CXY( xp ); 1642 rpc_desc_t * desc = GET_PTR( xp ); 1606 1643 1607 1644 // get input argument from client RPC descriptor … … 1653 1690 rpc.args[2] = (uint64_t)cow; 1654 1691 1655 // register RPC request in remote RPC fifo 
(blocking function)1692 // register RPC request in remote RPC fifo 1656 1693 rpc_send( cxy , &rpc ); 1657 1694 … … 1681 1718 1682 1719 // get client cluster identifier and pointer on RPC descriptor 1683 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1684 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1720 cxy_t client_cxy = GET_CXY( xp ); 1721 rpc_desc_t * desc = GET_PTR( xp ); 1685 1722 1686 1723 // get input argument "process" & "vpn" from client RPC descriptor … … 1726 1763 rpc.args[0] = (uint64_t)kmem_type; 1727 1764 1728 // register RPC request in remote RPC fifo (blocking function)1765 // register RPC request in remote RPC fifo 1729 1766 rpc_send( cxy , &rpc ); 1730 1767 … … 1745 1782 1746 1783 // get client cluster identifier and pointer on RPC descriptor 1747 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1748 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1784 cxy_t client_cxy = GET_CXY( xp ); 1785 rpc_desc_t * desc = GET_PTR( xp ); 1749 1786 1750 1787 // get input argument "kmem_type" from client RPC descriptor … … 1791 1828 rpc.args[1] = (uint64_t)kmem_type; 1792 1829 1793 // register RPC request in remote RPC fifo (blocking function)1830 // register RPC request in remote RPC fifo 1794 1831 rpc_send( cxy , &rpc ); 1795 1832 … … 1807 1844 1808 1845 // get client cluster identifier and pointer on RPC descriptor 1809 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1810 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1846 cxy_t client_cxy = GET_CXY( xp ); 1847 rpc_desc_t * desc = GET_PTR( xp ); 1811 1848 1812 1849 // get input arguments "buf" and "kmem_type" from client RPC descriptor … … 1859 1896 rpc.args[5] = (uint64_t)size; 1860 1897 1861 // register RPC request in remote RPC fifo (blocking function)1898 // register RPC request in remote RPC fifo 1862 1899 rpc_send( cxy , &rpc ); 1863 1900 … … 1887 1924 1888 1925 // get client cluster identifier and pointer on RPC descriptor 1889 cxy_t client_cxy = (cxy_t)GET_CXY( xp );1890 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );1926 cxy_t client_cxy = GET_CXY( xp ); 1927 rpc_desc_t * desc = GET_PTR( xp ); 1891 1928 1892 1929 // get arguments from client RPC descriptor … … 1953 1990 rpc.args[1] = (uint64_t)index; 1954 1991 1955 // register RPC request in remote RPC fifo (blocking function)1992 // register RPC request in remote RPC fifo 1956 1993 rpc_send( cxy , &rpc ); 1957 1994 … … 1972 2009 1973 2010 // get client cluster identifier and pointer on RPC descriptor 1974 cxy_t cxy = (cxy_t)GET_CXY( xp );1975 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );2011 cxy_t cxy = GET_CXY( xp ); 2012 rpc_desc_t * desc = GET_PTR( xp ); 1976 2013 1977 2014 // get input arguments from client RPC descriptor … … 2028 2065 rpc.args[7] = (uint64_t)vseg_cxy; 2029 2066 2030 // register RPC request in remote RPC fifo (blocking function)2067 // register RPC request in remote RPC fifo 2031 2068 rpc_send( cxy , &rpc ); 2032 2069 … … 2047 2084 2048 2085 // get client cluster identifier and pointer on RPC descriptor 2049 cxy_t cxy = (cxy_t)GET_CXY( xp );2050 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );2086 cxy_t cxy = GET_CXY( xp ); 2087 rpc_desc_t * desc = GET_PTR( xp ); 2051 2088 2052 2089 // get input arguments from client RPC descriptor … … 2101 2138 rpc.args[0] = (uint64_t)lid; 2102 2139 2103 // register RPC request in remote RPC fifo (blocking function)2140 // register RPC request in remote RPC fifo 2104 2141 rpc_send( cxy , &rpc ); 2105 2142 … … 2117 2154 2118 2155 // get client cluster identifier and pointer on RPC descriptor 2119 cxy_t cxy = (cxy_t)GET_CXY( xp );2120 
rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );2156 cxy_t cxy = GET_CXY( xp ); 2157 rpc_desc_t * desc = GET_PTR( xp ); 2121 2158 2122 2159 // get input arguments from client RPC descriptor … … 2154 2191 rpc.args[0] = (uint64_t)(intptr_t)process; 2155 2192 2156 // register RPC request in remote RPC fifo (blocking function)2193 // register RPC request in remote RPC fifo 2157 2194 rpc_send( cxy , &rpc ); 2158 2195 … … 2172 2209 2173 2210 // get client cluster identifier and pointer on RPC descriptor 2174 cxy_t cxy = (cxy_t)GET_CXY( xp );2175 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );2211 cxy_t cxy = GET_CXY( xp ); 2212 rpc_desc_t * desc = GET_PTR( xp ); 2176 2213 2177 2214 // get input arguments from client RPC descriptor … … 2211 2248 rpc.args[1] = (uint64_t)detailed; 2212 2249 2213 // register RPC request in remote RPC fifo (blocking function)2250 // register RPC request in remote RPC fifo 2214 2251 rpc_send( cxy , &rpc ); 2215 2252 … … 2230 2267 2231 2268 // get client cluster identifier and pointer on RPC descriptor 2232 cxy_t cxy = (cxy_t)GET_CXY( xp );2233 rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );2269 cxy_t cxy = GET_CXY( xp ); 2270 rpc_desc_t * desc = GET_PTR( xp ); 2234 2271 2235 2272 // get input arguments from client RPC descriptor -
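All the client/server pairs above share one marshaling convention: every argument crosses the fifo as a uint64_t slot in rpc.args[], pointers being narrowed and widened through intptr_t, exactly as in the (uint64_t)(intptr_t) casts visible in the diff. The sketch below illustrates just that convention in plain C; there is no real fifo or remote access here, the round trip happens inside one process.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t index; uint64_t args[8]; } rpc_desc_t;

int main( void )
{
    int   value = 42;
    int * ptr   = &value;

    // client side : pack one pointer and one scalar into uint64_t slots
    rpc_desc_t rpc;
    rpc.args[0] = (uint64_t)(intptr_t)ptr;
    rpc.args[1] = (uint64_t)123;

    // server side : recover the typed values from the descriptor
    int    * p = (int *)(intptr_t)rpc.args[0];
    uint32_t n = (uint32_t)rpc.args[1];

    printf("*p = %d / n = %u\n", *p, n);   // prints : *p = 42 / n = 123
    return 0;
}

Note that a raw pointer marshaled this way is only meaningful in the cluster that produced it, which is why the server functions above read the arguments with hal_remote_lwd() from the client cluster before using them.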
trunk/kernel/kern/rpc.h
r435 r436 48 48 struct mapper_s; 49 49 50 50 51 /**********************************************************************************/ 51 52 /************** structures for Remote Procedure Calls ****************************/ … 67 68 RPC_THREAD_USER_CREATE = 6, 68 69 RPC_THREAD_KERNEL_CREATE = 7, 69 RPC_ THREAD_KILL= 8,70 RPC_UNDEFINED_8 = 8, 70 71 RPC_PROCESS_SIGACTION = 9, 71 72 … … 288 289 289 290 /*********************************************************************************** 290 * [8] The RPC_THREAD_KILL ask a target cluster to kill a given thread descriptor. 291 * It is called by the sys_thread_cancel() function for a remote thread. 292 *********************************************************************************** 293 * @ cxy : server cluster identifier. 294 * @ thread : [in] local pointer on target process descriptor in server. 295 **********************************************************************************/ 296 void rpc_thread_kill_client( cxy_t cxy, 297 struct thread_s * thread ); 298 299 void rpc_thread_kill_server( xptr_t xp ); 300 301 /*********************************************************************************** 302 * [9] The RPC_PROCESS_SIGACTION allows the owner cluster to request any other 303 * cluster to execute a given sigaction (BLOCK / UNBLOCK / DELETE) for all 304 * threads of a given process. 291 * [8] undefined slot 292 **********************************************************************************/ 293 294 /*********************************************************************************** 295 * [9] The RPC_PROCESS_SIGACTION allows a thread running in any cluster 296 * to request a cluster identified by the <cxy> argument (local or remote) 297 * to execute a given sigaction for a given process. The <action_type> and 298 * the <pid> arguments are defined in the shared RPC descriptor, which must be 299 * initialised by the client thread. 305 300 * 306 301 * WARNING : It is implemented as a NON BLOCKING multicast RPC, that can be sent 307 * in parallel to all process copies. The rpc descriptor is allocated in the client 308 * thread stack by the process_sigaction() function. The various server threads 309 * must decrement the responses counter defined in the rsp descriptor, and the last 310 * server thread unblock the client thread that blocked (after sending all RPC 311 * requests) in the process_sigaction() function. 312 * - The first RPC argument is the sigaction type (BLOCK / UNBLOCK / DELETE). 313 * - The second RPC argument is the local pointer on target process. 314 *********************************************************************************** 315 * @ cxy : server cluster identifier. 316 * @ rpc_ptr : [in] local pointer on rpc descriptor in client cluster. 317 **********************************************************************************/ 318 void rpc_process_sigaction_client( cxy_t cxy, 319 rpc_desc_t * rpc_ptr ); 302 * in parallel to all process copies. The various RPC server threads atomically 303 * decrement the <response> field in the shared RPC descriptor. 304 * The last server thread unblocks the client thread that blocked (after sending 305 * all RPC requests) in the process_sigaction() function. 306 *********************************************************************************** 307 * @ cxy : server cluster identifier. 308 * @ rpc : pointer on shared RPC descriptor initialized by the client thread.
309 **********************************************************************************/ 310 void rpc_process_sigaction_client( cxy_t cxy, 311 struct rpc_desc_s * rpc ); 320 312 321 313 void rpc_process_sigaction_server( xptr_t xp ); -
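The WARNING above describes a non-blocking multicast protocol: the client initialises the shared <response> field to the number of target clusters, every server decrements it atomically, and only the last responder unblocks the client. Below is a user-space model of that protocol, with pthreads standing in for the remote server threads and a condition variable standing in for thread_block()/thread_unblock(); nothing in it is actual kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NB_SERVERS 4                            // number of target clusters

static atomic_int      response = NB_SERVERS;   // plays rpc.response
static pthread_mutex_t lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done     = PTHREAD_COND_INITIALIZER;

static void * server( void * arg )
{
    // ... execute the requested sigaction here ...

    // decrement the responses counter ; only the last response wakes the client
    if( atomic_fetch_sub( &response , 1 ) == 1 )
    {
        pthread_mutex_lock( &lock );
        pthread_cond_signal( &done );           // plays thread_unblock()
        pthread_mutex_unlock( &lock );
    }
    return NULL;
}

int main( void )
{
    pthread_t tid[NB_SERVERS];

    pthread_mutex_lock( &lock );
    for( int i = 0 ; i < NB_SERVERS ; i++ )     // multicast : one request per cluster
        pthread_create( &tid[i] , NULL , server , NULL );

    while( atomic_load( &response ) != 0 )      // plays the client blocked on RPC
        pthread_cond_wait( &done , &lock );
    pthread_mutex_unlock( &lock );

    for( int i = 0 ; i < NB_SERVERS ; i++ ) pthread_join( tid[i] , NULL );
    printf("all %d responses received\n", NB_SERVERS);
    return 0;
}

The atomic decrement is what makes the protocol safe without a global lock: however the servers interleave, exactly one of them observes the counter going from 1 to 0 and takes on the duty of waking the client.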
trunk/kernel/kern/scheduler.c
r435 r436 286 286 next = sched_select( sched ); 287 287 288 // check next thread kernel_stack overflow 289 assert( (next->signature == THREAD_SIGNATURE), 290 __FUNCTION__ , "kernel stack overflow for thread %x\n", next ); 291 288 292 // check next thread attached to same core as the calling thread 289 assert( (next->core == current->core), __FUNCTION__ ,290 "next core != current core\n");293 assert( (next->core == current->core), 294 __FUNCTION__ , "next core %x != current core %x\n", next->core, current->core ); 291 295 292 296 // check next thread not blocked when type != IDLE … … 327 331 { 328 332 329 #if ( CONFIG_DEBUG_SCHED_YIELD & 0x1)333 #if (CONFIG_DEBUG_SCHED_YIELD & 1) 330 334 uint32_t cycle = (uint32_t)hal_get_cycles(); 331 335 if( CONFIG_DEBUG_SCHED_YIELD < cycle ) … … 354 358 uint32_t save_sr; 355 359 356 if( lid >= LOCAL_CLUSTER->cores_nr ) 357 { 358 printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n", 359 __FUNCTION__ , lid , local_cxy ); 360 return; 361 } 360 assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid); 362 361 363 362 core_t * core = &LOCAL_CLUSTER->core_tbl[lid]; -
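The new assert on next->signature is a stack canary check: it implies that the thread descriptor and its kernel stack are adjacent in memory, so a stack that grows past its bottom overwrites the signature word, and the scheduler detects the corruption at the next scheduling point. The stand-alone sketch below illustrates the idea; the signature value and the structure layout are simplified assumptions, not the actual kernel layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define THREAD_SIGNATURE 0x0BADCAFE   // illustrative value, not the kernel's

typedef struct thread_s
{
    uint32_t signature;               // canary, adjacent to the stack bottom
    char     kernel_stack[256];       // stack grows downward toward the canary
} thread_t;

int main( void )
{
    thread_t t = { .signature = THREAD_SIGNATURE };

    // simulate an overflow : a write just below the stack clobbers the canary
    memset( (char *)&t + offsetof( thread_t , kernel_stack ) - sizeof(uint32_t),
            0 , sizeof(uint32_t) );

    // the scheduler-side check then catches the corruption
    if( t.signature != THREAD_SIGNATURE )
        printf("kernel stack overflow for thread %p\n", (void *)&t );
    return 0;
}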
trunk/kernel/kern/scheduler.h
r433 r436 49 49 struct thread_s * idle; /*! pointer on idle thread */ 50 50 struct thread_s * current; /*! pointer on current running thread */ 51 bool_t req_ack_pending; /*! signal_handller must be called when true*/51 volatile bool_t req_ack_pending; /*! serialize ack requests when true */ 52 52 } 53 53 scheduler_t; -
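Making req_ack_pending volatile matters because the flag is written by one core and polled by another: without the qualifier the compiler may keep the value in a register and never observe the remote update. The small model below shows the same polled-flag handshake using C11 atomics, which are the portable user-space equivalent; the function names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

// stands in for sched->req_ack_pending : set by one core, polled by another
static atomic_bool req_ack_pending = false;

static void * scheduler_side( void * arg )
{
    // busy-wait, acceptable only for illustration ;
    // a plain non-volatile flag could legally be hoisted out of this loop
    while( !atomic_load( &req_ack_pending ) ) ;

    atomic_store( &req_ack_pending , false );   // acknowledge the request
    return NULL;
}

int main( void )
{
    pthread_t t;
    pthread_create( &t , NULL , scheduler_side , NULL );

    atomic_store( &req_ack_pending , true );    // request an acknowledge
    pthread_join( t , NULL );
    puts("acknowledged");
    return 0;
}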
trunk/kernel/kern/thread.c
r433 r436 799 799 } // end thread_check_sched() 800 800 801 ///////////////////////////////////// 802 void thread_block( thread_t * thread, 803 uint32_t cause ) 804 { 801 ////////////////////////////////////// 802 void thread_block( xptr_t thread_xp, 803 uint32_t cause ) 804 { 805 // get thread cluster and local pointer 806 cxy_t cxy = GET_CXY( thread_xp ); 807 thread_t * ptr = GET_PTR( thread_xp ); 808 805 809 // set blocking cause 806 hal_ atomic_or( &thread->blocked, cause );810 hal_remote_atomic_or( XPTR( cxy , &ptr->blocked ) , cause ); 807 811 hal_fence(); 808 812 … … 810 814 uint32_t cycle = (uint32_t)hal_get_cycles(); 811 815 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 812 printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / state %x / cycle %d\n", 813 __FUNCTION__ , CURRENT_THREAD , thread , cause , thread->blocked , cycle ); 816 printk("\n[DBG] %s : thread %x blocked thread %x / cause %x / cycle %d\n", 817 __FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle ); 818 #endif 819 820 #if (CONFIG_DEBUG_THREAD_BLOCK & 1) 821 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 822 sched_display( ptr->core->lid ); 814 823 #endif 815 824 … … 831 840 uint32_t cycle = (uint32_t)hal_get_cycles(); 832 841 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 833 printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / state %x / cycle %d\n", 834 __FUNCTION__ , CURRENT_THREAD , ptr , cause , ptr->blocked , cycle ); 842 printk("\n[DBG] %s : thread %x unblocked thread %x / cause %x / cycle %d\n", 843 __FUNCTION__ , CURRENT_THREAD , ptr , cause , cycle ); 844 #endif 845 846 #if (CONFIG_DEBUG_THREAD_BLOCK & 1) 847 if( CONFIG_DEBUG_THREAD_BLOCK < cycle ) 848 sched_display( ptr->core->lid ); 835 849 #endif 836 850 … … 840 854 } // end thread_unblock() 841 855 842 ///////////////////////////////////// 843 void thread_kill( thread_t * target ) 844 { 845 volatile uint32_t rsp_count = 1; // responses counter 846 847 thread_t * killer = CURRENT_THREAD; 856 //////////////////////////////////// 857 void thread_kill( xptr_t target_xp, 858 bool_t is_exit, 859 bool_t is_forced ) 860 { 861 reg_t save_sr; // for critical section 862 bool_t attached; // target thread in attached mode 863 bool_t join_done; // joining thread arrived first 864 xptr_t killer_xp; // extended pointer on killer thread (this) 865 thread_t * killer_ptr; // pointer on killer thread (this) 866 cxy_t target_cxy; // target thread cluster 867 thread_t * target_ptr; // pointer on target thread 868 xptr_t joining_xp; // extended pointer on joining thread 869 thread_t * joining_ptr; // pointer on joining thread 870 cxy_t joining_cxy; // joining thread cluster 871 pid_t target_pid; // target process PID 872 cxy_t owner_cxy; // target process owner cluster 873 trdid_t target_trdid; // target thread identifier 874 ltid_t target_ltid; // target thread local index 875 xptr_t process_state_xp; // extended pointer on <term_state> in process 876 877 xptr_t target_flags_xp; // extended pointer on target thread <flags> 878 xptr_t target_join_lock_xp; // extended pointer on target thread <join_lock> 879 xptr_t target_join_xp_xp; // extended pointer on target thread <join_xp> 880 xptr_t target_process_xp; // extended pointer on target thread <process> 881 882 process_t * target_process; // pointer on target thread process 883 884 // get target thread cluster and pointer 885 target_cxy = GET_CXY( target_xp ); 886 target_ptr = GET_PTR( target_xp ); 887 888 // get killer thread pointers 889 killer_ptr = CURRENT_THREAD; 890 killer_xp = XPTR( local_cxy , killer_ptr ); 848 891 
849 892 #if CONFIG_DEBUG_THREAD_KILL … … 851 894 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 852 895 printk("\n[DBG] %s : thread %x enter for target thread %x / cycle %d\n", 853 __FUNCTION__, killer, target, cycle ); 854 #endif 855 856 // set the global blocked bit in target thread descriptor. 857 thread_block( target , THREAD_BLOCKED_GLOBAL ); 858 859 // request target scheduler to deschedule the target thread 860 // when killer thread is not running on same core as target thread 861 if( killer->core->lid != target->core->lid ) 862 { 863 // set signal in target thread descriptor and in target scheduler 864 thread_set_req_ack( target , (void *)(&rsp_count) ); 865 866 // send an IPI to the target thread core. 867 dev_pic_send_ipi( local_cxy , target->core->lid ); 868 869 // poll the response 870 while( 1 ) 896 __FUNCTION__, killer_ptr, target_ptr, cycle ); 897 #endif 898 899 // block the target thread 900 thread_block( target_xp , THREAD_BLOCKED_GLOBAL ); 901 902 // get target thread attached mode 903 target_flags_xp = XPTR( target_cxy , &target_ptr->flags ); 904 attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0); 905 906 // synchronize with the joining thread 907 // if the target thread is attached && not forced 908 909 if( attached && (is_forced == false) ) 910 { 911 // build extended pointers on target thread join fields 912 target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock ); 913 target_join_xp_xp = XPTR( target_cxy , &target_ptr->join_xp ); 914 915 // enter critical section 916 hal_disable_irq( &save_sr ); 917 918 // take the join_lock in target thread descriptor 919 remote_spinlock_lock( target_join_lock_xp ); 920 921 // get join_done from target thread descriptor 922 join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0); 923 924 if( join_done ) // joining thread arrived first 871 925 { 872 // exit when response received from scheduler 873 if( rsp_count == 0 ) break; 874 875 // deschedule without blocking 876 hal_fixed_delay( 1000 ); 926 // get extended pointer on joining thread 927 joining_xp = (xptr_t)hal_remote_lwd( target_join_xp_xp ); 928 joining_ptr = GET_PTR( joining_xp ); 929 joining_cxy = GET_CXY( joining_xp ); 930 931 // reset the join_done flag in target thread 932 hal_remote_atomic_and( target_flags_xp , ~THREAD_FLAG_JOIN_DONE ); 933 934 // unblock the joining thread 935 thread_unblock( joining_xp , THREAD_BLOCKED_JOIN ); 936 937 // release the join_lock in target thread descriptor 938 remote_spinlock_unlock( target_join_lock_xp ); 939 940 // restore IRQs 941 hal_restore_irq( save_sr ); 877 942 } 878 } 879 880 // set REQ_DELETE flag 881 hal_atomic_or( &target->flags , THREAD_FLAG_REQ_DELETE ); 943 else // this thread arrived first 944 { 945 // set the kill_done flag in target thread 946 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE ); 947 948 // block this thread on BLOCKED_JOIN 949 thread_block( killer_xp , THREAD_BLOCKED_JOIN ); 950 951 // set extended pointer on killer thread in target thread 952 hal_remote_swd( target_join_xp_xp , killer_xp ); 953 954 // release the join_lock in target thread descriptor 955 remote_spinlock_unlock( target_join_lock_xp ); 956 957 // deschedule 958 sched_yield( "killer thread wait joining thread" ); 959 960 // restore IRQs 961 hal_restore_irq( save_sr ); 962 } 963 } // end if attached 964 965 // - if the target thread is the main thread 966 // => synchronize with the parent process main thread 967 // - if the target thread is not the main thread 968 // => simply 
mark the target thread for delete 969 970 // get pointer on target thread process 971 target_process_xp = XPTR( target_cxy , &target_ptr->process ); 972 target_process = (process_t *)hal_remote_lpt( target_process_xp ); 973 974 // get target process owner cluster 975 target_pid = hal_remote_lw( XPTR( target_cxy , &target_process->pid ) ); 976 owner_cxy = CXY_FROM_PID( target_pid ); 977 978 // get target thread local index 979 target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) ); 980 target_ltid = LTID_FROM_TRDID( target_trdid ); 981 982 if( (owner_cxy == target_cxy) && (target_ltid == 0) ) // main thread 983 { 984 // get extended pointer on term_state in target process owner cluster 985 process_state_xp = XPTR( owner_cxy , &target_process->term_state ); 986 987 // set termination info in target process owner 988 if( is_exit ) hal_remote_atomic_or( process_state_xp , PROCESS_TERM_EXIT ); 989 else hal_remote_atomic_or( process_state_xp , PROCESS_TERM_KILL ); 882 991 883 992 #if CONFIG_DEBUG_THREAD_KILL 884 993 cycle = (uint32_t)hal_get_cycles(); 885 994 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 886 995 printk("\n[DBG] %s : thread %x exit for thread %x / main thread / cycle %d\n", 887 996 __FUNCTION__, killer_ptr, target_ptr, cycle ); 888 997 #endif 998 999 } 1000 else // not the main thread 1001 { 1002 // set the REQ_DELETE flag in target thread descriptor 1003 hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE ); 1004 1005 #if CONFIG_DEBUG_THREAD_KILL 1006 cycle = (uint32_t)hal_get_cycles(); 1007 if( CONFIG_DEBUG_THREAD_KILL < cycle ) 1008 printk("\n[DBG] %s : thread %x exit for thread %x / not the main thread / cycle %d\n", 1009 __FUNCTION__, killer_ptr, target_ptr, cycle ); 1010 #endif 1011 1012 } 889 1013 890 1014 } // end thread_kill() … … 958 1081 target_thread_ltid = LTID_FROM_TRDID( trdid ); 959 1082 1083 // check trdid argument 1084 if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) || 1085 cluster_is_undefined( target_cxy ) ) return XPTR_NULL; 1086 960 1087 // get root of list of process descriptors in target cluster 961 1088 hal_remote_memcpy( XPTR( local_cxy , &root ), … … 987 1114 remote_spinlock_unlock( lock_xp ); 988 1115 989 // check target thread found 990 if( found == false ) 991 { 992 return XPTR_NULL; 993 } 1116 // check PID found 1117 if( found == false ) return XPTR_NULL; 994 1118 995 1119 // get target thread local pointer … 997 1121 target_thread_ptr = (thread_t *)hal_remote_lpt( xp ); 998 1122 999 if( target_thread_ptr == NULL ) 1000 { 1001 return XPTR_NULL; 1002 } 1123 if( target_thread_ptr == NULL ) return XPTR_NULL; 1003 1124 1004 1125 return XPTR( target_cxy , target_thread_ptr ); -
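The synchronisation implemented by thread_kill() above is a two-party rendezvous: whichever of the killer thread and the joining thread takes join_lock first leaves a flag (KILL_DONE or JOIN_DONE) and blocks; the second arrival sees the flag and wakes the first. A condensed user-space model of that rendezvous follows, with a mutex playing join_lock and a condition variable playing thread_block()/thread_unblock(); it is an illustration only, the kernel version additionally records join_xp and masks IRQs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t join_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv        = PTHREAD_COND_INITIALIZER;
static bool join_done = false;    // set when the joiner arrived first
static bool kill_done = false;    // set when the killer arrived first

static void * killer( void * arg )
{
    pthread_mutex_lock( &join_lock );
    if( join_done )                          // joiner arrived first : wake it
    {
        join_done = false;
        pthread_cond_broadcast( &cv );       // plays thread_unblock( joining_xp )
    }
    else                                     // killer arrived first : wait
    {
        kill_done = true;                    // plays THREAD_FLAG_KILL_DONE
        while( kill_done ) pthread_cond_wait( &cv , &join_lock );
    }
    pthread_mutex_unlock( &join_lock );
    return NULL;
}

static void * joiner( void * arg )
{
    pthread_mutex_lock( &join_lock );
    if( kill_done )                          // killer arrived first : wake it
    {
        kill_done = false;
        pthread_cond_broadcast( &cv );
    }
    else                                     // joiner arrived first : wait
    {
        join_done = true;                    // plays THREAD_FLAG_JOIN_DONE
        while( join_done ) pthread_cond_wait( &cv , &join_lock );
    }
    pthread_mutex_unlock( &join_lock );
    return NULL;
}

int main( void )
{
    pthread_t a, b;
    pthread_create( &a , NULL , killer , NULL );
    pthread_create( &b , NULL , joiner , NULL );
    pthread_join( a , NULL );
    pthread_join( b , NULL );
    puts("rendezvous complete");
    return 0;
}

Whatever the arrival order, exactly one side waits and the other wakes it, so neither thread can miss the event.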
trunk/kernel/kern/thread.h
r428 r436 70 70 71 71 #define THREAD_FLAG_DETACHED 0x0001 /*! This thread is detached from parent */ 72 #define THREAD_FLAG_JOIN_DONE 0x0002 /*! Parent thread made a join */ 73 #define THREAD_FLAG_SCHED 0x0004 /*! Scheduling required for this thread */ 74 #define THREAD_FLAG_REQ_ACK 0x0008 /*! Acknowledge required from scheduler */ 75 #define THREAD_FLAG_REQ_DELETE 0x0010 /*! Destruction required from scheduler */ 72 #define THREAD_FLAG_JOIN_DONE 0x0002 /*! Parent thread made a join request */ 73 #define THREAD_FLAG_KILL_DONE 0x0004 /*! This thread received a kill request */ 74 #define THREAD_FLAG_SCHED 0x0008 /*! Scheduling required for this thread */ 75 #define THREAD_FLAG_REQ_ACK 0x0010 /*! Acknowledge required from scheduler */ 76 #define THREAD_FLAG_REQ_DELETE 0x0020 /*! Destruction required from scheduler */ 76 77 77 78 /*************************************************************************************** … … 88 89 #define THREAD_BLOCKED_USERSYNC 0x0100 /*! thread wait (cond/mutex/barrier) */ 89 90 #define THREAD_BLOCKED_RPC 0x0200 /*! thread wait RPC completion */ 90 #define THREAD_BLOCKED_ DEV_ISR0x0400 /*! thread DEV wait ISR */91 #define THREAD_BLOCKED_ISR 0x0400 /*! thread DEV wait ISR */ 91 92 #define THREAD_BLOCKED_WAIT 0x0800 /*! thread parent wait child termination */ 92 93 … … 153 154 154 155 remote_spinlock_t join_lock; /*! lock protecting the join/exit */ 155 void * join_value; /*! exit_value used in case of join */ 156 xptr_t join_xp; /*! extended pointer on joining thread */ 156 xptr_t join_xp; /*! joining/killer thread extended pointer */ 157 157 158 158 uint32_t * ack_rsp_count; /*! pointer on acknowledge response counter */ … … 386 386 387 387 /*************************************************************************************** 388 * This function is called to handle the "pthread_cancel" system call. 389 * It allows a killer thread to kill one single target thread. 390 * The killer thread must be running in the same cluster as the target thread. 391 * If not, the client thread must use the RPC_THREAD_KILL. 392 * - When the killer thread is running on the same core as the target thread, 393 * this function simply set the BLOCKED_ GLOBAL bit and the REQ_DELETE flag 394 * in the target thread descriptor and return. 395 * - When the killer thread is running on a different core than the target thread, 396 * the killer set the BLOCKED_GLOBAL bit and the REQ_ACK flag in target thread, 397 * to ask the scheduler to confirm that the target is blocked and not running. 398 * Then, it set the REQ_DELETE flag in the target thread and return. 399 * In both cases, the actual target thread destruction is done by the scheduler 400 * at the next scheduling point. 401 *************************************************************************************** 402 * @ thread : local pointer on the target thread. 403 **************************************************************************************/ 404 void thread_kill( thread_t * thread ); 405 406 /*************************************************************************************** 407 * This function registers a blocking cause in the target thread "blocked" bit vector. 408 * Warning : this function does not deschedule the calling thread, and the descheduling 388 * This function is called to handle the four pthread_cancel(), pthread_exit(), 389 * kill() and exit() system calls. It kills a "target" thread identified by the 390 * <thread_xp> argument. 
The "killer" thread can be the "target" thread, when the 391 * <is_exit> argument is true. The "killer" thread can run in any cluster, 392 * as it uses remote accesses. 393 * If the "target" thread is running in "attached" mode, and the <is_forced> argument 394 * is false, this function implements the required sychronisation with the joining 395 * thread, blocking the "killer" thread until the pthread_join() syscall is executed. 396 * To delete the target thread, this function sets the THREAD_FLAG_REQ_DELETE bit 397 * and the THREAD BLOCKED_GLOBAL bit in the target thread, and the actual destruction 398 * is asynchronously done by the scheduler at the next scheduling point. 399 *************************************************************************************** 400 * @ thread_xp : extended pointer on the target thread. 401 * @ is_exit : the killer thread is the target thread itself. 402 * @ is_forced : the killing does not depends on the attached mode. 403 **************************************************************************************/ 404 void thread_kill( xptr_t thread_xp, 405 bool_t is_exit, 406 bool_t is_forced ); 407 408 /*************************************************************************************** 409 * This function registers a blocking cause defined by the <cause> argument 410 * in a remote thread descriptor identified by the <thread_xp> argument. 411 * We need an extended pointer, because this function can be called by another thread 412 * than the target thread, executing the sys_kill() function. 413 * WARNING : this function does not deschedule the target thread, and the descheduling 409 414 * must be explicitely forced by a sched_yield(). 410 415 *************************************************************************************** 411 * @ thread : local pointer on targetthread descriptor.412 * @ cause : mask defining the cause (one hot).413 **************************************************************************************/ 414 void thread_block( thread_t * thread,415 uint32_t 416 417 /*************************************************************************************** 418 * This function resets the bit identified by the cause argument in the "blocked"419 * bit vector of a remote thread descriptor, using an atomic access.416 * @ thread_xp : extended pointer on remote thread descriptor. 417 * @ cause : mask defining the cause (one hot). 418 **************************************************************************************/ 419 void thread_block( xptr_t thread_xp, 420 uint32_t cause ); 421 422 /*************************************************************************************** 423 * This function resets the bit identified by the <cause> argument in a remote 424 * thread descriptor identified by the <thread_xp> argument. 420 425 * We need an extended pointer, because the client thread of an I/O operation on a 421 426 * given device is not in the same cluster as the associated device descriptor. 422 * W arning: this function does not reschedule the remote thread.427 * WARNING : this function does not reschedule the remote thread. 423 428 * The scheduling can be forced by sending an IPI to the core running the remote thread. 424 429 *************************************************************************************** 425 * @ thread : extended pointer onthe remote thread.426 * @ cause : mask defining the cause (one hot).430 * @ thread_xp : extended pointer the remote thread. 431 * @ cause : mask defining the cause (one hot). 
427 432 * @ return non zero if the bit-vector was actually modified / return 0 otherwise 428 433 **************************************************************************************/ 429 uint32_t thread_unblock( xptr_t thread ,434 uint32_t thread_unblock( xptr_t thread_xp, 430 435 uint32_t cause ); 431 436 … … 449 454 450 455 /*************************************************************************************** 451 * This function handles all pending signals for the thread identified by the <thread>452 * argument. It is called each time the core exits the kernel, after handling an453 * interrupt, exception or syscall.454 * TODO This function is not implemented.455 ***************************************************************************************456 * @ thread : local pointer on target thread.457 **************************************************************************************/458 void thread_signals_handle( thread_t * thread );459 460 /***************************************************************************************461 456 * This function returns the extended pointer on a thread descriptor identified 462 457 * by its thread identifier, and process identifier.
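For reference, the <blocked> field manipulated by thread_block() and thread_unblock() is a one-hot bit vector: several independent blocking causes can be set at once, and the thread becomes runnable again only when the whole vector returns to zero. The short sketch below illustrates those semantics with two of the cause bits defined in this changeset; C11 atomics replace the hal_remote_atomic_or/and primitives, so this is a model, not the kernel implementation.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_BLOCKED_RPC 0x0200   // values taken from thread.h above
#define THREAD_BLOCKED_ISR 0x0400

static atomic_uint blocked = 0;     // plays the thread <blocked> bit vector

// returns non zero if the bit was actually cleared, as thread_unblock() does
static uint32_t unblock( uint32_t cause )
{
    uint32_t previous = atomic_fetch_and( &blocked , ~cause );
    return previous & cause;
}

int main( void )
{
    atomic_fetch_or( &blocked , THREAD_BLOCKED_RPC );   // plays thread_block()
    atomic_fetch_or( &blocked , THREAD_BLOCKED_ISR );

    unblock( THREAD_BLOCKED_RPC );
    printf("runnable : %s\n", atomic_load( &blocked ) ? "no" : "yes");  // no

    unblock( THREAD_BLOCKED_ISR );
    printf("runnable : %s\n", atomic_load( &blocked ) ? "no" : "yes");  // yes
    return 0;
}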