Changeset 564 for trunk/kernel/kern/thread.c
Timestamp: Oct 4, 2018, 11:47:36 PM
Files: 1 edited
Legend:
- Unmodified: no prefix
- Added (r564): prefixed with "+"
- Removed (r531): prefixed with "-"
trunk/kernel/kern/thread.c
--- trunk/kernel/kern/thread.c (r531)
+++ trunk/kernel/kern/thread.c (r564)

@@ -1 +1 @@
 /*
- * thread.c - implementation of thread operations (user & kernel)
+ * thread.c - thread operations implementation (user & kernel)
  *
  * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
- *         Alain Greiner (2016,2017)
+ *         Alain Greiner (2016,2017,2018)
  *
  * Copyright (c) UPMC Sorbonne Universites

@@ -48 +48 @@
 //////////////////////////////////////////////////////////////////////////////////////
 
-extern process_t           process_zero;
+extern process_t           process_zero;       // allocated in kernel_init.c
+extern char              * lock_type_str[];    // allocated in kernel_init.c
+extern chdev_directory_t   chdev_dir;          // allocated in kernel_init.c
 
 //////////////////////////////////////////////////////////////////////////////////////

@@ -145 +147 @@
     cluster_t    * local_cluster = LOCAL_CLUSTER;
 
-#if DEBUG_THREAD_USER_INIT
+#if DEBUG_THREAD_INIT
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_THREAD_USER_INIT < cycle )
-printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
-#endif
-
-    // register new thread in process descriptor, and get a TRDID
-    thread->type = type;    // needed by process_register_thread.
-    error = process_register_thread( process, thread , &trdid );
-
-    if( error )
-    {
-        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
-        return EINVAL;
-    }
+if( DEBUG_THREAD_INIT < cycle )
+printk("\n[DBG] %s : thread %x in process %x enter fot thread %x in process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+thread, process->pid , cycle );
+#endif
 
     // compute thread descriptor size without kernel stack

@@ -166 +159 @@
 
     // Initialize new thread descriptor
-    thread->trdid = trdid;
+    thread->type = type;
     thread->quantum = 0;            // TODO
     thread->ticks_nr = 0;           // TODO

@@ -173 +166 @@
     thread->process = process;
 
-    thread->local_locks = 0;
-    thread->remote_locks = 0;
-
-#if CONFIG_LOCKS_DEBUG
-    list_root_init( &thread->locks_root );
-    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
+    thread->busylocks = 0;
+
+#if DEBUG_BUSYLOCK
+    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
 #endif
 

@@ -194 +185 @@
     thread->blocked = THREAD_BLOCKED_GLOBAL;
 
-    // reset sched list
+    // register new thread in process descriptor, and get a TRDID
+    error = process_register_thread( process, thread , &trdid );
+
+    if( error )
+    {
+        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
+        return EINVAL;
+    }
+
+    // initialize trdid
+    thread->trdid = trdid;
+
+    // initialize sched list
     list_entry_init( &thread->sched_list );
 
-    // reset thread info
+    // initialize waiting queue entries
+    list_entry_init( &thread->wait_list );
+    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );
+
+    // initialize thread info
     memset( &thread->info , 0 , sizeof(thread_info_t) );
 
-    // initialize sjoin_lock
-    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ));
+    // initialize join_lock
+    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
 
     // initialise signature

@@ -216 +223 @@
     dqdt_update_threads( 1 );
 
-#if DEBUG_THREAD_USER_INIT
+#if DEBUG_THREAD_INIT
 cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_THREAD_USER_INIT < cycle )
-printk("\n[DBG] %s : thread %x exit after init of thread %x in process %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
+if( DEBUG_THREAD_INIT < cycle )
+printk("\n[DBG] %s : thread %x in process %x exit for thread %x in process %x / cycle %d\n",
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+thread, process->pid , cycle );
 #endif
 

@@ -436 +444 @@
     args  = (void *)   hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args   ));
     base  = (intptr_t) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base ));
-    size  = (uint32_t) hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size ));
-    flags =            hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags        ));
+    size  = (uint32_t) hal_remote_l32( XPTR( parent_cxy , &parent_ptr->u_stack_size ));
+    flags =            hal_remote_l32( XPTR( parent_cxy , &parent_ptr->flags        ));
     uzone = (reg_t *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
 

@@ -474 +482 @@
     }
 
+#if (DEBUG_THREAD_USER_FORK & 1)
+if( DEBUG_THREAD_USER_FORK < cycle )
+printk("\n[DBG] %s : thread %x in process %x / initialised thread %x in process %x\n",
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
+child_ptr->trdid, child_process->pid );
+#endif
+
     // return child pointer
     *child_thread = child_ptr;

@@ -502 +517 @@
     }
 
-    // create and initialize STACK vseg
+#if (DEBUG_THREAD_USER_FORK & 1)
+if( DEBUG_THREAD_USER_FORK < cycle )
+printk("\n[DBG] %s : thread %x in process %x / created CPU & FPU contexts\n",
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
+#endif
+
+    // create and initialize STACK vseg
     vseg = vseg_alloc();
     vseg_init( vseg,

@@ -514 +535 @@
 
     // register STACK vseg in local child VSL
-    vseg_attach( &child_process->vmm , vseg );
+    vmm_vseg_attach( &child_process->vmm , vseg );
+
+#if (DEBUG_THREAD_USER_FORK & 1)
+if( DEBUG_THREAD_USER_FORK < cycle )
+printk("\n[DBG] %s : thread %x in process %x / created stack vseg\n",
+__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
+#endif
 
     // copy all valid STACK GPT entries

@@ -530 +557 @@
     if( error )
     {
-        vseg_detach( vseg );
+        vmm_vseg_detach( &child_process->vmm , vseg );
         vseg_free( vseg );
         thread_release( child_ptr );

@@ -549 +576 @@
         xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
 
-        // increment the forks counter
-        remote_spinlock_lock( lock_xp );
+        // get lock protecting page
+        remote_busylock_acquire( lock_xp );
+
+        // increment the forks counter in page descriptor
         hal_remote_atomic_add( forks_xp , 1 );
-        remote_spinlock_unlock( lock_xp );
+
+        // release lock protecting page
+        remote_busylock_release( lock_xp );
 
 #if (DEBUG_THREAD_USER_FORK & 1)

@@ -559 +590 @@
 printk("\n[DBG] %s : thread %x in process %x copied one PTE to child GPT : vpn %x / forks %d\n",
 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, vpn,
-hal_remote_lw( XPTR( page_cxy , &page_ptr->forks) ) );
+hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
 #endif
 

@@ -596 +627 @@
 #endif
 
-    assert( (thread->type == THREAD_USER )          , "bad type" );
-    assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
-    assert( (thread->local_locks == 0)              , "bad local locks" );
-    assert( (thread->remote_locks == 0)             , "bad remote locks" );
+    // check parent thread attributes
+    assert( (thread->type == THREAD_USER )          , "bad type" );
+    assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
+    assert( (thread->busylocks == 0)                , "bad busylocks" );
 
     // re-initialize various thread descriptor fields

@@ -605 +636 @@
     thread->ticks_nr        = 0;    // TODO
     thread->time_last_check = 0;    // TODO
-
-#if CONFIG_LOCKS_DEBUG
-    list_root_init( &thread->locks_root );
-    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
-#endif
 
     thread->entry_func = entry_func;

@@ -622 +648 @@
     thread->fork_cxy  = 0;    // not inherited
 
+    // re-initialize busylocks counters
+    thread->busylocks = 0;
+
     // reset thread info
     memset( &thread->info , 0 , sizeof(thread_info_t) );
 
-    // initialize join_lock
-    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ));
+    // re-initialize join_lock
+    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
 
     // allocate an user stack vseg for main thread

@@ -664 +693 @@
     hal_cpu_context_exec( thread );
 
-    assert( false, "we should execute this code");
+    assert( false, "we should not execute this code");
 
     return 0;

@@ -742 +771 @@
                         lid_t    core_lid )
 {
-    assert( (type == THREAD_IDLE) , "illegal thread type" );
-    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
+
+    // check arguments
+    assert( (type == THREAD_IDLE) , "illegal thread type" );
+    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
 
     // initialize thread descriptor

@@ -784 +815 @@
 #endif
 
-    assert( (thread->local_locks == 0) ,
-    "local lock not released for thread %x in process %x", thread->trdid, process->pid );
-
-    assert( (thread->remote_locks == 0) ,
-    "remote lock not released for thread %x in process %x", thread->trdid, process->pid );
+    // check busylocks counter
+    assert( (thread->busylocks == 0) ,
+    "busylock not released for thread %x in process %x", thread->trdid, process->pid );
 
     // update intrumentation values

@@ -890 +919 @@
 }  // thread_reset_req_ack()
 
-////////////////////////////////
-inline bool_t thread_can_yield( void )
-{
-    thread_t * this = CURRENT_THREAD;
-    return (this->local_locks == 0) && (this->remote_locks == 0);
-}
-
-/////////////////////////
-void thread_check_sched( void )
-{
-    thread_t * this = CURRENT_THREAD;
-
-    if( (this->local_locks == 0) &&
-        (this->remote_locks == 0) &&
-        (this->flags & THREAD_FLAG_SCHED) )
-    {
-        this->flags &= ~THREAD_FLAG_SCHED;
-        sched_yield( "delayed scheduling" );
-    }
-
-}  // end thread_check_sched()
-
 //////////////////////////////////////
 void thread_block( xptr_t   thread_xp,

@@ -930 +937 @@
 printk("\n[DBG] %s : thread %x in process %x blocked thread %x in process %x / cause %x\n",
 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
-ptr->trdid, hal_remote_lw(XPTR( cxy , &process->pid )), cause );
+ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
 #endif
 

@@ -953 +960 @@
 printk("\n[DBG] %s : thread %x in process %x unblocked thread %x in process %x / cause %x\n",
 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
-ptr->trdid, hal_remote_lw(XPTR( cxy , &process->pid )), cause );
+ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
 #endif
 

@@ -974 +981 @@
     thread_t   * target_ptr;             // pointer on target thread
     xptr_t       target_flags_xp;        // extended pointer on target thread <flags>
-    uint32_t     target_flags;           // target thread <flags> value
     xptr_t       target_join_lock_xp;    // extended pointer on target thread <join_lock>
     xptr_t       target_join_xp_xp;      // extended pointer on target thread <join_xp>

@@ -982 +988 @@
     thread_t   * joining_ptr;            // pointer on joining thread
     cxy_t        joining_cxy;            // joining thread cluster
-    cxy_t        owner_cxy;              // process owner cluster
-
-
-    // get target thread pointers, identifiers, and flags
+
+    // get target thread cluster and local pointer
     target_cxy   = GET_CXY( target_xp );
     target_ptr   = GET_PTR( target_xp );
-    target_trdid = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
+
+    // get target thread identifiers, and attached flag
+    target_trdid = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
     target_ltid  = LTID_FROM_TRDID( target_trdid );
     target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
-    target_flags = hal_remote_lw( target_flags_xp );
+    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
 
     // get killer thread pointers

@@ -998 +1004 @@
 
 #if DEBUG_THREAD_DELETE
-uint32_t cycle  = (uint32_t)hal_get_cycles;
+uint32_t cycle  = (uint32_t)hal_get_cycles();
 if( DEBUG_THREAD_DELETE < cycle )
-printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
-__FUNCTION__, killer_ptr, target_ptr, cycle );
-#endif
-
-    // target thread cannot be the main thread, because the main thread
-    // must be deleted by the parent process sys_wait() function
-    owner_cxy = CXY_FROM_PID( pid );
-    assert( ((owner_cxy != target_cxy) || (target_ltid != 0)),
-    "tharget thread cannot be the main thread\n" );
+printk("\n[DBG] %s : thread %x in process %x enters / target thread %x / cycle %d\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
+#endif
+
+    // check killer thread can yield
+    assert( (killer_ptr->busylocks == 0),
+    "cannot yield : busylocks = %d\n", killer_ptr->busylocks );
+
+    // check target thread is not the main thread, because the main thread
+    // must be deleted by the parent process sys_wait() function
+    assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
+    "tharget thread cannot be the main thread\n" );
 
     // block the target thread
     thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
 
-    // get attached from target flag descriptor
-    target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
-
-    // synchronize with the joining thread if the target thread is attached
-    if( target_attached && (is_forced == false) )
-    {
+    // synchronize with the joining thread if attached
+    if( target_attached && (is_forced == false) )
+    {
+
+#if (DEBUG_THREAD_DELETE & 1)
+if( DEBUG_THREAD_DELETE < cycle )
+printk("\n[DBG] %s : thread %x in process %x / target thread is attached\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
+#endif
         // build extended pointers on target thread join fields
         target_join_lock_xp = XPTR( target_cxy , &target_ptr->join_lock );

@@ -1027 +1039 @@
 
         // take the join_lock in target thread descriptor
-        remote_spinlock_lock( target_join_lock_xp );
+        remote_busylock_acquire( target_join_lock_xp );
 
         // get join_done from target thread descriptor
-        target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
+        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
 
         if( target_join_done )  // joining thread arrived first => unblock the joining thread
         {
+
+#if (DEBUG_THREAD_DELETE & 1)
+if( DEBUG_THREAD_DELETE < cycle )
+printk("\n[DBG] %s : thread %x in process %x / joining thread arrived first\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
+#endif
             // get extended pointer on joining thread
-            joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
+            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
             joining_ptr = GET_PTR( joining_xp );
             joining_cxy = GET_CXY( joining_xp );

@@ -1046 +1064 @@
 
             // release the join_lock in target thread descriptor
-            remote_spinlock_unlock( target_join_lock_xp );
+            remote_busylock_release( target_join_lock_xp );
+
+            // set the REQ_DELETE flag in target thread descriptor
+            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
 
             // restore IRQs
             hal_restore_irq( save_sr );
         }
-        else                // this thread arrived first => register flags and deschedule
+        else                // killer thread arrived first => register flags and deschedule
         {
+
+#if (DEBUG_THREAD_DELETE & 1)
+if( DEBUG_THREAD_DELETE < cycle )
+printk("\n[DBG] %s : thread %x in process %x / killer thread arrived first\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
+#endif
             // set the kill_done flag in target thread
             hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );

@@ -1060 +1087 @@
 
             // set extended pointer on killer thread in target thread
-            hal_remote_swd( target_join_xp_xp , killer_xp );
+            hal_remote_s64( target_join_xp_xp , killer_xp );
 
             // release the join_lock in target thread descriptor
-            remote_spinlock_unlock( target_join_lock_xp );
-
+            remote_busylock_release( target_join_lock_xp );
+
+#if (DEBUG_THREAD_DELETE & 1)
+if( DEBUG_THREAD_DELETE < cycle )
+printk("\n[DBG] %s : thread %x in process %x / killer thread deschedule\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
+#endif
             // deschedule
             sched_yield( "killer thread wait joining thread" );
+
+#if (DEBUG_THREAD_DELETE & 1)
+if( DEBUG_THREAD_DELETE < cycle )
+printk("\n[DBG] %s : thread %x in process %x / killer thread resume\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
+#endif
+            // set the REQ_DELETE flag in target thread descriptor
+            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
 
             // restore IRQs
             hal_restore_irq( save_sr );
         }
-    }   // end if attached
-
-    // set the REQ_DELETE flag in target thread descriptor
-    hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
+    }
+    else          // target thread not attached
+    {
+        // set the REQ_DELETE flag in target thread descriptor
+        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
+    }
 
 #if DEBUG_THREAD_DELETE
 cycle  = (uint32_t)hal_get_cycles;
 if( DEBUG_THREAD_DELETE < cycle )
-printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
-__FUNCTION__, killer_ptr, target_ptr, cycle );
+printk("\n[DBG] %s : thread %x in process %x exit / target thread %x / cycle %d\n",
+__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
 #endif
 

@@ -1087 +1129 @@
 
 
-///////////////////////
+/////////////////////////////
 void thread_idle_func( void )
 {
-
-#if DEBUG_THREAD_IDLE
-uint32_t cycle;
-#endif
-
     while( 1 )
     {

@@ -1104 +1141 @@
         {
 
-#if (DEBUG_THREAD_IDLE & 1)
-cycle  = (uint32_t)hal_get_cycles;
+#if DEBUG_THREAD_IDLE
+{
+uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_THREAD_IDLE < cycle )
 printk("\n[DBG] %s : idle thread on core[%x,%d] goes to sleep / cycle %d\n",
 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
+}
 #endif
 
             hal_core_sleep();
 
-#if (DEBUG_THREAD_IDLE & 1)
-cycle  = (uint32_t)hal_get_cycles;
+#if DEBUG_THREAD_IDLE
+{
+uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_THREAD_IDLE < cycle )
 printk("\n[DBG] %s : idle thread on core[%x,%d] wake up / cycle %d\n",
 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
+}
 #endif
 

@@ -1123 +1164 @@
 
 #if DEBUG_THREAD_IDLE
+{
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_THREAD_IDLE < cycle )
 sched_display( CURRENT_THREAD->core->lid );
+}
 #endif
-
         // search a runable thread
-        sched_yield( "IDLE" );
-    }
+        sched_yield( "running idle thread" );
+
+    } // end while
+
 }  // end thread_idle()

@@ -1134 +1180 @@
 ///////////////////////////////////////////
 void thread_time_update( thread_t * thread,
-                         uint32_t   is_user )
+                         bool_t     is_user )
 {
     cycle_t current_cycle;   // current cycle counter value

@@ -1154 +1200 @@
     if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
     else          info->sys_cycles += (current_cycle - last_cycle);
-}
+
+}  // end thread_time_update()
 
 /////////////////////////////////////

@@ -1174 +1221 @@
 
     // check trdid argument
-    if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) ||
+    if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) ||
         cluster_is_undefined( target_cxy ) )  return XPTR_NULL;
 

@@ -1182 +1229 @@
                        sizeof(xlist_entry_t) );
 
-    // get extended pointer on lock protecting the list of processes
+    // get extended pointer on lock protecting the list of local processes
     lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
 
     // take the lock protecting the list of processes in target cluster
-    remote_spinlock_lock( lock_xp );
-
-    // loop on list of process in target cluster to find the PID process
+    remote_queuelock_acquire( lock_xp );
+
+    // scan the list of local processes in target cluster
     xptr_t iter;
     bool_t found = false;

@@ -1195 +1242 @@
         target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
         target_process_ptr = GET_PTR( target_process_xp );
-        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
+        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
         if( target_process_pid == pid )
         {

@@ -1204 +1251 @@
 
     // release the lock protecting the list of processes in target cluster
-    remote_spinlock_unlock( lock_xp );
+    remote_queuelock_release( lock_xp );
 
     // check PID found

@@ -1216 +1263 @@
 
     return XPTR( target_cxy , target_thread_ptr );
-}
+
+}  // end thread_get_xptr()
+
+///////////////////////////////////////////////////
+void thread_assert_can_yield( thread_t    * thread,
+                              const char  * func_str )
+{
+    // does nothing if thread does not hold any busylock
+    if( thread->busylocks )
+    {
+        // get pointers on TXT0 chdev
+        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
+        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+        // get extended pointer on TXT0 lock
+        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+        // get TXT0 lock
+        remote_busylock_acquire( txt0_lock_xp );
+
+        // display error message on TXT0
+        nolock_printk("\n[PANIC] in %s / thread %x in process %x [%x] cannot yield : "
+        "%d busylock(s) / cycle %d\n",
+        func_str, thread->trdid, thread->process->pid, thread,
+        thread->busylocks, (uint32_t)hal_get_cycles() );
+
+#if DEBUG_BUSYLOCK
+        if( XPTR( local_cxy , thread ) == DEBUG_BUSYLOCK_THREAD_XP )
+        {
+            // get root of list of taken busylocks
+            xptr_t root_xp = XPTR( local_cxy , &thread->busylocks_root );
+            xptr_t iter_xp;
+
+            // scan list of busylocks
+            XLIST_FOREACH( root_xp , iter_xp )
+            {
+                xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
+                cxy_t        lock_cxy  = GET_CXY( lock_xp );
+                busylock_t * lock_ptr  = GET_PTR( lock_xp );
+                uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
+                nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
+            }
+        }
+#endif
+
+        // release TXT0 lock
+        remote_busylock_release( txt0_lock_xp );
+
+        // suicide
+        hal_core_sleep();
+    }
+}  // end thread_assert_can yield()
+
+#if DEBUG_BUSYLOCK
+
+////////////////////////////////////////////////////
+void thread_display_busylocks( uint32_t lock_type,
+                               bool_t   is_acquire )
+{
+    xptr_t    iter_xp;
+
+    // get cluster and local pointer of target thread
+    cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
+    thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
+
+    // get extended pointer on root of busylocks
+    xptr_t     root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
+
+    // get pointers on TXT0 chdev
+    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
+    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+    // get extended pointer on remote TXT0 lock
+    xptr_t    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+    // get TXT0 lock
+    remote_busylock_acquire( txt0_lock_xp );
+
+    if( is_acquire )
+    {
+        nolock_printk("\n### thread [%x,%x] ACQUIRE lock %s / root %x / locks :\n",
+        thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) );
+    }
+    else
+    {
+        nolock_printk("\n### thread [%x,%x] RELEASE lock %s / root %x / locks :\n",
+        thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) );
+    }
+
+    int i;
+
+    XLIST_FOREACH( root_xp , iter_xp )
+    {
+        xptr_t       ilock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
+        cxy_t        ilock_cxy  = GET_CXY( ilock_xp );
+        busylock_t * ilock_ptr  = GET_PTR( ilock_xp );
+        uint32_t     ilock_type = hal_remote_l32( XPTR( ilock_cxy , &ilock_ptr->type ) );
+        nolock_printk(" - %s in cluster %x\n", lock_type_str[ilock_type] , ilock_cxy );
+    }
+
+    // release TXT0 lock
+    remote_busylock_release( txt0_lock_xp );
+}
+#endif
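The heart of this changeset is the move from the old per-thread local_locks / remote_locks counters to a single busylocks counter, backed (under DEBUG_BUSYLOCK) by an xlist of the busylocks actually held, and checked by the new thread_assert_can_yield() before any descheduling. The user-space sketch below only illustrates that discipline; it is not ALMOS-MKH code. All names (mini_thread_t, mini_busylock_t, ...) are hypothetical, and the thread pointer is passed explicitly where the kernel would use CURRENT_THREAD.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical mini-model of the busylock discipline */
typedef struct mini_thread_s
{
    uint32_t busylocks;        /* number of busylocks currently held */
} mini_thread_t;

typedef struct mini_busylock_s
{
    atomic_flag taken;         /* spin flag: set while the lock is held */
    uint32_t    type;          /* lock type index, for debug display */
} mini_busylock_t;

/* acquire: spin until free, then count the lock as held */
static void mini_busylock_acquire( mini_thread_t * this, mini_busylock_t * lock )
{
    while( atomic_flag_test_and_set( &lock->taken ) )
        ;                      /* busy waiting, as with a kernel busylock */
    this->busylocks++;
}

/* release: uncount the lock, then free it */
static void mini_busylock_release( mini_thread_t * this, mini_busylock_t * lock )
{
    this->busylocks--;
    atomic_flag_clear( &lock->taken );
}

/* the invariant enforced by thread_assert_can_yield() in this changeset:
   a thread must not deschedule while holding any busylock */
static void mini_assert_can_yield( mini_thread_t * this, const char * func_str )
{
    if( this->busylocks )
    {
        printf("[PANIC] in %s : cannot yield with %u busylock(s)\n",
               func_str, (unsigned)this->busylocks );
        assert( 0 );
    }
}

int main( void )
{
    mini_thread_t   thread = { .busylocks = 0 };
    mini_busylock_t lock   = { .taken = ATOMIC_FLAG_INIT, .type = 0 };

    mini_busylock_acquire( &thread, &lock );
    /* mini_assert_can_yield( &thread, __func__ ); would panic here */
    mini_busylock_release( &thread, &lock );

    mini_assert_can_yield( &thread, __func__ );   /* passes: counter is 0 */
    printf("busylocks = %u : yield allowed\n", (unsigned)thread.busylocks );
    return 0;
}

The point of a single counter is that "may I deschedule?" becomes an O(1) check, while the DEBUG_BUSYLOCK list records which typed locks are still held when that check fails.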
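The restructured thread_delete() also makes the killer/joiner rendezvous easier to follow: whichever side reaches the join_lock first raises its flag (THREAD_FLAG_JOIN_DONE or THREAD_FLAG_KILL_DONE) and blocks; the second side sees the flag and unblocks the first; only then is THREAD_FLAG_REQ_DELETE raised. The sketch below models this first-arriver protocol with POSIX threads. It is an illustration only: the kernel blocks and reschedules the threads rather than using condition variables, and all names (rendezvous_t, rdv, met, ...) are invented for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* hypothetical model of the killer/joiner rendezvous in thread_delete() */
typedef struct rendezvous_s
{
    pthread_mutex_t join_lock;   /* models the target thread join_lock */
    pthread_cond_t  wakeup;      /* models thread_block()/thread_unblock() */
    bool            join_done;   /* set by the joiner if it arrives first */
    bool            kill_done;   /* set by the killer if it arrives first */
    bool            met;         /* set by the second arriver */
    bool            req_delete;  /* models THREAD_FLAG_REQ_DELETE */
} rendezvous_t;

static rendezvous_t rdv = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                            false, false, false, false };

static void * joiner( void * arg )
{
    (void)arg;
    pthread_mutex_lock( &rdv.join_lock );
    if( rdv.kill_done )              /* killer arrived first => wake it */
    {
        rdv.met = true;
        pthread_cond_signal( &rdv.wakeup );
    }
    else                             /* joiner is first => flag and wait */
    {
        rdv.join_done = true;
        while( !rdv.met ) pthread_cond_wait( &rdv.wakeup, &rdv.join_lock );
    }
    pthread_mutex_unlock( &rdv.join_lock );
    return NULL;
}

static void * killer( void * arg )
{
    (void)arg;
    pthread_mutex_lock( &rdv.join_lock );
    if( rdv.join_done )              /* joiner arrived first => wake it */
    {
        rdv.met = true;
        pthread_cond_signal( &rdv.wakeup );
    }
    else                             /* killer is first => flag and wait */
    {
        rdv.kill_done = true;
        while( !rdv.met ) pthread_cond_wait( &rdv.wakeup, &rdv.join_lock );
    }
    rdv.req_delete = true;           /* only now request the deletion */
    pthread_mutex_unlock( &rdv.join_lock );
    return NULL;
}

int main( void )
{
    pthread_t tj, tk;
    pthread_create( &tj, NULL, joiner, NULL );
    pthread_create( &tk, NULL, killer, NULL );
    pthread_join( tj, NULL );
    pthread_join( tk, NULL );
    printf("rendezvous complete / req_delete = %d\n", (int)rdv.req_delete );
    return 0;
}

Setting req_delete only after both sides have met mirrors the changeset's restructuring: the deletion request is no longer raised while the killer may still be waiting for the joiner.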