Changeset 583 for trunk/kernel/kern/scheduler.c
Timestamp: Nov 1, 2018, 12:10:42 PM (6 years ago)
Files: 1 edited
Legend: unchanged lines carry no marker; added lines are prefixed with "+"; removed lines are prefixed with "-"; elided regions are shown as "…".
trunk/kernel/kern/scheduler.c (r582 → r583)
     extern chdev_directory_t  chdev_dir;       // allocated in kernel_init.c
+    extern process_t          process_zero;    // allocated in kernel_init.c

     ///////////////////////////////////////////////////////////////////////////////////////////
…
     // check kernel threads list
-    assert( (count < sched->k_threads_nr),
-            "bad kernel threads list" );
+    assert( (count < sched->k_threads_nr), "bad kernel threads list" );

     // get next entry in kernel list
…
     // check user threads list
-    assert( (count < sched->u_threads_nr),
-            "bad user threads list" );
+    assert( (count < sched->u_threads_nr), "bad user threads list" );

     // get next entry in user list
…
     ///////////////////////////////////////////////////////////////////////////////////////////
-    // This static function is the only function that can remove a thread from the scheduler.
+    // This static function is the only function that can actually delete a thread.
     // It is private, because it is called by the sched_yield() public function.
     // It scan all threads attached to a given scheduler, and executes the relevant
-    // actions for pending requests:
+    // actions for two types of pending requests:
     // - REQ_ACK : it checks that target thread is blocked, decrements the response counter
     //   to acknowledge the client thread, and reset the pending request.
-    // - REQ_DELETE : it detach the target thread from parent if attached, detach it from
-    //   the process, remove it from scheduler, release memory allocated to thread descriptor,
-    //   and destroy the process descriptor it the target thread was the last thread.
+    // - REQ_DELETE : it removes the target thread from the process th_tbl[], remove it
+    //   from the scheduler list, and release the memory allocated to thread descriptor.
+    //   For an user thread, it destroys the process descriptor it the target thread is
+    //   the last thread in the local process descriptor.
+    //
+    // Implementation note:
+    // We use a while to scan the threads in scheduler lists, because some threads can
+    // be destroyed, and we want not use a LIST_FOREACH()
     ///////////////////////////////////////////////////////////////////////////////////////////
     // @ core : local pointer on the core descriptor.
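The implementation note added above refers to a deletion-safe traversal idiom: the iterator must be advanced before the current element can be freed, which is why a plain LIST_FOREACH() cannot be used. Below is a minimal, self-contained C sketch of that idiom; the node_t type, the list_unlink() helper, and the req_delete flag are simplified stand-ins for the kernel's list_entry_t machinery and THREAD_FLAG_REQ_DELETE, not the actual ALMOS-MKH API.

#include <stdlib.h>

// simplified circular doubly-linked list node (stand-in for list_entry_t)
typedef struct node_s
{
    struct node_s * next;
    struct node_s * pred;
    int             req_delete;   // stand-in for THREAD_FLAG_REQ_DELETE
} node_t;

// unlink a node from its circular list
static void list_unlink( node_t * n )
{
    n->pred->next = n->next;
    n->next->pred = n->pred;
}

// deletion-safe scan: "root" is the list sentinel; the iterator is advanced
// BEFORE the current node may be freed
static void scan_and_delete( node_t * root )
{
    node_t * iter = root->next;
    while( iter != root )
    {
        node_t * node = iter;

        // advance the iterator first: "node" may be freed below
        iter = iter->next;

        if( node->req_delete )
        {
            list_unlink( node );
            free( node );
        }
    }
}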
…
     process_t   * process;
     scheduler_t * sched;
-    bool_t        last;
+    uint32_t      threads_nr;   // number of threads in scheduler list
+    ltid_t        ltid;         // thread local index
+    uint32_t      count;        // number of threads in local process

     // get pointer on scheduler
     sched = &core->scheduler;

-    // get pointer on user threads root
+    ////// scan user threads to handle both ACK and DELETE requests
     root = &sched->u_root;
-
-    // We use a while to scan the user threads, to control the iterator increment,
-    // because some threads will be destroyed, and we want not use a LIST_FOREACH()
-
-    // initialise list iterator
     iter = root->next;
-
-    // scan all user threads
     while( iter != root )
     {
…
     process = thread->process;

-    // release FPU if required
-    if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;
-
-    // take lock protecting sheduler state
+    // get thread ltid
+    ltid = LTID_FROM_TRDID( thread->trdid);
+
+    // take the lock protecting th_tbl[]
+    rwlock_wr_acquire( &process->th_lock );
+
+    // take the lock protecting sheduler state
     busylock_acquire( &sched->lock );

     // update scheduler state
-    uint32_t threads_nr = sched->u_threads_nr;
+    threads_nr = sched->u_threads_nr;
     sched->u_threads_nr = threads_nr - 1;
     list_unlink( &thread->sched_list );
…
     }

-    // release lock protecting scheduler state
+    // release the lock protecting sheduler state
     busylock_release( &sched->lock );

-    // delete thread descriptor
-    last = thread_destroy( thread );
+    // get number of threads in local process
+    count = process->th_nr;
+
+    // check th_nr value
+    assert( (count > 0) , "process th_nr cannot be 0\n" );
+
+    // remove thread from process th_tbl[]
+    process->th_tbl[ltid] = NULL;
+    process->th_nr = count - 1;
+
+    // release the lock protecting th_tbl[]
+    rwlock_wr_release( &process->th_lock );
+
+    // release memory allocated for thread descriptor
+    thread_destroy( thread );

 #if DEBUG_SCHED_HANDLE_SIGNALS
     uint32_t cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
-    printk("\n[DBG] %s : thread %x in process %x on core[%x,%d] deleted / cycle %d\n",
-    __FUNCTION__ , thread->trdid , process->pid , local_cxy , thread->core->lid , cycle );
+    printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
+    __FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle );
 #endif
-    // destroy process descriptor if no more threads
-    if( last )
+    // destroy process descriptor if last thread
+    if( count == 1 )
     {
         // delete process
…
         }
     }
+    } // end user threads
+
+    ////// scan kernel threads for DELETE only
+    root = &sched->k_root;
+    iter = root->next;
+    while( iter != root )
+    {
+        // get pointer on thread
+        thread = LIST_ELEMENT( iter , thread_t , sched_list );
+
+        // increment iterator
+        iter = iter->next;
+
+        // handle REQ_DELETE only if target thread != calling thread
+        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
+        {
+            // check process descriptor is local kernel process
+            assert( ( thread->process == &process_zero ) , "illegal process descriptor\n");
+
+            // get thread ltid
+            ltid = LTID_FROM_TRDID( thread->trdid);
+
+            // take the lock protecting th_tbl[]
+            rwlock_wr_acquire( &process_zero.th_lock );
+
+            // take lock protecting sheduler state
+            busylock_acquire( &sched->lock );
+
+            // update scheduler state
+            threads_nr = sched->k_threads_nr;
+            sched->k_threads_nr = threads_nr - 1;
+            list_unlink( &thread->sched_list );
+            if( sched->k_last == &thread->sched_list )
+            {
+                if( threads_nr == 1 )
+                {
+                    sched->k_last = NULL;
+                }
+                else if( sched->k_root.next == &thread->sched_list )
+                {
+                    sched->k_last = sched->k_root.pred;
+                }
+                else
+                {
+                    sched->k_last = sched->k_root.next;
+                }
+            }
+
+            // release lock protecting scheduler state
+            busylock_release( &sched->lock );
+
+            // get number of threads in local kernel process
+            count = process_zero.th_nr;
+
+            // check th_nr value
+            assert( (count > 0) , "kernel process th_nr cannot be 0\n" );
+
+            // remove thread from process th_tbl[]
+            process_zero.th_tbl[ltid] = NULL;
+            process_zero.th_nr = count - 1;
+
+            // release the lock protecting th_tbl[]
+            rwlock_wr_release( &process_zero.th_lock );
+
+            // delete thread descriptor
+            thread_destroy( thread );
+
+#if DEBUG_SCHED_HANDLE_SIGNALS
+            uint32_t cycle = (uint32_t)hal_get_cycles();
+            if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
+            printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
+            __FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
+#endif
+        }
+    }
     }
 } // end sched_handle_signals()
…
     ///////////////////////////////////////////////////////////////////////////////////////////
     // This static function is called by the sched_yield function when the RFC_FIFO
     // associated to the core is not empty.
-    // It checks if it exists an idle (blocked) RPC thread for this core, and unblock
-    // it if found. It creates a new RPC thread if no idle RPC thread is found.
+    // It search an idle RPC thread for this core, and unblock it if found.
+    // It creates a new RPC thread if no idle RPC thread is found.
     ///////////////////////////////////////////////////////////////////////////////////////////
     // @ sched : local pointer on scheduler.
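The rewritten header comment above describes a reuse-or-create policy: unblock an idle RPC thread when one exists, and fall back to creating a new one otherwise. The sketch below illustrates that policy only; the singly-linked thread list and the caller-supplied create callback are simplifications, not the real scheduler interface.

#include <stddef.h>

enum { THREAD_RPC = 1, THREAD_BLOCKED_IDLE = 2 };

// simplified thread descriptor (stand-in for the kernel's thread_t)
typedef struct thread_s
{
    int               type;      // THREAD_RPC, ...
    int               blocked;   // THREAD_BLOCKED_IDLE or 0
    struct thread_s * next;      // simplified scheduler list
} thread_t;

// reuse-or-create: scanning for an idle worker bounds the number of RPC
// threads; creation is only the fallback when all of them are busy
static thread_t * rpc_activate( thread_t * head , thread_t * (*create)( void ) )
{
    for( thread_t * t = head ; t != NULL ; t = t->next )
    {
        if( (t->type == THREAD_RPC) && (t->blocked == THREAD_BLOCKED_IDLE) )
        {
            t->blocked = 0;      // unblock the idle RPC thread
            return t;
        }
    }
    return create();             // no idle RPC thread found: create one
}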
…
     {
         thread = LIST_ELEMENT( iter , thread_t , sched_list );
-        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
-        {
-            // exit loop
+
+        if( (thread->type == THREAD_RPC) &&
+            (thread->blocked == THREAD_BLOCKED_IDLE ) )
+        {
             found = true;
             break;
…
     if ( error )
     {
-        printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
+        printk("\n[ERROR] in %s : no memory to create a RPC thread in cluster %x\n",
                __FUNCTION__, local_cxy );
     }
…
     uint32_t cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
-    printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
-    __FUNCTION__, thread->trdid, local_cxy, lid, cycle );
+    printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
+    __FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
 #endif
 }
…
     busylock_release( &sched->lock );

-#if DEBUG_SCHED_YIELD
+#if (DEBUG_SCHED_YIELD & 1)
     if( sched->trace )
     printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
…
     remote_busylock_acquire( lock_xp );

-    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
-                  local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );
+    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
+                  local_cxy , core->lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
+                  (uint32_t)hal_get_cycles() );

     // display kernel threads
…
            "illegal cluster %x\n", cxy );

-    // check lid
     assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
             "illegal core index %d\n", lid );
…
     remote_busylock_acquire( lock_xp );

+    // get rpc_threads
+    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );
+
     // display header
-    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
-                  cxy , lid, current, (uint32_t)hal_get_cycles() );
+    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
+                  cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );

     // display kernel threads
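Several hunks above read a per-core counter in a possibly remote cluster through hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) ). The sketch below is only a conceptual model of that extended-pointer access, inferred from its use in this changeset; the my_ prefix marks every name as hypothetical, and the real xptr_t encoding differs.

#include <stdint.h>

// hypothetical model of an extended pointer: a cluster identifier paired
// with a pointer that is valid inside that cluster
typedef struct
{
    uint32_t   cxy;   // cluster identifier
    void     * ptr;   // local pointer within that cluster
} my_xptr_t;

#define MY_XPTR( cxy , ptr )  ( (my_xptr_t){ (cxy) , (void *)(ptr) } )

// hypothetical remote 32-bit load: on real hardware the access would be
// routed to the remote cluster's memory; in this single-address-space
// sketch a plain volatile load stands in for it
static uint32_t my_remote_l32( my_xptr_t xp )
{
    (void)xp.cxy;   // routing information ignored in the sketch
    return *(volatile uint32_t *)xp.ptr;
}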