Changeset 564 for trunk/kernel/kern/scheduler.c
- Timestamp: Oct 4, 2018, 11:47:36 PM (6 years ago)
- File: trunk/kernel/kern/scheduler.c (1 edited)
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/kernel/kern/scheduler.c
--- trunk/kernel/kern/scheduler.c    (r551)
+++ trunk/kernel/kern/scheduler.c    (r564)
  * scheduler.c - Core scheduler implementation.
  *
- * Author    Alain Greiner (2016)
+ * Author    Alain Greiner (2016,2017,2018)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
 
 ///////////////////////////////////////////////////////////////////////////////////////////
-//  Extern global variables
+//  global variables
 ///////////////////////////////////////////////////////////////////////////////////////////
 
-uint32_t   idle_thread_count;
-uint32_t   idle_thread_count_active;
-
-extern chdev_directory_t  chdev_dir;         // allocated in kernel_init.c file
-extern uint32_t           switch_save_sr[];  // allocated in kernel_init.c file
-
-////////////////////////////////
-void sched_init( core_t * core )
-{
-    scheduler_t * sched = &core->scheduler;
-
-    sched->u_threads_nr = 0;
-    sched->k_threads_nr = 0;
-
-    sched->current = CURRENT_THREAD;
-    sched->idle    = NULL;    // initialized in kernel_init()
-    sched->u_last  = NULL;    // initialized in sched_register_thread()
-    sched->k_last  = NULL;    // initialized in sched_register_thread()
-
-    // initialise threads lists
-    list_root_init( &sched->u_root );
-    list_root_init( &sched->k_root );
-
-    // init spinlock
-    spinlock_init( &sched->lock );
-
-    sched->req_ack_pending = false;    // no pending request
-    sched->trace           = false;    // context switches trace desactivated
-
-}  // end sched_init()
-
-////////////////////////////////////////////
-void sched_register_thread( core_t   * core,
-                            thread_t * thread )
-{
-    scheduler_t * sched = &core->scheduler;
-    thread_type_t type  = thread->type;
-
-    // take lock protecting sheduler lists
-    uint32_t irq_state;
-    spinlock_lock_busy( &sched->lock, &irq_state );
-
-    if( type == THREAD_USER )
-    {
-        list_add_last( &sched->u_root , &thread->sched_list );
-        sched->u_threads_nr++;
-        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
-    }
-    else // kernel thread
-    {
-        list_add_last( &sched->k_root , &thread->sched_list );
-        sched->k_threads_nr++;
-        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
-    }
-
-    // release lock
-    hal_fence();
-    spinlock_unlock_busy( &sched->lock, irq_state);
-
-}  // end sched_register_thread()
-
-//////////////////////////////////////////////
+extern chdev_directory_t  chdev_dir;         // allocated in kernel_init.c
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//  private functions
+///////////////////////////////////////////////////////////////////////////////////////////
+
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function does NOT modify the scheduler state.
+// It just select a thread in the list of attached threads, implementing the following
+// three steps policy:
+// 1) It scan the list of kernel threads, from the next thread after the last executed one,
+//    and returns the first runnable found : not IDLE, not blocked, client queue not empty.
+//    It can be the current thread.
+// 2) If no kernel thread found, it scan the list of user thread, from the next thread after
+//    the last executed one, and returns the first runable found : not blocked.
+//    It can be the current thread.
+// 3) If no runable thread found, it returns the idle thread.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ sched   : local pointer on scheduler.
+// @ returns pointer on selected thread descriptor
+////////////////////////////////////////////////////////////////////////////////////////////
 thread_t * sched_select( scheduler_t * sched )
 {
…
     uint32_t       count;
 
-    // take lock protecting sheduler lists
-    spinlock_lock( &sched->lock );
-
     // first : scan the kernel threads list if not empty
     if( list_is_empty( &sched->k_root ) == false )
…
         while( done == false )
         {
-            assert( (count < sched->k_threads_nr), "bad kernel threads list" );
+
+            // check kernel threads list
+            assert( (count < sched->k_threads_nr),
+            "bad kernel threads list" );
 
             // get next entry in kernel list
…
 
             // select kernel thread if non blocked and non THREAD_IDLE
-            if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) )
-            {
-                spinlock_unlock( &sched->lock );
-                return thread;
-            }
+            if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) ) return thread;
+
         } // end loop on kernel threads
     } // end kernel threads
…
         while( done == false )
         {
-            assert( (count < sched->u_threads_nr), "bad user threads list" );
+
+            // check user threads list
+            assert( (count < sched->u_threads_nr),
+            "bad user threads list" );
 
             // get next entry in user list
…
 
             // select thread if non blocked
-            if( thread->blocked == 0 )
-            {
-                spinlock_unlock( &sched->lock );
-                return thread;
-            }
+            if( thread->blocked == 0 ) return thread;
+
         } // end loop on user threads
     } // end user threads
 
     // third : return idle thread if no other runnable thread
-    spinlock_unlock( &sched->lock );
     return sched->idle;
 
 }  // end sched_select()
 
-///////////////////////////////////////////
-void sched_handle_signals( core_t * core )
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function is the only function that can remove a thread from the scheduler.
+// It is private, because it is called by the sched_yield() public function.
+// It scan all threads attached to a given scheduler, and executes the relevant
+// actions for pending requests:
+// - REQ_ACK : it checks that target thread is blocked, decrements the response counter
+//   to acknowledge the client thread, and reset the pending request.
+// - REQ_DELETE : it detach the target thread from parent if attached, detach it from
+//   the process, remove it from scheduler, release memory allocated to thread descriptor,
+//   and destroy the process descriptor it the target thread was the last thread.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ core    : local pointer on the core descriptor.
+////////////////////////////////////////////////////////////////////////////////////////////
+static void sched_handle_signals( core_t * core )
 {
 
…
     thread_t     * thread;
     process_t    * process;
-    bool_t         last_thread;
+    scheduler_t  * sched;
+    bool_t         last;
 
     // get pointer on scheduler
-    scheduler_t * sched = &core->scheduler;
+    sched = &core->scheduler;
 
     // get pointer on user threads root
     root = &sched->u_root;
 
-    // take lock protecting threads lists
-    spinlock_lock( &sched->lock );
-
     // We use a while to scan the user threads, to control the iterator increment,
-    // because some threads will be destroyed, and we cannot use a LIST_FOREACH()
+    // because some threads will be destroyed, and we want not use a LIST_FOREACH()
 
     // initialise list iterator
…
         if( thread->flags & THREAD_FLAG_REQ_ACK )
         {
-            // check thread blocked
-            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
-            "thread not blocked" );
+
+            // check thread blocked
+            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
+            "thread not blocked" );
 
             // decrement response counter
…
         }
 
-        // handle REQ_DELETE
-        if( thread->flags & THREAD_FLAG_REQ_DELETE )
+        // handle REQ_DELETE only if target thread != calling thread
+        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
         {
             // get thread process descriptor
…
             if( thread->core->fpu_owner == thread ) thread->core->fpu_owner = NULL;
 
-            // remove thread from scheduler (scheduler lock already taken)
+            // take lock protecting sheduler state
+            busylock_acquire( &sched->lock );
+
+            // update scheduler state
             uint32_t threads_nr = sched->u_threads_nr;
-
-            assert( (threads_nr != 0) , "u_threads_nr cannot be 0\n" );
-
             sched->u_threads_nr = threads_nr - 1;
             list_unlink( &thread->sched_list );
…
             }
 
+            // release lock protecting scheduler state
+            busylock_release( &sched->lock );
+
             // delete thread descriptor
-            last_thread = thread_destroy( thread );
+            last = thread_destroy( thread );
 
 #if DEBUG_SCHED_HANDLE_SIGNALS
…
 #endif
             // destroy process descriptor if no more threads
-            if( last_thread )
+            if( last )
             {
                 // delete process
…
         }
     }
+}  // end sched_handle_signals()
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function is called by the sched_yield function when the RFC_FIFO
+// associated to the core is not empty.
+// It checks if it exists an idle (blocked) RPC thread for this core, and unblock
+// it if found. It creates a new RPC thread if no idle RPC thread is found.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ sched   : local pointer on scheduler.
+////////////////////////////////////////////////////////////////////////////////////////////
+void sched_rpc_activate( scheduler_t * sched )
+{
+    error_t         error;
+    thread_t      * thread;
+    list_entry_t  * iter;
+    lid_t           lid   = CURRENT_THREAD->core->lid;
+    bool_t          found = false;
+
+    // search one IDLE RPC thread associated to the selected core
+    LIST_FOREACH( &sched->k_root , iter )
+    {
+        thread = LIST_ELEMENT( iter , thread_t , sched_list );
+        if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
+        {
+            // exit loop
+            found = true;
+            break;
+        }
+    }
+
+    if( found == false )     // create new RPC thread
+    {
+        error = thread_kernel_create( &thread,
+                                      THREAD_RPC,
+                                      &rpc_thread_func,
+                                      NULL,
+                                      lid );
+        // check memory
+        if ( error )
+        {
+            printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
+            __FUNCTION__, local_cxy );
+        }
+        else
+        {
+            // unblock created RPC thread
+            thread->blocked = 0;
+
+            // update RPC threads counter
+            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );
+
+#if DEBUG_SCHED_RPC_ACTIVATE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
+printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
+__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
+#endif
+        }
+    }
+    else                     // RPC thread found => unblock it
+    {
+        // unblock found RPC thread
+        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );
+
+#if DEBUG_SCHED_RPC_ACTIVATE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
+printk("\n[DBG] %s : idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
+__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
+#endif
+
+    }
+
+}  // end sched_rpc_activate()
+
+
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//  public functions
+///////////////////////////////////////////////////////////////////////////////////////////
+
+////////////////////////////////
+void sched_init( core_t * core )
+{
+    scheduler_t * sched = &core->scheduler;
+
+    sched->u_threads_nr = 0;
+    sched->k_threads_nr = 0;
+
+    sched->current = CURRENT_THREAD;
+    sched->idle    = NULL;    // initialized in kernel_init()
+    sched->u_last  = NULL;    // initialized in sched_register_thread()
+    sched->k_last  = NULL;    // initialized in sched_register_thread()
+
+    // initialise threads lists
+    list_root_init( &sched->u_root );
+    list_root_init( &sched->k_root );
+
+    // init lock
+    busylock_init( &sched->lock , LOCK_SCHED_STATE );
+
+    sched->req_ack_pending = false;    // no pending request
+    sched->trace           = false;    // context switches trace desactivated
+
+}  // end sched_init()
+
+////////////////////////////////////////////
+void sched_register_thread( core_t   * core,
+                            thread_t * thread )
+{
+    scheduler_t * sched = &core->scheduler;
+    thread_type_t type  = thread->type;
+
+    // take lock protecting sheduler state
+    busylock_acquire( &sched->lock );
+
+    if( type == THREAD_USER )
+    {
+        list_add_last( &sched->u_root , &thread->sched_list );
+        sched->u_threads_nr++;
+        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
+    }
+    else // kernel thread
+    {
+        list_add_last( &sched->k_root , &thread->sched_list );
+        sched->k_threads_nr++;
+        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
+    }
 
     // release lock
-    hal_fence();
-    spinlock_unlock( &sched->lock );
-
-}  // end sched_handle_signals()
-
-////////////////////////////////
+    busylock_release( &sched->lock );
+
+}  // end sched_register_thread()
+
+//////////////////////////////////////
 void sched_yield( const char * cause )
 {
-    thread_t    * next;
-    thread_t    * current = CURRENT_THREAD;
-    core_t      * core    = current->core;
-    scheduler_t * sched   = &core->scheduler;
+    thread_t      * next;
+    thread_t      * current = CURRENT_THREAD;
+    core_t        * core    = current->core;
+    lid_t           lid     = core->lid;
+    scheduler_t   * sched   = &core->scheduler;
+    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];
 
 #if (DEBUG_SCHED_YIELD & 0x1)
-    if( sched->trace )
-        sched_display( core->lid );
+    if( sched->trace ) sched_display( lid );
 #endif
 
-    // delay the yield if current thread has locks
-    if( (current->local_locks != 0) || (current->remote_locks != 0) )
-    {
-        current->flags |= THREAD_FLAG_SCHED;
-        return;
-    }
-
-    // enter critical section / save SR in current thread descriptor
-    hal_disable_irq( &CURRENT_THREAD->save_sr );
-
-    // loop on threads to select next thread
+    // check current thread busylocks counter
+    assert( (current->busylocks == 0),
+    "thread cannot yield : busylocks = %d\n", current->busylocks );
+
+    // activate or create an RPC thread if RPC_FIFO non empty
+    if( remote_fifo_is_empty( fifo ) == false ) sched_rpc_activate( sched );
+
+    // disable IRQs / save SR in current thread descriptor
+    hal_disable_irq( &current->save_sr );
+
+    // take lock protecting sheduler state
+    busylock_acquire( &sched->lock );
+
+    // select next thread
     next = sched_select( sched );
 
-    // check next thread kernel_stack overflow
-    assert( (next->signature == THREAD_SIGNATURE),
-    "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, core->lid );
-
-    // check next thread attached to same core as the calling thread
-    assert( (next->core == current->core),
-    "next core %x != current core %x\n", next->core, current->core );
-
-    // check next thread not blocked when type != IDLE
-    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
-    "next thread %x (%s) is blocked on core[%x,%d]\n",
-    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );
+    // check next thread kernel_stack overflow
+    assert( (next->signature == THREAD_SIGNATURE),
+    "kernel stack overflow for thread %x on core[%x,%d] \n", next, local_cxy, lid );
+
+    // check next thread attached to same core as the calling thread
+    assert( (next->core == current->core),
+    "next core %x != current core %x\n", next->core, current->core );
+
+    // check next thread not blocked when type != IDLE
+    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
+    "next thread %x (%s) is blocked on core[%x,%d]\n",
+    next->trdid , thread_type_str(next->type) , local_cxy , lid );
 
     // switch contexts and update scheduler state if next != current
     if( next != current )
     {
+        // update scheduler
+        sched->current = next;
+        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
+        else                            sched->k_last = &next->sched_list;
+
+        // handle FPU ownership
+        if( next->type == THREAD_USER )
+        {
+            if( next == current->core->fpu_owner ) hal_fpu_enable();
+            else                                   hal_fpu_disable();
+        }
+
+        // release lock protecting scheduler state
+        busylock_release( &sched->lock );
 
 #if DEBUG_SCHED_YIELD
…
 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
 "      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
-__FUNCTION__, local_cxy, core->lid, cause,
+__FUNCTION__, local_cxy, lid, cause,
 current, thread_type_str(current->type), current->process->pid, current->trdid, next,
 thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
 #endif
 
-        // update scheduler
-        sched->current = next;
-        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
-        else                            sched->k_last = &next->sched_list;
-
-        // handle FPU ownership
-        if( next->type == THREAD_USER )
-        {
-            if( next == current->core->fpu_owner ) hal_fpu_enable();
-            else                                   hal_fpu_disable();
-        }
-
         // switch CPU from current thread context to new thread context
         hal_do_cpu_switch( current->cpu_context, next->cpu_context );
…
     else
     {
+        // release lock protecting scheduler state
+        busylock_release( &sched->lock );
 
 #if DEBUG_SCHED_YIELD
…
 printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
 "      thread %x (%s) (%x,%x) continue / cycle %d\n",
-__FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),
+__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
 current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
 #endif
…
     list_entry_t * iter;
     thread_t     * thread;
-    uint32_t       save_sr;
-
-    assert( (lid < LOCAL_CLUSTER->cores_nr), "illegal core index %d\n", lid);
+
+    // check lid
+    assert( (lid < LOCAL_CLUSTER->cores_nr),
+    "illegal core index %d\n", lid);
 
     core_t       * core = &LOCAL_CLUSTER->core_tbl[lid];
…
     chdev_t      * txt0_ptr = GET_PTR( txt0_xp );
 
-    // get extended pointer on remote TXT0 chdev lock
+    // get extended pointer on remote TXT0 lock
     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
 
-    // get TXT0 lock in busy waiting mode
-    remote_spinlock_lock_busy( lock_xp , &save_sr );
+    // get TXT0 lock
+    remote_busylock_acquire( lock_xp );
 
     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
…
 
     // release TXT0 lock
-    remote_spinlock_unlock_busy( lock_xp , save_sr );
+    remote_busylock_release( lock_xp );
 
 }  // end sched_display()
…
 {
     thread_t     * thread;
-    uint32_t       save_sr;
-
-    // check cxy
-    bool_t undefined = cluster_is_undefined( cxy );
-    assert( (undefined == false), "illegal cluster %x\n", cxy );
-
-    // check lid
-    uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
-    assert( (lid < cores), "illegal core index %d\n", lid);
+
+    // check cxy
+    assert( (cluster_is_undefined( cxy ) == false),
+    "illegal cluster %x\n", cxy );
+
+    // check lid
+    assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
+    "illegal core index %d\n", lid );
 
     // get local pointer on target scheduler
…
     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
 
-    // get TXT0 lock in busy waiting mode
-    remote_spinlock_lock_busy( lock_xp , &save_sr );
+    // get TXT0 lock
+    remote_busylock_acquire( lock_xp );
 
     // display header
…
 
         // get relevant thead info
-        thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
-        trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
-        uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
-        uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
+        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
+        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
+        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
+        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
         process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
-        pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
+        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
 
         // display thread info
…
 
         // get relevant thead info
-        thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
-        trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
-        uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
-        uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
+        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
+        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
+        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
+        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
         process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
-        pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
+        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
 
         nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
…
 
     // release TXT0 lock
-    remote_spinlock_unlock_busy( lock_xp , save_sr );
+    remote_busylock_release( lock_xp );
 
 }  // end sched_remote_display()
 
+
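
The main change in this revision is the scheduler locking discipline: the per-scheduler spinlock is replaced by a busylock created with LOCK_SCHED_STATE, sched_select() and sched_handle_signals() no longer take the lock themselves, and sched_yield() now refuses to run while the calling thread still holds any busylock (the current->busylocks == 0 assert). The sketch below illustrates the calling pattern this implies for kernel code; it is an illustration only, and the wait-queue step, the busylock_t type name and the queue_lock variable are assumptions, not part of this changeset.

// Minimal sketch (not part of the changeset) of how a kernel code path is
// expected to interact with the scheduler after r564. Only sched_yield(),
// busylock_acquire() and busylock_release() appear in this file; the
// wait-queue registration and the queue_lock argument are hypothetical.
void example_wait( busylock_t * queue_lock )     // hypothetical caller
{
    // protect the shared wait-queue state with a busylock
    busylock_acquire( queue_lock );

    // ... register CURRENT_THREAD in the wait queue (hypothetical) ...

    // the busylock MUST be released before yielding, because sched_yield()
    // now asserts that current->busylocks == 0
    busylock_release( queue_lock );

    // descheduling point : sched_yield() takes the scheduler busylock
    // internally, selects the next thread, and switches context
    sched_yield( "waiting on queue" );
}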
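
The sched_handle_signals() header comment above describes an asynchronous deletion protocol: a client thread raises THREAD_FLAG_REQ_DELETE on the target, and the target is unlinked and destroyed later by the scheduler of the core it is attached to, now only when the target is not the calling thread. The client side is not part of this file; the sketch below is a hypothetical illustration of it, and the helper name and the plain |= update are assumptions (the real kernel presumably uses its own thread API and an atomic HAL primitive).

// Hypothetical illustration (not in this file) of the client side of the
// REQ_DELETE protocol handled by sched_handle_signals().
static void example_request_delete( thread_t * target )   // hypothetical helper
{
    // mark the target thread for deletion; a plain |= is used here for
    // brevity, the real kernel presumably uses an atomic HAL operation
    target->flags |= THREAD_FLAG_REQ_DELETE;

    // nothing is freed here : the unlink, thread_destroy() and possible
    // process destruction happen later, in sched_handle_signals(), when
    // the target core calls sched_yield() and target != CURRENT_THREAD
}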