Changeset 450 for trunk/kernel/kern/scheduler.c
Timestamp: Jun 29, 2018, 10:44:14 AM
File:      1 edited
Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed, unprefixed lines are unmodified context.
trunk/kernel/kern/scheduler.c
r445 -> r450

@@ -104,4 +104,5 @@
     list_entry_t * root;
     bool_t         done;
+    uint32_t       count;
 
     // take lock protecting sheduler lists
@@ -113,9 +114,12 @@
         root    = &sched->k_root;
         last    = sched->k_last;
+        done    = false;
+        count   = 0;
         current = last;
-        done    = false;
 
         while( done == false )
         {
+            assert( (count < sched->k_threads_nr), __FUNCTION__, "bad kernel threads list" );
+
             // get next entry in kernel list
             current = current->next;
@@ -126,9 +130,10 @@
             // skip the root that does not contain a thread
             if( current == root ) continue;
+            else count++;
 
             // get thread pointer for this entry
             thread = LIST_ELEMENT( current , thread_t , sched_list );
 
-            // select kernel thread if non blocked and non IDLE
+            // select kernel thread if non blocked and non THREAD_IDLE
             if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) )
             {
@@ -137,5 +142,5 @@
             }
         } // end loop on kernel threads
-    } // end if kernel threads
+    } // end kernel threads
 
     // second : scan the user threads list if not empty
@@ -144,9 +149,12 @@
         root    = &sched->u_root;
         last    = sched->u_last;
+        done    = false;
+        count   = 0;
         current = last;
-        done    = false;
 
         while( done == false )
        {
+            assert( (count < sched->u_threads_nr), __FUNCTION__, "bad user threads list" );
+
             // get next entry in user list
             current = current->next;
@@ -157,9 +165,10 @@
             // skip the root that does not contain a thread
             if( current == root ) continue;
+            else count++;
 
             // get thread pointer for this entry
             thread = LIST_ELEMENT( current , thread_t , sched_list );
 
-            // return thread if non blocked
+            // select thread if non blocked
             if( thread->blocked == 0 )
             {
@@ -168,5 +177,5 @@
             }
         } // end loop on user threads
-    } // end if user threads
+    } // end user threads
 
     // third : return idle thread if no other runnable thread
@@ -240,7 +249,21 @@
         sched->u_threads_nr = threads_nr - 1;
         list_unlink( &thread->sched_list );
-        if( threads_nr == 1 ) sched->u_last = NULL;
-
-        // delete thread
+        if( sched->u_last == &thread->sched_list )
+        {
+            if( threads_nr == 1 )
+            {
+                sched->u_last = NULL;
+            }
+            else if( sched->u_root.next == &thread->sched_list )
+            {
+                sched->u_last = sched->u_root.pred;
+            }
+            else
+            {
+                sched->u_last = sched->u_root.next;
+            }
+        }
+
+        // delete thread descriptor
         last_thread = thread_destroy( thread );
 
@@ -263,5 +286,4 @@
 __FUNCTION__ , process->pid , local_cxy , cycle );
 #endif
-
     }
 }
@@ -421,2 +443,103 @@
 } // end sched_display()
 
+/////////////////////////////////////
+void sched_remote_display( cxy_t cxy,
+                           lid_t lid )
+{
+    thread_t * thread;
+    uint32_t   save_sr;
+
+    // check cxy
+    bool_t undefined = cluster_is_undefined( cxy );
+    assert( (undefined == false), __FUNCTION__, "illegal cluster %x\n", cxy );
+
+    // check lid
+    uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
+    assert( (lid < cores), __FUNCTION__, "illegal core index %d\n", lid);
+
+    // get local pointer on target scheduler
+    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
+    scheduler_t * sched = &core->scheduler;
+
+    // get local pointer on current thread in target scheduler
+    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );
+
+    // get local pointer on the first kernel and user threads list_entry
+    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
+    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );
+
+    // get pointers on TXT0 chdev
+    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
+    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+    // get extended pointer on remote TXT0 chdev lock
+    xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+    // get TXT0 lock in busy waiting mode
+    remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+    // display header
+    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
+                  cxy , lid, current, (uint32_t)hal_get_cycles() );
+
+    // display kernel threads
+    while( k_entry != &sched->k_root )
+    {
+        // get local pointer on kernel_thread
+        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );
+
+        // get relevant thead info
+        thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
+        trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
+        uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
+        uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
+        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
+        pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
+
+        // display thread info
+        if (type == THREAD_DEV)
+        {
+            char name[16];
+            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
+            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , &chdev->name ) );
+
+            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
+            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
+        }
+        else
+        {
+            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
+            thread_type_str( type ), pid, trdid, thread, blocked, flags );
+        }
+
+        // get next remote kernel thread list_entry
+        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
+    }
+
+    // display user threads
+    while( u_entry != &sched->u_root )
+    {
+        // get local pointer on user_thread
+        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );
+
+        // get relevant thead info
+        thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
+        trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
+        uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
+        uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
+        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
+        pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );
+
+        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
+        thread_type_str( type ), pid, trdid, thread, blocked, flags );
+
+        // get next user thread list_entry
+        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
+    }
+
+    // release TXT0 lock
+    remote_spinlock_unlock_busy( lock_xp , save_sr );
+
+} // end sched_remote_display()
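Editorial note on the changeset: the most substantive fix is the repair of sched->u_last when the thread being deleted is the one u_last points to. Before r450, u_last was only cleared when the user list became empty, so it could be left dangling while other user threads remained linked. The stand-alone sketch below models that repair on a circular doubly linked list with a root sentinel; the names entry_t, sched_model_t and unlink_and_fix_last are invented for illustration and are not part of the kernel sources (the kernel uses list_entry_t, list_unlink() and the scheduler_t fields shown in the diff).

/* minimal user-space model of the r450 u_last repair (illustrative names only) */
#include <assert.h>
#include <stddef.h>

typedef struct entry_s              /* simplified stand-in for list_entry_t      */
{
    struct entry_s * next;
    struct entry_s * pred;
} entry_t;

typedef struct                      /* simplified stand-in for the scheduler     */
{
    entry_t   u_root;               /* root sentinel of the circular user list   */
    entry_t * u_last;               /* round-robin starting point                */
    unsigned  u_threads_nr;         /* number of linked entries (root excluded)  */
} sched_model_t;

/* unlink <e> and repair u_last, mirroring the logic added in r450 */
static void unlink_and_fix_last( sched_model_t * s , entry_t * e )
{
    unsigned nr = s->u_threads_nr;

    s->u_threads_nr = nr - 1;
    e->pred->next   = e->next;                  /* list_unlink()                 */
    e->next->pred   = e->pred;

    if( s->u_last == e )                        /* u_last would dangle           */
    {
        if( nr == 1 )                  s->u_last = NULL;             /* list now empty        */
        else if( s->u_root.next == e ) s->u_last = s->u_root.pred;   /* e was head: use tail  */
        else                           s->u_last = s->u_root.next;   /* otherwise use head    */
    }
}

int main( void )
{
    sched_model_t s;
    entry_t a, b;

    /* build root <-> a <-> b <-> root */
    s.u_root.next = &a;  a.pred = &s.u_root;
    a.next = &b;         b.pred = &a;
    b.next = &s.u_root;  s.u_root.pred = &b;
    s.u_last       = &b;
    s.u_threads_nr = 2;

    unlink_and_fix_last( &s , &b );             /* remove the entry u_last points to */
    assert( s.u_last == &a );                   /* u_last repaired to a live entry   */

    unlink_and_fix_last( &s , &a );             /* remove the final entry            */
    assert( s.u_last == NULL );                 /* empty list: u_last cleared        */

    return 0;
}

The count/assert pairs added to the two selection loops follow the same defensive idea: each scan is bounded by the recorded k_threads_nr or u_threads_nr, so a corrupted circular list fails a kernel assert instead of looping forever.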