/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author  Alain Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH. is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <rpc.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>


///////////////////////////////////////////////////////////////////////////////////////////
//         global variables
///////////////////////////////////////////////////////////////////////////////////////////

extern chdev_directory_t    chdev_dir;      // allocated in kernel_init.c
extern process_t            process_zero;   // allocated in kernel_init.c

///////////////////////////////////////////////////////////////////////////////////////////
//         private functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////////////////
// This static function does NOT modify the scheduler state.
// It just selects a thread from the lists of attached threads, implementing the following
// three-step policy:
// 1) It scans the list of kernel threads, starting from the thread after the last executed
//    one, and returns the first runnable thread found: not IDLE, not blocked, client queue
//    not empty. It can be the current thread.
// 2) If no kernel thread is found, it scans the list of user threads, starting from the
//    thread after the last executed one, and returns the first runnable thread found:
//    not blocked. It can be the current thread.
// 3) If no runnable thread is found, it returns the idle thread.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
// @ returns pointer on selected thread descriptor
////////////////////////////////////////////////////////////////////////////////////////////
static thread_t * __attribute__((__noinline__)) sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;
    uint32_t       count;

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {
            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select kernel thread if not blocked and not THREAD_IDLE
            if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) ) return thread;

        } // end loop on kernel threads
    } // end kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {
            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select thread if not blocked
            if( thread->blocked == 0 ) return thread;

        } // end loop on user threads
    } // end user threads

    // third : return idle thread if no other runnable thread
    return sched->idle;

} // end sched_select()
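
// The scans of both lists above share one round-robin pattern: start just
// after the last scheduled entry, walk the circular list, skip the root
// (which holds no thread), and stop after one full turn. The sketch under
// #if 0 isolates that pattern; is_runnable() is a hypothetical placeholder
// for the actual runnability test, and the code is illustrative, not compiled.
#if 0
static thread_t * round_robin_scan( list_entry_t * root,
                                    list_entry_t * last )
{
    list_entry_t * current = last;
    do
    {
        current = current->next;                  // move to next entry
        if( current == root ) continue;           // root holds no thread
        thread_t * t = LIST_ELEMENT( current , thread_t , sched_list );
        if( is_runnable( t ) ) return t;          // first runnable wins
    }
    while( current != last );
    return NULL;                                  // full turn, nothing runnable
}
#endif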

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is the only function that can actually delete a thread
// (and the associated process descriptor when required).
// It is private, because it is only called by the sched_yield() public function.
// It scans all threads attached to a given scheduler, and executes the relevant
// actions for two types of pending requests:
//
// - REQ_ACK : it checks that the target thread is blocked, decrements the response
//   counter to acknowledge the client thread, and resets the pending request.
// - REQ_DELETE : it removes the target thread from the process th_tbl[], removes it
//   from the scheduler list, and releases the memory allocated to the thread descriptor.
//   For a user thread, it destroys the process descriptor if the target thread is
//   the last thread in the local process descriptor.
//
// Implementation note:
// We use a while loop to scan the threads in the scheduler lists, because some threads
// can be destroyed during the scan, so we cannot use LIST_FOREACH().
////////////////////////////////////////////////////////////////////////////////////////////
// @ core    : local pointer on the core descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;
    scheduler_t  * sched;
    uint32_t       threads_nr;   // number of threads in scheduler list
    ltid_t         ltid;         // thread local index
    uint32_t       count;        // number of threads in local process

    // get pointer on scheduler
    sched = &core->scheduler;

    ////////////////// scan user threads to handle ACK and DELETE requests
    root = &sched->u_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {

            // check target thread blocked
            assert( __FUNCTION__, (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE only if target thread != calling thread
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {

            // check calling thread != target thread
            assert( __FUNCTION__, (thread != CURRENT_THREAD) , "calling thread cannot delete itself" );

            // get thread process descriptor
            process = thread->process;

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->u_threads_nr;
            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
            if( sched->u_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->u_last = NULL;
                }
                else if( sched->u_root.next == &thread->sched_list )
                {
                    sched->u_last = sched->u_root.pred;
                }
                else
                {
                    sched->u_last = sched->u_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

            // release memory allocated for thread
            count = thread_destroy( thread );

            hal_fence();

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__, process->pid, thread->trdid, local_cxy, thread->core->lid, cycle );
#endif
            // destroy process descriptor if last thread
            if( count == 1 )
            {
                // delete process
                process_destroy( process );

#if DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] process %x in cluster %x deleted / cycle %d\n",
__FUNCTION__ , process->pid , local_cxy , cycle );
#endif
            }
        }
    } // end user threads

    ///////////// scan kernel threads for DELETE only
    root = &sched->k_root;
    iter = root->next;
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

        // handle REQ_DELETE only if target thread != calling thread
        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
        {

            // check process descriptor is local kernel process
            assert( __FUNCTION__, ( thread->process == &process_zero ) , "illegal process descriptor");

            // get thread ltid
            ltid = LTID_FROM_TRDID( thread->trdid );

            // take the lock protecting scheduler state
            busylock_acquire( &sched->lock );

            // update scheduler state
            threads_nr = sched->k_threads_nr;
            sched->k_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
            if( sched->k_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->k_last = NULL;
                }
                else if( sched->k_root.next == &thread->sched_list )
                {
                    sched->k_last = sched->k_root.pred;
                }
                else
                {
                    sched->k_last = sched->k_root.next;
                }
            }

            // release the lock protecting scheduler state
            busylock_release( &sched->lock );

            // get number of threads in local kernel process
            count = process_zero.th_nr;

            // check th_nr value
            assert( __FUNCTION__, (process_zero.th_nr > 0) , "kernel process th_nr cannot be 0" );

            // remove thread from process th_tbl[]
            process_zero.th_tbl[ltid] = NULL;
            hal_atomic_add( &process_zero.th_nr , -1 );

            // delete thread descriptor
            thread_destroy( thread );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[%s] thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
#endif
        }
    }
} // end sched_handle_signals()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the sched_yield() function when the RPC_FIFO
// associated to the core is not empty.
// It searches for an idle RPC thread on this core, and unblocks it if found.
// It creates a new RPC thread if no idle RPC thread is found.
////////////////////////////////////////////////////////////////////////////////////////////
// @ sched   : local pointer on scheduler.
////////////////////////////////////////////////////////////////////////////////////////////
static void sched_rpc_activate( scheduler_t * sched )
{
    error_t        error;
    thread_t     * thread;
    list_entry_t * iter;
    lid_t          lid   = CURRENT_THREAD->core->lid;
    bool_t         found = false;

    // search one IDLE RPC thread associated to the selected core
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        if( (thread->type == THREAD_RPC) &&
            (thread->blocked == THREAD_BLOCKED_IDLE ) )
        {
            found = true;
            break;
        }
    }

    if( found == false )     // create new RPC thread
    {
        error = thread_kernel_create( &thread,
                                      THREAD_RPC,
                                      &rpc_server_func,
                                      NULL,
                                      lid );
        // check memory
        if ( error )
        {
            printk("\n[ERROR] in %s : no memory to create an RPC thread in cluster %x\n",
            __FUNCTION__, local_cxy );
        }
        else
        {
            // unblock created RPC thread
            thread->blocked = 0;

            // update RPC threads counter
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[lid] , 1 );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
printk("\n[%s] new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
#endif
        }
    }
    else                     // RPC thread found => unblock it
    {
        // unblock found RPC thread
        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_IDLE );

#if DEBUG_SCHED_RPC_ACTIVATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
printk("\n[%s] idle RPC thread %x unblocked for core[%x,%d] / cycle %d\n",
__FUNCTION__, thread->trdid, local_cxy, lid, cycle );
#endif

    }

} // end sched_rpc_activate()
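
// Hedged sketch of the RPC server loop assumed by the idleness test above:
// an RPC thread is considered idle when its only blocking bit is
// THREAD_BLOCKED_IDLE, which supposes that rpc_server_func() blocks its own
// thread on that bit when the RPC FIFO drains. The helpers rpc_fifo_not_empty()
// and handle_one_rpc() are hypothetical names, not the actual rpc.c API.
#if 0
void rpc_server_func( void )
{
    while( 1 )
    {
        // serve all pending requests (hypothetical helpers)
        while( rpc_fifo_not_empty() ) handle_one_rpc();

        // block on IDLE and give the core away; sched_rpc_activate()
        // unblocks this thread when new requests arrive
        thread_block( XPTR( local_cxy , CURRENT_THREAD ) , THREAD_BLOCKED_IDLE );
        sched_yield( "RPC fifo empty" );
    }
}
#endif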


///////////////////////////////////////////////////////////////////////////////////////////
//         public functions
///////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;    // initialized in kernel_init()
    sched->u_last         = NULL;    // initialized in sched_register_thread()
    sched->k_last         = NULL;    // initialized in sched_register_thread()

    // initialise threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    // init lock
    busylock_init( &sched->lock , LOCK_SCHED_STATE );

    sched->req_ack_pending = false;  // no pending request
    sched->trace           = false;  // context switches trace deactivated

} // end sched_init()
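
// Minimal usage sketch, assuming sched_init() is called once per core during
// kernel_init(), before any thread (including the idle thread) is attached;
// the core_tbl[] access mirrors the one used in sched_remote_display() below.
#if 0
core_t * core = &LOCAL_CLUSTER->core_tbl[lid];
sched_init( core );          // empty lists, idle thread attached later
#endif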

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    busylock_release( &sched->lock );

} // end sched_register_thread()
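
// Hedged sketch: a thread must be registered on the scheduler of its assigned
// core before sched_select() can ever return it. The creation path shown is
// the one used by sched_rpc_activate() above; whether thread_kernel_create()
// already performs this registration internally is not shown in this file,
// so treat the explicit call as illustrative only.
#if 0
thread_t * thread;
if( thread_kernel_create( &thread , THREAD_RPC , &rpc_server_func , NULL , lid ) == 0 )
{
    sched_register_thread( &LOCAL_CLUSTER->core_tbl[lid] , thread );
}
#endif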

//////////////////////////////////////////////////////////////////
void sched_yield( const char * cause __attribute__((__unused__)) )
{
    thread_t      * next;
    thread_t      * current = CURRENT_THREAD;
    core_t        * core    = current->core;
    lid_t           lid     = core->lid;
    scheduler_t   * sched   = &core->scheduler;
    remote_fifo_t * fifo    = &LOCAL_CLUSTER->rpc_fifo[lid];

#if DEBUG_SCHED_YIELD
uint32_t cycle = (uint32_t)hal_get_cycles();
#endif

#if (DEBUG_SCHED_YIELD & 0x1)
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
sched_remote_display( local_cxy , lid );
#endif

    // This assert should always be true, as this check has been
    // done before, by any function that can possibly deschedule...
    assert( __FUNCTION__, (current->busylocks == 0),
    "current thread holds %d busylocks\n", current->busylocks );

    // activate or create an RPC thread if RPC_FIFO non empty
    if( remote_fifo_is_empty( fifo ) == false ) sched_rpc_activate( sched );

    // disable IRQs / save SR in current thread descriptor
    hal_disable_irq( &current->save_sr );

    // take lock protecting scheduler state
    busylock_acquire( &sched->lock );

    // select next thread
    next = sched_select( sched );

    // check next thread kernel_stack overflow
    assert( __FUNCTION__, (next->signature == THREAD_SIGNATURE),
    "kernel stack overflow for thread %x on core[%x,%d]", next, local_cxy, lid );

    // check next thread attached to same core as the current thread
    assert( __FUNCTION__, (next->core == current->core),
    "next_core_lid %d / current_core_lid %d", current->core->lid, next->core->lid );

    // check next thread not blocked when type != IDLE
    assert( __FUNCTION__, ((next->blocked == 0) || (next->type == THREAD_IDLE)) ,
    "next thread %x (%s) is blocked on core[%x,%d]",
    next->trdid , thread_type_str(next->type) , local_cxy , lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {
        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, next,
thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
#endif

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {
        // release lock protecting scheduler state
        busylock_release( &sched->lock );

#if DEBUG_SCHED_YIELD
if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
printk("\n[%s] core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, lid, cause, current, thread_type_str(current->type),
current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

} // end sched_yield()
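
// Hedged sketch of the canonical deschedule pattern that the busylocks assert
// above relies on: release all busylocks, block the calling thread, then call
// sched_yield(); execution resumes after hal_do_cpu_switch() once another
// thread unblocks it and this scheduler selects it again. THREAD_BLOCKED_IO
// is just one example blocking cause.
#if 0
void example_wait_on_io( void )
{
    thread_t * this = CURRENT_THREAD;

    // mark the calling thread blocked (thread_block() is defined in thread.c)
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IO );

    // give the core away; we return here only after unblock + reschedule
    sched_yield( "blocked on IO" );
}
#endif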


/////////////////////////////////////
void sched_remote_display( cxy_t cxy,
                           lid_t lid )
{
    thread_t * thread;

    // get local pointer on target scheduler
    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t * sched = &core->scheduler;

    // get local pointer on current thread in target scheduler
    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );

    // get local pointer on the first kernel and user threads list_entry
    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock
    remote_busylock_acquire( lock_xp );

    // get rpc_threads
    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );

    // display header
    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
    nolock_printk(" type | pid | trdid | desc | block | flags | func\n");

    // display kernel threads
    while( k_entry != &sched->k_root )
    {
        // get local pointer on kernel_thread
        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t   * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );

        // display thread info
        if (type == THREAD_DEV)
        {
            char name[16];
            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );

            nolock_printk(" - %s | %X | %X | %X | %X | %X | %s\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
        }
        else
        {
            nolock_printk(" - %s | %X | %X | %X | %X | %X |\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags );
        }

        // get next remote kernel thread list_entry
        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
    }

    // display user threads
    while( u_entry != &sched->u_root )
    {
        // get local pointer on user_thread
        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_l32 ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_l32 ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_l32 ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_l32 ( XPTR( cxy , &thread->flags ) );
        process_t   * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
        void        * func    = hal_remote_lpt ( XPTR( cxy , &thread->entry_func ) );

        nolock_printk(" - %s | %X | %X | %X | %X | %X | %x\n",
        thread_type_str( type ), pid, trdid, thread, blocked, flags, (uint32_t)func );

        // get next user thread list_entry
        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
    }

    // release TXT0 lock
    remote_busylock_release( lock_xp );

} // end sched_remote_display()
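
// Usage sketch: because it only uses remote accesses (hal_remote_l32/lpt) and
// the TXT0 lock, this function can display the scheduler of any core in any
// cluster; e.g. dumping core 0 of the local cluster from a debug hook:
#if 0
sched_remote_display( local_cxy , 0 );
#endif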
---|