/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <spinlock.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <scheduler.h>

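///////////////////////////////////////////////////////////////////////////////
// The scheduler attached to each core maintains two circular lists of
// threads: one for kernel threads and one for user threads. The k_last and
// u_last pointers identify the most recently scheduled entry in each list,
// and define the starting point of the round-robin scan in sched_select().
// The lists themselves are protected by the scheduler spinlock.
///////////////////////////////////////////////////////////////////////////////
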
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialize threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

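// This function adds a new thread to the user or kernel list of the
// scheduler attached to the <core> argument, depending on the thread type.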
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialize u_last field if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialize k_last field if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

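// This function removes a thread from the user or kernel list of the
// scheduler attached to the thread's core, and updates the u_last / k_last
// pointer when it references the removed thread.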
/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t        * core  = thread->core;
    scheduler_t   * sched = &core->scheduler;
    thread_type_t   type  = thread->type;
    list_entry_t  * pred;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // if u_last references the removed thread, move it to the
        // predecessor entry, skipping the list root
        if( sched->u_last == &thread->sched_list )
        {
            pred = thread->sched_list.pred;
            if( pred == &sched->u_root ) pred = sched->u_root.pred;
            if( pred == &thread->sched_list ) sched->u_last = NULL;
            else                              sched->u_last = pred;
        }

        // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;

        // reset the u_last field if list empty
        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    }
    else // kernel thread
    {
        // if k_last references the removed thread, move it to the
        // predecessor entry, skipping the list root
        if( sched->k_last == &thread->sched_list )
        {
            pred = thread->sched_list.pred;
            if( pred == &sched->k_root ) pred = sched->k_root.pred;
            if( pred == &thread->sched_list ) sched->k_last = NULL;
            else                              sched->k_last = pred;
        }

        // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;

        // reset the k_last field if list empty
        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

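// This function detaches a thread marked by the THREAD_SIG_KILL signal
// from the scheduler, and resets the signal. It checks that the calling
// thread does not hold any lock, as it is called on the scheduling path.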
///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check thread locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]"
               " has not released all locks\n",
               __FUNCTION__ , thread->trdid , thread->process->pid,
               local_cxy , thread->core->lid );
        hal_core_sleep();
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

}  // end sched_kill_thread()

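// This function implements the round-robin scheduling policy.
// It scans the kernel threads list first, then the user threads list,
// starting from the entry that follows the last scheduled thread, so that
// kernel threads always have priority over user threads. It returns the
// first runnable (non-blocked) thread found, or the idle thread if none.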
////////////////////////////////////////
thread_t * sched_select( core_t * core )
{
    thread_t * thread;

    scheduler_t * sched = &core->scheduler;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    list_entry_t * current;
    list_entry_t * last;

    // first : scan the kernel threads list,
    // only if this list is not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the root that does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                // release lock
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // second : scan the user threads list,
    // only if this list is not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the root that does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                // release lock
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // release lock
    spinlock_unlock( &sched->lock );

    // third : return idle thread if no runnable thread
    return sched->idle;

}  // end sched_select()

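// This function scans all threads attached to the core scheduler, and
// detaches (through sched_kill_thread()) each thread marked by the
// THREAD_SIG_KILL signal. It is called by sched_yield() and
// sched_switch_to() with IRQs disabled.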
//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;
    bool_t         found;

    scheduler_t  * sched = &core->scheduler;

    // sched_kill_thread() unlinks the thread and takes the scheduler lock
    // itself (in sched_remove_thread()), so each marked thread is killed
    // outside the critical section, and the scan restarts after each kill
    do
    {
        found  = false;
        thread = NULL;

        // take lock protecting threads lists
        spinlock_lock( &sched->lock );

        // scan user threads
        LIST_FOREACH( &sched->u_root , iter )
        {
            thread = LIST_ELEMENT( iter , thread_t , sched_list );
            if( thread->signals & THREAD_SIG_KILL ) { found = true; break; }
        }

        // scan kernel threads
        if( found == false )
        {
            LIST_FOREACH( &sched->k_root , iter )
            {
                thread = LIST_ELEMENT( iter , thread_t , sched_list );
                if( thread->signals & THREAD_SIG_KILL ) { found = true; break; }
            }
        }

        // release lock
        spinlock_unlock( &sched->lock );

        // kill the marked thread
        if( found ) sched_kill_thread( thread );
    }
    while( found );

} // end sched_handle_signals()

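// This function is called by the current thread to release the core.
// It first handles pending THREAD_SIG_KILL signals, then selects the next
// thread to run, and performs the context switch when the selected thread
// is not the current one. IRQs are disabled during the whole operation.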
//////////////////
void sched_yield()
{
    reg_t         sr_save;
    thread_t    * next;

    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

    // check calling thread released all locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]"
               " has not released all locks at cycle %d\n",
               __FUNCTION__, current->trdid, current->process->pid,
               local_cxy , core->lid , hal_get_cycles() );
        hal_core_sleep();
    }

    // disable IRQs
    hal_disable_irq( &sr_save );

    // first loop on all threads to handle pending signals
    sched_handle_signals( core );

    // second loop on threads to select next thread
    next = sched_select( core );

    // check stack overflow for selected thread
    if( next->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
               " on core[%x][%d]\n",
               __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid );
        hal_core_sleep();
    }

    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
               __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );

    // switch contexts and update scheduler state if new thread selected
    if( next != current )
    {
        // update the scheduler state before the context switch, because
        // the current thread does not run again until it is rescheduled
        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
        else                               sched->k_last = &current->sched_list;

        sched->current = next;

        hal_cpu_context_save( current );
        hal_cpu_context_restore( next );
    }

    // restore IRQs
    hal_restore_irq( sr_save );

    // the FPU is enabled only if the thread owns the FPU context
    if( current->type != THREAD_USER ) return;

    if( next == core->fpu_owner ) hal_fpu_enable();
    else                          hal_fpu_disable();

}  // end sched_yield()

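// This function performs a directed context switch to the <new> thread,
// without calling sched_select(). The new thread must be attached to the
// same core as the calling thread, and must be runnable.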
//////////////////////////////////////
void sched_switch_to( thread_t * new )
{
    reg_t         sr_save;

    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    process_t   * process = current->process;

    // check calling thread released all locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x for process %x on core %d in cluster %x"
               " has not released all locks\n",
               __FUNCTION__, current->trdid, process->pid, core->lid, local_cxy );
        hal_core_sleep();
    }

    // check new thread attached to same core as the calling thread
    if( new->core != current->core )
    {
        printk("\n[PANIC] in %s : new thread %x is attached to core %d"
               " different from core %d of current thread %x\n",
               __FUNCTION__, new->trdid, new->core->lid, core->lid , current->trdid );
        hal_core_sleep();
    }

    // check new thread not blocked
    if( new->blocked != 0 )
    {
        printk("\n[PANIC] in %s for thread %x of process %x on core %d in cluster %x"
               " : new thread %x is blocked\n",
               __FUNCTION__, current->trdid, process->pid , core->lid, local_cxy , new->trdid );
        hal_core_sleep();
    }

    // check stack overflow for new thread
    if( new->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : stack overflow for new thread %x of process %x"
               " on core %d in cluster %x\n",
               __FUNCTION__, new->trdid, process->pid , core->lid , local_cxy );
        hal_core_sleep();
    }

    // disable IRQs
    hal_disable_irq( &sr_save );

    // loop on all threads to handle pending signals
    sched_handle_signals( core );

    // check stack overflow for new thread
    if( new->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
               " on core %d in cluster %x\n",
               __FUNCTION__, new->trdid, new->process->pid, core->lid, local_cxy );
        hal_core_sleep();
    }

    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
               __FUNCTION__, core->lid, local_cxy, current->trdid, new->trdid );

    // switch contexts
    hal_cpu_context_save( current );
    hal_cpu_context_restore( new );

    // restore IRQs
    hal_restore_irq( sr_save );

    // the FPU is enabled only if the thread owns the FPU context
    if( current->type != THREAD_USER ) return;

    if( current == core->fpu_owner )  hal_fpu_enable();
    else                              hal_fpu_disable();

}  // end sched_switch_to()
