source: trunk/kernel/kern/scheduler.c @ 294

Last change on this file since 294 was 279, checked in by alain, 7 years ago

1) Introduce independent command fields for the various devices in the thread descriptor.
2) Introduce a new dev_pic_enable_ipi() function in the generic PIC device.
3) Fix two bugs identified by Maxime in the scheduler initialisation and in sched_select().
4) Fix several bugs in the TSAR hal_kentry.S.
5) Introduce a third kgiet segment (besides kdata and kcode) in the TSAR bootloader.

File size: 11.4 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <spinlock.h>    // assumed header for the spinlock_lock()/spinlock_unlock() API used below
#include <scheduler.h>

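///////////////////////////////////////////////////////////////////////////////
// This function initializes the scheduler of a given core: it resets the user
// and kernel thread counters, gives default values to the "current", "idle",
// "u_last", and "k_last" pointers, and initializes the two (user and kernel)
// thread list roots. Note that the lock protecting the scheduler lists is not
// initialized here; it is assumed to be initialized elsewhere before the
// scheduler is first used.
///////////////////////////////////////////////////////////////////////////////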
////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;             // initialized in kernel_init()
    sched->u_last         = NULL;             // initialized in sched_register_thread()
    sched->k_last         = NULL;             // initialized in sched_register_thread()

    // initialize threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

}  // end sched_init()

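///////////////////////////////////////////////////////////////////////////////
// This function registers a new thread in the scheduler of a given core: the
// thread is appended to the user or kernel list depending on its type, the
// matching counter is incremented, and the "u_last" / "k_last" round-robin
// pointer is initialized when the list was previously empty.
///////////////////////////////////////////////////////////////////////////////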
////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // register thread in scheduler user list
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;

        // initialize u_last field if first user thread
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        // register thread in scheduler kernel list
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;

        // initialize k_last field if first kernel thread
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()

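///////////////////////////////////////////////////////////////////////////////
// This function removes a thread from the scheduler of the core it is
// attached to: the thread is unlinked from the user or kernel list depending
// on its type, the matching counter is decremented, and the "u_last" /
// "k_last" pointer is updated so it never references the removed thread.
///////////////////////////////////////////////////////////////////////////////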
/////////////////////////////////////////////
void sched_remove_thread( thread_t * thread )
{
    core_t       * core  = thread->core;
    scheduler_t  * sched = &core->scheduler;
    thread_type_t  type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        // remove thread from user list
        list_unlink( &thread->sched_list );
        sched->u_threads_nr--;

        // update the u_last field: reset it if the list becomes empty, and
        // repoint it to the first entry if it referenced the removed thread,
        // so that it never dangles
        if( sched->u_threads_nr == 0 )                  sched->u_last = NULL;
        else if( sched->u_last == &thread->sched_list ) sched->u_last = sched->u_root.next;
    }
    else // kernel thread
    {
        // remove thread from kernel list
        list_unlink( &thread->sched_list );
        sched->k_threads_nr--;

        // update the k_last field: reset it if the list becomes empty, and
        // repoint it to the first entry if it referenced the removed thread,
        // so that it never dangles
        if( sched->k_threads_nr == 0 )                  sched->k_last = NULL;
        else if( sched->k_last == &thread->sched_list ) sched->k_last = sched->k_root.next;
    }

    // release lock
    spinlock_unlock( &sched->lock );

}  // end sched_remove_thread()

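///////////////////////////////////////////////////////////////////////////////
// This function is called for a thread marked with the THREAD_SIG_KILL
// signal: it checks (via thread_can_yield()) that no locks are still held,
// removes the thread from its core scheduler, and resets the THREAD_SIG_KILL
// signal in the thread descriptor.
///////////////////////////////////////////////////////////////////////////////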
///////////////////////////////////////////
void sched_kill_thread( thread_t * thread )
{
    // check thread locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]"
               " did not release all locks\n",
               __FUNCTION__ , thread->trdid , thread->process->pid,
               local_cxy , thread->core->lid );
        hal_core_sleep();
    }

    // remove thread from scheduler
    sched_remove_thread( thread );

    // reset the THREAD_SIG_KILL signal
    thread_reset_signal( thread , THREAD_SIG_KILL );

}  // end sched_kill_thread()

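///////////////////////////////////////////////////////////////////////////////
// This function selects the next thread to run on a given core. The policy is
// a two-level round-robin: the kernel threads list is scanned first, starting
// from the entry following "k_last"; if no runnable kernel thread is found,
// the user threads list is scanned the same way from "u_last"; if no runnable
// thread exists at all, the core idle thread is returned.
///////////////////////////////////////////////////////////////////////////////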
////////////////////////////////////////
thread_t * sched_select( core_t * core )
{
    thread_t * thread;

    scheduler_t * sched = &core->scheduler;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    list_entry_t * current;
    list_entry_t * last;

    // first : scan the kernel threads list,
    // only if this list is not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // skip the root that does not contain a thread
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                // release lock
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // second : scan the user threads list,
    // only if this list is not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the root that does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                // release lock
                spinlock_unlock( &sched->lock );
                return thread;
            }
        }
        while( current != last );
    }

    // release lock
    spinlock_unlock( &sched->lock );

    // third : return idle thread if no runnable thread
    return sched->idle;

}  // end sched_select()

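///////////////////////////////////////////////////////////////////////////////
// This function scans all threads attached to a core scheduler and calls
// sched_kill_thread() for each thread marked with the THREAD_SIG_KILL signal.
// WARNING: as written, sched_kill_thread() calls sched_remove_thread(), which
// re-acquires the scheduler lock already held here (a deadlock unless
// spinlocks are recursive) and unlinks the entry the LIST_FOREACH iterator is
// standing on; this looks unsafe and likely requires a restructured fix.
///////////////////////////////////////////////////////////////////////////////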
//////////////////////////////////////////
void sched_handle_signals( core_t * core )
{
    list_entry_t * iter;
    thread_t     * thread;

    scheduler_t  * sched = &core->scheduler;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // handle user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals & THREAD_SIG_KILL )  sched_kill_thread( thread );
    }

    // handle kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if( thread->signals & THREAD_SIG_KILL )  sched_kill_thread( thread );
    }

    // release lock
    spinlock_unlock( &sched->lock );

} // end sched_handle_signals()

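///////////////////////////////////////////////////////////////////////////////
// This function is called by the current thread to give the core back to the
// scheduler: it checks that all locks have been released, disables IRQs,
// handles pending THREAD_SIG_KILL signals, selects the next thread with
// sched_select(), and switches CPU contexts when the selected thread is not
// the calling one. The scheduler state ("u_last"/"k_last" and "current") is
// updated before the context switch, because instructions placed after
// hal_cpu_context_restore() only execute when the calling thread is
// rescheduled.
///////////////////////////////////////////////////////////////////////////////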
//////////////////
void sched_yield()
{
    reg_t         sr_save;
    thread_t    * next;

    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x for process %x on core[%x][%d]"
               " has not released all locks at cycle %d\n",
               __FUNCTION__, current->trdid, current->process->pid,
               local_cxy , core->lid , hal_get_cycles() );
        hal_core_sleep();
    }

    // disable IRQs
    hal_disable_irq( &sr_save );

    // first loop on all threads to handle pending signals
    sched_handle_signals( core );

    // second loop on threads to select next thread
    next = sched_select( core );

    // check stack overflow for selected thread
    if( next->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
               " on core[%x][%d]\n",
               __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid );
        hal_core_sleep();
    }

    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
               __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );

    // update scheduler state and switch contexts if new thread
    if( next != current )
    {
        // update the round-robin pointer and the current thread before the
        // switch: code placed after hal_cpu_context_restore() would only run
        // when the calling thread is rescheduled
        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
        else                               sched->k_last = &current->sched_list;

        sched->current = next;

        hal_cpu_context_save( current );
        hal_cpu_context_restore( next );
    }

    // restore IRQs
    hal_restore_irq( sr_save );

    if( current->type != THREAD_USER ) return;

    if( next == core->fpu_owner ) hal_fpu_enable();
    else                          hal_fpu_disable();

}  // end sched_yield()

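///////////////////////////////////////////////////////////////////////////////
// This function makes the calling thread switch directly to a given thread,
// without invoking sched_select(): it checks that the calling thread holds no
// lock, that the new thread is attached to the same core, is not blocked, and
// has not overflowed its stack, then handles pending signals and performs the
// CPU context switch.
///////////////////////////////////////////////////////////////////////////////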
//////////////////////////////////////
void sched_switch_to( thread_t * new )
{
    reg_t         sr_save;

    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    process_t   * process = current->process;

    // check calling thread released all locks
    if( thread_can_yield() == false )
    {
        printk("\n[PANIC] in %s : thread %x for process %x on core %d in cluster %x"
               " has not released all locks\n",
               __FUNCTION__, current->trdid, process->pid, core->lid, local_cxy );
        hal_core_sleep();
    }

    // check new thread attached to same core as the calling thread
    if( new->core != current->core )
    {
        printk("\n[PANIC] in %s : new thread %x is attached to core %d"
               " different from core %d of current thread %x\n",
               __FUNCTION__, new->trdid, new->core->lid, core->lid , current->trdid );
        hal_core_sleep();
    }

    // check new thread not blocked
    if( new->blocked != 0 )
    {
        printk("\n[PANIC] in %s for thread %x of process %x on core %d in cluster %x"
               " : new thread %x is blocked\n",
               __FUNCTION__, current->trdid, process->pid , core->lid, local_cxy , new->trdid );
        hal_core_sleep();
    }

    // check stack overflow for new thread
    if( new->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : stack overflow for new thread %x of process %x"
               " on core %d in cluster %x\n",
               __FUNCTION__, new->trdid, process->pid , core->lid , local_cxy );
        hal_core_sleep();
    }

    // disable IRQs
    hal_disable_irq( &sr_save );

    // loop on all threads to handle pending signals
    sched_handle_signals( core );

    // check stack overflow again for new thread, after signal handling
    if( new->signature != THREAD_SIGNATURE )
    {
        printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
               " on core %d in cluster %x\n",
               __FUNCTION__, new->trdid, new->process->pid, core->lid, local_cxy );
        hal_core_sleep();
    }

    sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
               __FUNCTION__, core->lid, local_cxy, current->trdid, new->trdid );

    // switch contexts
    hal_cpu_context_save( current );
    hal_cpu_context_restore( new );

    // restore IRQs
    hal_restore_irq( sr_save );

    if( current->type != THREAD_USER ) return;

    if( current == core->fpu_owner )  hal_fpu_enable();
    else                              hal_fpu_disable();

}  // end sched_switch_to()
