source: trunk/kernel/kern/scheduler.c @ 355

Last change on this file since 355 was 337, checked in by alain, 7 years ago

Introduce the delayed context switch if current thread has a lock.

File size: 12.1 KB
RevLine 
[1]1/*
2 * scheduler.c - Core scheduler implementation.
3 *
4 * Author    Alain Greiner (2016)
5 *
6 * Copyright (c)  UPMC Sorbonne Universites
7 *
8 * This file is part of ALMOS-MKH.
9 *
10 * ALMOS-MKH. is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2.0 of the License.
13 *
14 * ALMOS-MKH. is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with ALMOS-MKH.; if not, write to the Free Software Foundation,
21 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
[14]24#include <kernel_config.h>
[1]25#include <hal_types.h>
26#include <hal_irqmask.h>
27#include <hal_context.h>
28#include <printk.h>
29#include <list.h>
30#include <core.h>
31#include <thread.h>
[296]32#include <chdev.h>
[1]33#include <scheduler.h>
34
[296]35///////////////////////////////////////////////////////////////////////////////////////////
36// Extern global variables
37///////////////////////////////////////////////////////////////////////////////////////////
[1]38
[296]39extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
40
41
[1]42////////////////////////////////
43void sched_init( core_t * core )
44{
45    scheduler_t * sched = &core->scheduler;
46
47    sched->u_threads_nr   = 0;
48    sched->k_threads_nr   = 0;
49
[279]50    sched->current        = CURRENT_THREAD;
51    sched->idle           = NULL;             // initialized in kernel_init()
52    sched->u_last         = NULL;             // initialized in sched_register_thread()
53    sched->k_last         = NULL;             // initialized in sched_register_thread()
[1]54
55    // initialise threads lists
56    list_root_init( &sched->u_root );
57    list_root_init( &sched->k_root );
58
59}  // end sched_init()
60
61////////////////////////////////////////////
62void sched_register_thread( core_t   * core,
63                            thread_t * thread )
64{
65    scheduler_t * sched = &core->scheduler;
66    thread_type_t type  = thread->type;
67
68    // take lock protecting sheduler lists
69    spinlock_lock( &sched->lock );
70
71    if( type == THREAD_USER )
72    {
[279]73        // register thread in scheduler user list
[1]74        list_add_last( &sched->u_root , &thread->sched_list );
75        sched->u_threads_nr++;
[279]76
77        // initialize u_last field if first user thread
78        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
[1]79    }
80    else // kernel thread
81    {
[279]82        // register thread in scheduler kernel list
[1]83        list_add_last( &sched->k_root , &thread->sched_list );
84        sched->k_threads_nr++;
[279]85
86        // initialize k_last field if first kernel thread
87        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list; 
[1]88    }
89
90    // release lock
91    spinlock_unlock( &sched->lock );
92
93}  // end sched_register()
94
95/////////////////////////////////////////////
96void sched_remove_thread( thread_t * thread )
97{
98    core_t       * core  = thread->core;
99    scheduler_t  * sched = &core->scheduler;
100    thread_type_t  type  = thread->type;
101
102    // take lock protecting sheduler lists
103    spinlock_lock( &sched->lock );
104
105    if( type == THREAD_USER )
106    {
[279]107        // remove thread from user list
[1]108        list_unlink( &thread->sched_list );
109        sched->u_threads_nr--;
[279]110
111        // reset the u_last field if list empty
112        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
[1]113    }
114    else // kernel thread
115    {
[279]116        // remove thread from kernel list
[1]117        list_unlink( &thread->sched_list );
118        sched->k_threads_nr--;
[279]119
120        // reset the k_last field if list empty
121        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
[1]122    }
123
124    // release lock
125    spinlock_unlock( &sched->lock );
126
127}  // end sched_remove()
128
129///////////////////////////////////////////
130void sched_kill_thread( thread_t * thread )
131{
132    // check thread locks
133    if( thread_can_yield() == false )
134    {
[14]135        printk("\n[PANIC] in %s : thread %x in process %x on core[%x][%d]"
[1]136               " did not released all locks\n",
137               __FUNCTION__ , thread->trdid , thread->process->pid, 
[14]138               local_cxy , thread->core->lid ); 
[1]139        hal_core_sleep();
140    }
141
142    // remove thread from scheduler
143    sched_remove_thread( thread );
144
145    // reset the THREAD_SIG_KILL signal
146    thread_reset_signal( thread , THREAD_SIG_KILL );
147
148}  // end sched_kill_thread()
149
////////////////////////////////////////
// Select the next thread to run on this core.
// Policy : round-robin, kernel threads have absolute priority over user
// threads, and the idle thread is returned only when nothing is runnable.
// The scan of each list starts just AFTER the k_last / u_last anchor, so
// successive calls rotate fairly through the queue.
// Returns with sched->lock released on every path.
thread_t * sched_select( core_t * core )
{
    thread_t    * thread;

    scheduler_t * sched = &core->scheduler;

    sched_dmsg("\n[INFO] %s : enter core[%x,%d] / cycle %d\n",
    __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    list_entry_t * current;
    list_entry_t * last;

    // first : scan the kernel threads list if not empty
    // NOTE(review): k_last / u_last must always reference an entry still
    // linked in the list; verify that thread removal keeps them valid.
    if( list_is_empty( &sched->k_root ) == false )
    {
        // remember the starting anchor so the do/while scans each entry once
        last    = sched->k_last;
        current = sched->k_last;
        do
        {
            // get next entry in kernel list
            current = list_next( &sched->k_root , current );

            // list_next() returns NULL at the sentinel root : wrap around
            // to the first real entry (the root holds no thread)
            if( current == NULL ) current = sched->k_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if not idle_thread and runnable
            // (idle is skipped here; it is the explicit fallback below)
            if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) ) 
            {
                // release lock before returning the selected thread
                spinlock_unlock( &sched->lock );

                sched_dmsg("\n[INFO] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
                __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );

                return thread;
            }
        }
        while( current != last );
    }

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        // same rotating scan, anchored on u_last
        last    = sched->u_last;
        current = sched->u_last;
        do
        {
            // get next entry in user list
            current = list_next( &sched->u_root , current );

            // skip the sentinel root that does not contain a thread
            if( current == NULL ) current = sched->u_root.next;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // return thread if runnable
            if( thread->blocked == 0 )
            {
                // release lock before returning the selected thread
                spinlock_unlock( &sched->lock );

                sched_dmsg("\n[INFO] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
                __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
                return thread;
            }
        }
        while( current != last );
    }

    // release lock
    spinlock_unlock( &sched->lock );

    sched_dmsg("\n[INFO] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
    __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );

    // third : return idle thread if no runnable thread
    // (sched->idle is assumed non-NULL after kernel_init — TODO confirm)
    return sched->idle;

}  // end sched_select()
[1]237
238//////////////////////////////////////////
239void sched_handle_signals( core_t * core )
240{
241    list_entry_t * iter;
242    thread_t     * thread;
243    scheduler_t  * sched = &core->scheduler;
244
[296]245    sched_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d]\n",
246    __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
247
[1]248    // take lock protecting threads lists
249    spinlock_lock( &sched->lock );
250
251    // handle user threads
252    LIST_FOREACH( &sched->u_root , iter )
253    {
254        thread = LIST_ELEMENT( iter , thread_t , sched_list );
255        if( thread->signals & THREAD_SIG_KILL )  sched_kill_thread( thread );
256    }
257
258    // handle kernel threads
259    LIST_FOREACH( &sched->k_root , iter )
260    {
261        thread = LIST_ELEMENT( iter , thread_t , sched_list );
262        if( thread->signals & THREAD_SIG_KILL )  sched_kill_thread( thread );
263    }
264
265    // release lock
266    spinlock_unlock( &sched->lock );
267
[296]268    sched_dmsg("\n[INFO] %s : exit / thread %x on core[%x,%d]\n",
269    __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
270
[1]271} // end sched_handle_signals()
272
///////////////////////////////////
// Yield the current core to another thread.
// If <next> is NULL the scheduler selects the next runnable thread;
// otherwise the caller imposes the target (must belong to the same core).
// If the calling thread still holds locks, the yield is only recorded in
// the thread flags (delayed context switch) and the function returns.
void sched_yield( thread_t * next )
{
    reg_t         sr_save;

    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

    sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] enter / cycle %d\n",
    __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );

    // delay the yield if current thread has locks : mark the request and
    // return; the switch will happen when the last lock is released
    if( thread_can_yield() == false )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // first loop on all threads to handle pending signals
    sched_handle_signals( core );

    // second loop on threads to select next thread if required
    if( next == NULL ) next = sched_select( core );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");

    // check next thread not blocked
    // NOTE(review): IRQs are still enabled here, so "blocked" can change
    // between sched_select() and this assert — TODO confirm this window
    // is harmless on this architecture.
    assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");

    // switch contexts and update scheduler state if next != current
        if( next != current )
    {
        sched_dmsg("\n[INFO] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
        __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid, 
        next->trdid, thread_type_str(next->type), hal_time_stamp() );

        // calling thread deactivates IRQs : the switch sequence below must
        // not be interrupted; IRQs are restored when this thread resumes
        hal_disable_irq( &sr_save );

        // update scheduler : the outgoing thread becomes the round-robin
        // anchor of its class, and the incoming thread becomes current
        if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
        else                               sched->k_last = &current->sched_list;
        sched->current = next;

        // handle FPU : lazy FPU management — enable only if the incoming
        // user thread already owns the core FPU, else fault on first use
            if( next->type == THREAD_USER )
        {
                if( next == core->fpu_owner )  hal_fpu_enable();
                else                           hal_fpu_disable();
        }

        // switch contexts : execution continues in <next>; this thread
        // resumes after the line below when it is scheduled again
        hal_cpu_context_switch( current , next );

        // restore IRQs when calling thread resumes
        hal_restore_irq( sr_save );
    }
    else
    {
        sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] continue / cycle %d\n",
        __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
    }
}  // end sched_yield()
338
[296]339////////////////////
340void sched_display()
[1]341{
[296]342    list_entry_t * iter;
343    thread_t     * thread;
344    uint32_t       save_sr;
[1]345
[296]346    thread_t     * current = CURRENT_THREAD;
347    core_t       * core    = current->core;
348    scheduler_t  * sched   = &core->scheduler;
349   
350    // get pointers on TXT0 chdev
351    xptr_t    txt0_xp  = chdev_dir.txt[0];
352    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
353    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[1]354
[296]355    // get extended pointer on remote TXT0 chdev lock
356    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[1]357
[296]358    // get TXT0 lock in busy waiting mode
359    remote_spinlock_lock_busy( lock_xp , &save_sr );
360
[317]361    nolock_printk("\n***** scheduler state for core[%x,%d]\n"
[296]362           "kernel_threads = %d / user_threads = %d / current = %x\n",
363            local_cxy , core->lid, 
364            sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
365
366    // display kernel threads
367    LIST_FOREACH( &sched->k_root , iter )
[1]368    {
[296]369        thread = LIST_ELEMENT( iter , thread_t , sched_list );
370        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
371        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
372        thread->entry_func, thread->blocked );
[1]373    }
374
[296]375    // display user threads
376    LIST_FOREACH( &sched->u_root , iter )
[1]377    {
[296]378        thread = LIST_ELEMENT( iter , thread_t , sched_list );
379        nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
380        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
381        thread->entry_func, thread->blocked );
[1]382    }
383
[296]384    // release TXT0 lock
385    remote_spinlock_unlock_busy( lock_xp , save_sr );
[1]386
[296]387}  // end sched_display()
[1]388
Note: See TracBrowser for help on using the repository browser.