source: trunk/kernel/kern/scheduler.c @ 479

Last change on this file since 479 was 470, checked in by viala@…, 6 years ago

minor: add const to cause parameter in sched_yield.

We never mutate the string parameter.

GCC warning fixed:
`
hal/tsar_mips32/drivers/soclib_tty.c:183:30: warning: passing argument 1 of 'sched_yield'
discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers]

     sched_yield( "TTY_TX_FIFO full" );
                  ~~~~~~~~~~~~~~~~~~

In file included from kernel/kern/core.h:32,
                 from kernel/kern/thread.h:35,
                 from hal/tsar_mips32/drivers/soclib_tty.c:30:

kernel/kern/scheduler.h:80:26: note: expected 'char *' but argument is of type 'const char *'

 void sched_yield( char * cause );
                   ~~~~~~~~~~~~
`
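
A minimal before/after sketch of the fix (assuming the tree is built with -Wwrite-strings, which is what makes a C string literal const-qualified and triggers the diagnostic above):

`
/* before : callers passing a string literal get -Wdiscarded-qualifiers */
void sched_yield( char * cause );

/* after : the string is never written through, so the const-qualified
 * parameter accepts string literals and writable buffers alike */
void sched_yield( const char * cause );

sched_yield( "TTY_TX_FIFO full" );   /* now compiles cleanly */
`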

File size: 18.1 KB
/*
 * scheduler.c - Core scheduler implementation.
 *
 * Author    Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_switch.h>
#include <hal_irqmask.h>
#include <hal_context.h>
#include <printk.h>
#include <list.h>
#include <core.h>
#include <thread.h>
#include <chdev.h>
#include <scheduler.h>

///////////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
///////////////////////////////////////////////////////////////////////////////////////////

uint32_t   idle_thread_count;
uint32_t   idle_thread_count_active;

extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c file
extern uint32_t             switch_save_sr[];   // allocated in kernel_init.c file

////////////////////////////////
void sched_init( core_t * core )
{
    scheduler_t * sched = &core->scheduler;

    sched->u_threads_nr   = 0;
    sched->k_threads_nr   = 0;

    sched->current        = CURRENT_THREAD;
    sched->idle           = NULL;               // initialized in kernel_init()
    sched->u_last         = NULL;               // initialized in sched_register_thread()
    sched->k_last         = NULL;               // initialized in sched_register_thread()

    // initialize threads lists
    list_root_init( &sched->u_root );
    list_root_init( &sched->k_root );

    // initialize the spinlock protecting the threads lists
    spinlock_init( &sched->lock );

    sched->req_ack_pending = false;             // no pending request
    sched->trace           = false;             // context switches trace deactivated

}  // end sched_init()

////////////////////////////////////////////
void sched_register_thread( core_t   * core,
                            thread_t * thread )
{
    scheduler_t * sched = &core->scheduler;
    thread_type_t type  = thread->type;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    if( type == THREAD_USER )
    {
        list_add_last( &sched->u_root , &thread->sched_list );
        sched->u_threads_nr++;
        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    }
    else // kernel thread
    {
        list_add_last( &sched->k_root , &thread->sched_list );
        sched->k_threads_nr++;
        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

}  // end sched_register_thread()
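///////////////////////////////////////////////////////////////////////////////////////////
// This function implements the scheduling policy : runnable kernel threads always have
// priority over user threads, and each class is served in round-robin order, the scan
// starting right after the last served entry (k_last / u_last).
// The IDLE thread is returned only when no other thread is runnable.
///////////////////////////////////////////////////////////////////////////////////////////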
//////////////////////////////////////////////
thread_t * sched_select( scheduler_t * sched )
{
    thread_t     * thread;
    list_entry_t * current;
    list_entry_t * last;
    list_entry_t * root;
    bool_t         done;
    uint32_t       count;

    // take lock protecting scheduler lists
    spinlock_lock( &sched->lock );

    // first : scan the kernel threads list if not empty
    if( list_is_empty( &sched->k_root ) == false )
    {
        root    = &sched->k_root;
        last    = sched->k_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {
            assert( (count < sched->k_threads_nr), __FUNCTION__, "bad kernel threads list" );

            // get next entry in kernel list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select this kernel thread if it is not blocked and not the IDLE thread
            if( (thread->blocked == 0) && (thread->type != THREAD_IDLE) )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on kernel threads
    } // end kernel threads

    // second : scan the user threads list if not empty
    if( list_is_empty( &sched->u_root ) == false )
    {
        root    = &sched->u_root;
        last    = sched->u_last;
        done    = false;
        count   = 0;
        current = last;

        while( done == false )
        {
            assert( (count < sched->u_threads_nr), __FUNCTION__, "bad user threads list" );

            // get next entry in user list
            current = current->next;

            // check exit condition
            if( current == last ) done = true;

            // skip the root that does not contain a thread
            if( current == root ) continue;
            else                  count++;

            // get thread pointer for this entry
            thread = LIST_ELEMENT( current , thread_t , sched_list );

            // select this user thread if it is not blocked
            if( thread->blocked == 0 )
            {
                spinlock_unlock( &sched->lock );
                return thread;
            }
        } // end loop on user threads
    } // end user threads

    // third : return idle thread if no other runnable thread
    spinlock_unlock( &sched->lock );
    return sched->idle;

}  // end sched_select()

///////////////////////////////////////////
void sched_handle_signals( core_t * core )
{

    list_entry_t * iter;
    list_entry_t * root;
    thread_t     * thread;
    process_t    * process;
    bool_t         last_thread;

    // get pointer on scheduler
    scheduler_t  * sched = &core->scheduler;

    // get pointer on user threads root
    root = &sched->u_root;

    // take lock protecting threads lists
    spinlock_lock( &sched->lock );

    // We scan the user threads with a while loop rather than LIST_FOREACH(),
    // because the iterator must be advanced before a thread is destroyed

    // initialize list iterator
    iter = root->next;

    // scan all user threads
    while( iter != root )
    {
        // get pointer on thread
        thread = LIST_ELEMENT( iter , thread_t , sched_list );

        // increment iterator
        iter = iter->next;

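        // The REQ_ACK mechanism lets a client thread check that a target thread
        // is actually descheduled and blocked : the client sets the flag and
        // waits for the shared ack_rsp_count counter to reach zero.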
        // handle REQ_ACK
        if( thread->flags & THREAD_FLAG_REQ_ACK )
        {
            // check thread blocked
            assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) ,
            __FUNCTION__ , "thread not blocked" );

            // decrement response counter
            hal_atomic_add( thread->ack_rsp_count , -1 );

            // reset REQ_ACK in thread descriptor
            thread_reset_req_ack( thread );
        }

        // handle REQ_DELETE
        if( thread->flags & THREAD_FLAG_REQ_DELETE )
        {
            // get thread process descriptor
            process = thread->process;

            // release FPU if required
            if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;

            // remove thread from scheduler (scheduler lock already taken)
            uint32_t threads_nr = sched->u_threads_nr;

            assert( (threads_nr != 0) , __FUNCTION__ , "u_threads_nr cannot be 0\n" );

            sched->u_threads_nr = threads_nr - 1;
            list_unlink( &thread->sched_list );
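            // u_last is the round-robin anchor used by sched_select() : if it
            // referenced the thread just unlinked, make it reference another
            // valid entry, or NULL when this was the last registered user thread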
            if( sched->u_last == &thread->sched_list )
            {
                if( threads_nr == 1 )
                {
                    sched->u_last = NULL;
                }
                else if( sched->u_root.next == &thread->sched_list )
                {
                    sched->u_last = sched->u_root.pred;
                }
                else
                {
                    sched->u_last = sched->u_root.next;
                }
            }

#if DEBUG_SCHED_HANDLE_SIGNALS
// keep copies of the thread identifiers : the descriptor
// is not accessible after thread_destroy()
trdid_t  trdid = thread->trdid;
lid_t    lid   = thread->core->lid;
#endif

            // delete thread descriptor
            last_thread = thread_destroy( thread );

#if DEBUG_SCHED_HANDLE_SIGNALS
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : thread %x in process %x on core[%x,%d] deleted / cycle %d\n",
__FUNCTION__ , trdid , process->pid , local_cxy , lid , cycle );
#endif
            // destroy process descriptor if no more threads
            if( last_thread )
            {
#if DEBUG_SCHED_HANDLE_SIGNALS
// keep a copy of the PID : the process descriptor is not
// accessible after process_destroy()
pid_t pid = process->pid;
#endif
                // delete process
                process_destroy( process );

#if DEBUG_SCHED_HANDLE_SIGNALS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
printk("\n[DBG] %s : process %x in cluster %x deleted / cycle %d\n",
__FUNCTION__ , pid , local_cxy , cycle );
#endif
            }
        }
    }

    // release lock
    hal_fence();
    spinlock_unlock( &sched->lock );

} // end sched_handle_signals()

//////////////////////////////////////
void sched_yield( const char * cause )
{
    thread_t    * next;
    thread_t    * current = CURRENT_THREAD;
    core_t      * core    = current->core;
    scheduler_t * sched   = &core->scheduler;

#if (DEBUG_SCHED_YIELD & 0x1)
if( sched->trace )
sched_display( core->lid );
#endif

    // delay the yield if current thread has locks
    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    {
        current->flags |= THREAD_FLAG_SCHED;
        return;
    }

    // enter critical section / save SR in current thread descriptor
    hal_disable_irq( &CURRENT_THREAD->save_sr );

    // select the next thread to run on this core
    next = sched_select( sched );

    // check next thread kernel_stack overflow
    assert( (next->signature == THREAD_SIGNATURE), __FUNCTION__ ,
    "kernel stack overflow for thread %x on core[%x,%d]\n", next, local_cxy, core->lid );

    // check next thread attached to same core as the calling thread
    assert( (next->core == current->core), __FUNCTION__ ,
    "next core %x != current core %x\n", next->core, current->core );

    // check next thread not blocked when type != IDLE
    assert( ((next->blocked == 0) || (next->type == THREAD_IDLE)) , __FUNCTION__ ,
    "next thread %x (%s) is blocked on core[%x,%d]\n",
    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );

    // switch contexts and update scheduler state if next != current
    if( next != current )
    {

#if DEBUG_SCHED_YIELD
if( sched->trace )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause,
current, thread_type_str(current->type), current->process->pid, current->trdid, next,
thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
#endif

        // update scheduler
        sched->current = next;
        if( next->type == THREAD_USER ) sched->u_last = &next->sched_list;
        else                            sched->k_last = &next->sched_list;

        // handle FPU ownership
        if( next->type == THREAD_USER )
        {
            if( next == current->core->fpu_owner )  hal_fpu_enable();
            else                                    hal_fpu_disable();
        }

        // switch CPU from current thread context to new thread context
        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
    }
    else
    {

#if DEBUG_SCHED_YIELD
if( sched->trace )
printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
"      thread %x (%s) (%x,%x) continue / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, cause, current, thread_type_str(current->type),
current->process->pid, current->trdid, (uint32_t)hal_get_cycles() );
#endif

    }

    // handle pending requests for all threads executing on this core.
    sched_handle_signals( core );

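    // NOTE: if a context switch occurred above, CURRENT_THREAD is now the newly
    // scheduled thread, and the SR value restored below is the one that this
    // thread saved when it last yielded : this is why save_sr is stored in the
    // thread descriptor rather than in a local variable.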
    // exit critical section / restore SR from current thread descriptor
    hal_restore_irq( CURRENT_THREAD->save_sr );

}  // end sched_yield()

///////////////////////////////
void sched_display( lid_t lid )
{
    list_entry_t * iter;
    thread_t     * thread;
    uint32_t       save_sr;

    assert( (lid < LOCAL_CLUSTER->cores_nr), __FUNCTION__, "illegal core index %d\n", lid);

    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t  * sched   = &core->scheduler;

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

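    // TXT0 is shared by all cores : the lock is taken in busy waiting mode
    // (IRQs masked, no descheduling) to keep the multi-line display atomic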
    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );

    // display kernel threads
    LIST_FOREACH( &sched->k_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        if (thread->type == THREAD_DEV)
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags, thread->chdev->name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( thread->type ), thread->process->pid, thread->trdid,
            thread, thread->blocked, thread->flags );
        }
    }

    // display user threads
    LIST_FOREACH( &sched->u_root , iter )
    {
        thread = LIST_ELEMENT( iter , thread_t , sched_list );
        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( thread->type ), thread->process->pid, thread->trdid,
        thread, thread->blocked, thread->flags );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_display()

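///////////////////////////////////////////////////////////////////////////////////////////
// This function is the cross-cluster variant of sched_display() : all accesses to the
// target scheduler and thread descriptors go through the hal_remote_lw() /
// hal_remote_lpt() extended-pointer accessors, so the <cxy> cluster can be remote.
///////////////////////////////////////////////////////////////////////////////////////////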
/////////////////////////////////////
void sched_remote_display( cxy_t cxy,
                           lid_t lid )
{
    thread_t     * thread;
    uint32_t       save_sr;

    // check cxy
    bool_t undefined = cluster_is_undefined( cxy );
    assert( (undefined == false), __FUNCTION__, "illegal cluster %x\n", cxy );

    // check lid
    uint32_t cores = hal_remote_lw( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) );
    assert( (lid < cores), __FUNCTION__, "illegal core index %d\n", lid);

    // get local pointer on target scheduler
    core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
    scheduler_t * sched = &core->scheduler;

    // get local pointer on current thread in target scheduler
    thread_t * current = hal_remote_lpt( XPTR( cxy, &sched->current ) );

    // get local pointer on the first kernel and user threads list_entry
    list_entry_t * k_entry = hal_remote_lpt( XPTR( cxy , &sched->k_root.next ) );
    list_entry_t * u_entry = hal_remote_lpt( XPTR( cxy , &sched->u_root.next ) );

    // get pointers on TXT0 chdev
    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    chdev_t * txt0_ptr = GET_PTR( txt0_xp );

    // get extended pointer on remote TXT0 chdev lock
    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );

    // get TXT0 lock in busy waiting mode
    remote_spinlock_lock_busy( lock_xp , &save_sr );

    // display header
    nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    cxy , lid, current, (uint32_t)hal_get_cycles() );

    // display kernel threads
    while( k_entry != &sched->k_root )
    {
        // get local pointer on kernel_thread
        thread = LIST_ELEMENT( k_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );

        // display thread info
        if (type == THREAD_DEV)
        {
            char      name[16];
            chdev_t * chdev = hal_remote_lpt( XPTR( cxy , &thread->chdev ) );
            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , &chdev->name ) );

            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
        }
        else
        {
            nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
            thread_type_str( type ), pid, trdid, thread, blocked, flags );
        }

        // get next remote kernel thread list_entry
        k_entry = hal_remote_lpt( XPTR( cxy , &k_entry->next ) );
    }

    // display user threads
    while( u_entry != &sched->u_root )
    {
        // get local pointer on user_thread
        thread = LIST_ELEMENT( u_entry , thread_t , sched_list );

        // get relevant thread info
        thread_type_t type    = hal_remote_lw ( XPTR( cxy , &thread->type ) );
        trdid_t       trdid   = hal_remote_lw ( XPTR( cxy , &thread->trdid ) );
        uint32_t      blocked = hal_remote_lw ( XPTR( cxy , &thread->blocked ) );
        uint32_t      flags   = hal_remote_lw ( XPTR( cxy , &thread->flags ) );
        process_t *   process = hal_remote_lpt( XPTR( cxy , &thread->process ) );
        pid_t         pid     = hal_remote_lw ( XPTR( cxy , &process->pid ) );

        nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
        thread_type_str( type ), pid, trdid, thread, blocked, flags );

        // get next user thread list_entry
        u_entry = hal_remote_lpt( XPTR( cxy , &u_entry->next ) );
    }

    // release TXT0 lock
    remote_spinlock_unlock_busy( lock_xp , save_sr );

}  // end sched_remote_display()