Timestamp: Jul 31, 2017, 1:59:52 PM
Author: alain
Message: Several modifications in the generic scheduler and in the hal_context to fix the context switch mechanism.
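
(Context, not part of the changeset: since sched_switch_to() is removed below and sched_yield() now takes a thread_t * argument, call sites — which are not shown in this diff — presumably change as sketched here, where "new" stands for a hypothetical thread pointer already known to the caller:)

    sched_yield( NULL );   // ordinary yield : sched_select() elects the next thread
    sched_yield( new );    // directed switch : replaces the removed sched_switch_to( new )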

File: 1 edited

Legend: unmodified lines are shown with a plain two-space margin, added lines are prefixed with '+', removed lines with '-', and '…' marks skipped unmodified code.
  • trunk/kernel/kern/scheduler.c

r279 → r296

  #include <core.h>
  #include <thread.h>
+ #include <chdev.h>
  #include <scheduler.h>
+
+ ///////////////////////////////////////////////////////////////////////////////////////////
+ // Extern global variables
+ ///////////////////////////////////////////////////////////////////////////////////////////
+
+ extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file


…
  thread_t * sched_select( core_t * core )
  {
-     thread_t * thread;
+     thread_t    * thread;

      scheduler_t * sched = &core->scheduler;
+
+     sched_dmsg("\n[INFO] %s : enter core[%x,%d] / cycle %d\n",
+     __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );

      // take lock protecting scheduler lists
…
      list_entry_t * last;

-     // first : scan the kernel threads list,
-     // only if this list is not empty
+     // first : scan the kernel threads list if not empty
      if( list_is_empty( &sched->k_root ) == false )
      {
…
              thread = LIST_ELEMENT( current , thread_t , sched_list );

-             // return thread if runnable
-             if( thread->blocked == 0 )
+             // return thread if not idle_thread and runnable
+             if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) )
              {
                  // release lock
                  spinlock_unlock( &sched->lock );
+
+                 sched_dmsg("\n[INFO] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
+                 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
+
                  return thread;
              }
…
      }

-     // second : scan the user threads list,
-     // only if this list is not empty
+     // second : scan the user threads list if not empty
      if( list_is_empty( &sched->u_root ) == false )
      {
…
                  // release lock
                  spinlock_unlock( &sched->lock );
+
+                 sched_dmsg("\n[INFO] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
+                 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
                  return thread;
              }
…
      spinlock_unlock( &sched->lock );

+     sched_dmsg("\n[INFO] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
+     __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );
+
      // third : return idle thread if no runnable thread
      return sched->idle;

- }  // end sched_elect()
+ }  // end sched_select()

  //////////////////////////////////////////
…
      list_entry_t * iter;
      thread_t     * thread;
-
      scheduler_t  * sched = &core->scheduler;
+
+     sched_dmsg("\n[INFO] %s : enter / thread %x on core[%x,%d]\n",
+     __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );

      // take lock protecting threads lists
…
      spinlock_unlock( &sched->lock );

+     sched_dmsg("\n[INFO] %s : exit / thread %x on core[%x,%d]\n",
+     __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
+
  } // end sched_handle_signals()

- //////////////////
- void sched_yield()
+ ///////////////////////////////////
+ void sched_yield( thread_t * next )
  {
      reg_t         sr_save;
-     thread_t    * next;

      thread_t    * current = CURRENT_THREAD;
…
      scheduler_t * sched   = &core->scheduler;

-     if( thread_can_yield() == false )
-     {
-         printk("\n[PANIC] in %s : thread %x for process %x on core_gid %x"
-                " has not released all locks at cycle %d\n",
-                __FUNCTION__, current->trdid, current->process->pid,
-                local_cxy , core->lid , hal_get_cycles() );
-         hal_core_sleep();
-     }
-
-     // desactivate IRQs
-     hal_disable_irq( &sr_save );
+     sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] enter / cycle %d\n",
+     __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+
+     // check calling thread released all locks
+     assert( (thread_can_yield() == true), __FUNCTION__, "locks not released\n");

      // first loop on all threads to handle pending signals
      sched_handle_signals( core );

-     // second loop on threads to select next thread
-     next = sched_select( core );
-
-     // check stack overflow for selected thread
-     if( next->signature != THREAD_SIGNATURE )
-     {
-         printk("\n[PANIC] in %s : detected stack overflow for thread %x of process %x"
-                " on core [%x][%d]\n",
-                __FUNCTION__, next->trdid, next->process->pid, local_cxy , core->lid );
-         hal_core_sleep();
-     }
-
-     sched_dmsg("\n[INFO] %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
-            __FUNCTION__, core->lid, local_cxy, current->trdid, next->trdid );
-
-     // switch contexts and update scheduler state if new thread
-     if( next != current )
-     {
-         hal_cpu_context_save( current );
-         hal_cpu_context_restore( next );
-
+     // second loop on threads to select next thread if required
+     if( next == NULL ) next = sched_select( core );
+
+     // check next thread attached to same core as the calling thread
+     assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");
+
+     // check next thread not blocked
+     assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");
+
+     // switch contexts and update scheduler state if next != current
+     if( next != current )
+     {
+         sched_dmsg("\n[INFO] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
+         __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid,
+         next->trdid, thread_type_str(next->type), hal_time_stamp() );
+
+         // the calling thread deactivates IRQs
+         hal_disable_irq( &sr_save );
+
+         // update scheduler
          if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
          else                               sched->k_last = &current->sched_list;
-
          sched->current = next;
-     }
-
-     // restore IRQs
-     hal_restore_irq( sr_save );
-
-     if( current->type != THREAD_USER ) return;
-
-     if( next == core->fpu_owner ) hal_fpu_enable();
-     else                          hal_fpu_disable();
-
+
+         // handle FPU
+         if( next->type == THREAD_USER )
+         {
+             if( next == core->fpu_owner )  hal_fpu_enable();
+             else                           hal_fpu_disable();
+         }
+
+         // switch contexts
+         hal_cpu_context_save( current->cpu_context );
+         hal_cpu_context_restore( next->cpu_context );
+
+         // restore IRQs when the calling thread resumes
+         hal_restore_irq( sr_save );
+
+         sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] / cycle %d\n",
+         __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+     }
+     else
+     {
+         sched_dmsg("\n[INFO] %s : thread %x on core[%x,%d] continue / cycle %d\n",
+         __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
+     }
  }  // end sched_yield()

- //////////////////////////////////////
- void sched_switch_to( thread_t * new )
- {
-     reg_t         sr_save;
-
-     thread_t    * current = CURRENT_THREAD;
-     core_t      * core    = current->core;
-     process_t   * process = current->process;
-
-     // check calling thread released all locks
-     if( thread_can_yield() == false )
-     {
-         printk("\n[PANIC] in %s : thread %x for process %x on core %d in cluster %x"
-                " has not released all locks\n",
-                __FUNCTION__, current->trdid, process->pid, core->lid, local_cxy );
-         hal_core_sleep();
-     }
-
-     // check new thread attached to same core as the calling thread
-     if( new->core != current->core )
-     {
-         printk("\n[PANIC] in %s : new thread %x is attached to core %d"
-                " different from core %d of current thread\n",
-                __FUNCTION__, new->trdid, new->core->lid, core->lid , current->trdid );
-         hal_core_sleep();
-     }
-
-     // check new thread not blocked
-     if( new->blocked == 0 )
-     {
-         printk("\n[PANIC] in %s for thread %x of process %x on core %d in cluster %x"
-                " : new thread %x is blocked\n",
-                __FUNCTION__, current->trdid, process->pid , core->lid, local_cxy , new->trdid );
-         hal_core_sleep();
-     }
-
-     // check stack overflow for new thread
-     if( new->signature != THREAD_SIGNATURE )
-     {
-         printk("\n[PANIC] in %s : stack overflow for new thread %x of process %x"
-                " on core %d in cluster %x\n",
-                __FUNCTION__, new->trdid, process->pid , core->lid , local_cxy );
-         hal_core_sleep();
-     }
-
-     // desactivate IRQs
-     hal_disable_irq( &sr_save );
-
-     // loop on all threads to handle pending signals
-     sched_handle_signals( core );
-
-     // check stack overflow for new thread
-     if( new->signature != THREAD_SIGNATURE )
-     {
-         printk("PANIC %s detected stack overflow for thread %x of process %x"
-                " on core %d in cluster %x\n",
-                __FUNCTION__, new->trdid, new->process->pid, core->lid, local_cxy);
-         hal_core_sleep();
-     }
-
-     sched_dmsg("INFO : %s on core %d in cluster %x / old thread = %x / new thread = %x\n",
-            __FUNCTION__, core->lid, local_cxy, current->trdid, new->trdid );
-
-     // switch contexts if new thread
-     hal_cpu_context_save( current );
-     hal_cpu_context_restore( new );
-
-     // restore IRQs
-     hal_restore_irq( sr_save );
-
-     if( current->type != THREAD_USER ) return;
-
-     if( current == core->fpu_owner )  hal_fpu_enable();
-     else                              hal_fpu_disable();
-
- }  // end sched_switch_to()
-
+ ////////////////////
+ void sched_display()
+ {
+     list_entry_t * iter;
+     thread_t     * thread;
+     uint32_t       save_sr;
+
+     thread_t     * current = CURRENT_THREAD;
+     core_t       * core    = current->core;
+     scheduler_t  * sched   = &core->scheduler;
+
+     // get pointers on TXT0 chdev
+     xptr_t    txt0_xp  = chdev_dir.txt[0];
+     cxy_t     txt0_cxy = GET_CXY( txt0_xp );
+     chdev_t * txt0_ptr = GET_PTR( txt0_xp );
+
+     // get extended pointer on remote TXT0 chdev lock
+     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
+
+     // get TXT0 lock in busy waiting mode
+     remote_spinlock_lock_busy( lock_xp , &save_sr );
+
+     nolock_printk("\n********** scheduler state for core[%x,%d] **********************\n"
+            "kernel_threads = %d / user_threads = %d / current = %x\n",
+             local_cxy , core->lid,
+             sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
+
+     // display kernel threads
+     LIST_FOREACH( &sched->k_root , iter )
+     {
+         thread = LIST_ELEMENT( iter , thread_t , sched_list );
+         nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+         thread_type_str( thread->type ), thread->trdid, thread->process->pid,
+         thread->entry_func, thread->blocked );
+     }
+
+     // display user threads
+     LIST_FOREACH( &sched->u_root , iter )
+     {
+         thread = LIST_ELEMENT( iter , thread_t , sched_list );
+         nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
+         thread_type_str( thread->type ), thread->trdid, thread->process->pid,
+         thread->entry_func, thread->blocked );
+     }
+
+     // release TXT0 lock
+     remote_spinlock_unlock_busy( lock_xp , save_sr );
+
+ }  // end sched_display()
+
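
(Reading aid, not part of the changeset: the repaired switch path in the new sched_yield() reduces to the sequence below; it is a minimal sketch using only the calls visible in the diff above, in the order they appear.)

    hal_disable_irq( &sr_save );                    // no preemption while switching
    sched->current = next;                          // update scheduler state
    hal_cpu_context_save( current->cpu_context );   // execution of "current" stops here ...
    hal_cpu_context_restore( next->cpu_context );   // ... and the core resumes "next"
    hal_restore_irq( sr_save );                     // runs only when "current" is rescheduled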