Ignore:
Timestamp:
Dec 20, 2017, 4:51:09 PM (7 years ago)
Author:
alain
Message:

Fix bugs in exec

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/scheduler.c

    r408 r409  
    5858    list_root_init( &sched->k_root );
    5959
     60    sched->sig_pending    = false;            // no pending signal
     61
    6062}  // end sched_init()
    6163
     
    7274    if( type == THREAD_USER )
    7375    {
    74         // register thread in scheduler user list
    7576        list_add_last( &sched->u_root , &thread->sched_list );
    7677        sched->u_threads_nr++;
    77 
    78         // initialize u_last field if first user thread
    7978        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    8079    }
    8180    else // kernel thread
    8281    {
    83         // register thread in scheduler kernel list
    8482        list_add_last( &sched->k_root , &thread->sched_list );
    8583        sched->k_threads_nr++;
    86 
    87         // initialize k_last field if first kernel thread
    8884        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    8985    }
     
    9288    spinlock_unlock( &sched->lock );
    9389
    94 }  // end sched_register()
     90}  // end sched_register_thread()
    9591
    9692/////////////////////////////////////////////
    9793void sched_remove_thread( thread_t * thread )
    9894{
    99     core_t       * core  = thread->core;
    100     scheduler_t  * sched = &core->scheduler;
    101     thread_type_t  type  = thread->type;
     95    scheduler_t * sched = &thread->core->scheduler;
     96    thread_type_t type  = thread->type;
    10297
    10398    // take lock protecting scheduler lists
     
    106101    if( type == THREAD_USER )
    107102    {
    108         // remove thread from user list
    109103        list_unlink( &thread->sched_list );
    110104        sched->u_threads_nr--;
    111 
    112         // reset the u_last field if list empty
    113105        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    114106    }
    115     else // kernel thread
    116     {
    117         // remove thread from kernel list
     107    else // kernel thread
     108    {
    118109        list_unlink( &thread->sched_list );
    119110        sched->k_threads_nr--;
    120 
    121         // reset the k_last field if list empty
    122111        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    123112    }
    124113
    125     // release lock
     114    // release lock 
    126115    spinlock_unlock( &sched->lock );
    127116
    128 }  // end sched_remove()
     117}  // end sched_remove_thread()
    129118
    130119//////////////////////////////////////////////
     
    214203}  // end sched_select()
    215204
    216 ///////////////////////////////////////////
    217 void sched_kill_thread( thread_t * thread )
    218 {
    219     // check locks
    220     if( thread_can_yield() == false )
    221     {
    222         panic("locks not released for thread %x in process %x on core[%x][%d]",
    223         thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
    224     }
    225 
    226     // remove thread from scheduler
    227     sched_remove_thread( thread );
    228 
    229     // reset the THREAD_SIG_KILL signal
    230     thread_reset_signal( thread , THREAD_SIG_KILL );
    231 
    232     // detached thread can suicide
    233     if( thread->signals & THREAD_SIG_SUICIDE )
    234     {
    235         assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
    236         "thread must be detached in case of suicide\n" );
    237 
    238         // remove thread from process
    239         process_remove_thread( thread );
    240 
    241         // release memory for thread descriptor
    242         thread_destroy( thread );
    243     }
    244 }  // end sched_kill_thread()
    245 
    246205//////////////////////////////////////////
    247206void sched_handle_signals( core_t * core )
     
    249208    list_entry_t * iter;
    250209    thread_t     * thread;
     210
    251211    scheduler_t  * sched = &core->scheduler;
    252 
    253 // signal_dmsg("\n@@@ %s enter at cycle %d\n",
    254 // __FUNCTION__ , hal_time_stamp() );
    255212
    256213    // take lock protecting threads lists
     
    261218    {
    262219        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    263         if( thread->signals ) // sched_kill_thread( thread );
    264         {
    265             printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
    266             __FUNCTION__, thread, thread->signals, hal_time_stamp() );
    267         }
    268     }
    269 
    270     // handle kernel threads
    271     LIST_FOREACH( &sched->k_root , iter )
    272     {
    273         thread = LIST_ELEMENT( iter , thread_t , sched_list );
    274         if( thread->signals )  // sched_kill_thread( thread );
    275         {
    276             printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
    277             __FUNCTION__, thread, thread->signals, hal_time_stamp() );
    278 
     220
     221        if( thread->flags & THREAD_FLAG_SIGNAL )  // thread has signal
     222        {
     223            // decrement response counter to acknowledge signal
     224            hal_atomic_add( thread->sig_rsp_count , -1 );
     225
     226            // reset signal
     227            thread_reset_signal( thread );
    279228        }
    280229    }
     
    283232    spinlock_unlock( &sched->lock );
    284233
    285 // signal_dmsg("\n@@@ %s exit at cycle %d\n",
    286 // __FUNCTION__ , hal_time_stamp() );
    287 
    288234} // end sched_handle_signals()
    289235
     
    293239    thread_t    * next;
    294240    thread_t    * current = CURRENT_THREAD;
    295     scheduler_t * sched   = &current->core->scheduler;
     241    core_t      * core    = current->core;
     242    scheduler_t * sched   = &core->scheduler;
    296243 
    297244#if( CONFIG_SCHED_DEBUG & 0x1 )
    298 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
     245if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( core->lid );
    299246#endif
    300247
     
    319266    assert( (next->blocked == 0) || (next->type = THREAD_IDLE) , __FUNCTION__ ,
    320267    "next thread %x (%s) is blocked on core[%x,%d]\n",
    321     next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );
     268    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );
    322269
    323270    // switch contexts and update scheduler state if next != current
     
    327274sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    328275"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
    329 __FUNCTION__, local_cxy, current->core->lid, cause,
     276__FUNCTION__, local_cxy, core->lid, cause,
    330277current, thread_type_str(current->type), current->process->pid, current->trdid,
    331278next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
     
    352299sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    353300"      thread %x (%s) (%x,%x) continue / cycle %d\n",
    354 __FUNCTION__, local_cxy, current->core->lid, cause,
     301__FUNCTION__, local_cxy, core->lid, cause,
    355302current, thread_type_str(current->type), current->process->pid, current->trdid,
    356303(uint32_t)hal_get_cycles() );
    357304
    358305    }
     306
     307    // handle signals for all threads executing on this core.
     308    sched_handle_signals( core );
    359309
    360310    // exit critical section / restore SR from next thread context
Note: See TracChangeset for help on using the changeset viewer.