Ignore:
Timestamp:
Nov 1, 2018, 12:10:42 PM (6 years ago)
Author:
alain
Message:

Improve signals.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/kern/scheduler.c

    r582 r583  
    4040
    4141extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
     42extern process_t            process_zero;       // allocated in kernel_init.c
    4243
    4344///////////////////////////////////////////////////////////////////////////////////////////
     
    8384
    8485// check kernel threads list
    85 assert( (count < sched->k_threads_nr),
    86 "bad kernel threads list" );
     86assert( (count < sched->k_threads_nr), "bad kernel threads list" );
    8787
    8888            // get next entry in kernel list
     
    118118
    119119// check user threads list
    120 assert( (count < sched->u_threads_nr),
    121 "bad user threads list" );
     120assert( (count < sched->u_threads_nr), "bad user threads list" );
    122121
    123122            // get next entry in user list
     
    146145
    147146////////////////////////////////////////////////////////////////////////////////////////////
    148 // This static function is the only function that can remove a thread from the scheduler.
     147// This static function is the only function that can actually delete a thread.
    149148// It is private, because it is called by the sched_yield() public function.
    150149// It scans all threads attached to a given scheduler, and executes the relevant
    151 // actions for pending requests:
     150// actions for two types of pending requests:
    152151// - REQ_ACK : it checks that target thread is blocked, decrements the response counter
    153152//   to acknowledge the client thread, and reset the pending request.
    154 // - REQ_DELETE : it detach the target thread from parent if attached, detach it from
    155 //   the process, remove it from scheduler, release memory allocated to thread descriptor,
    156 //   and destroy the process descriptor it the target thread was the last thread.
     153// - REQ_DELETE : it removes the target thread from the process th_tbl[], remove it
     154//   from the scheduler list, and release the memory allocated to thread descriptor.
     155//   For a user thread, it destroys the process descriptor if the target thread is
     156//   the last thread in the local process descriptor.
     157//
     158// Implementation note:
     159// We use a while to scan the threads in scheduler lists, because some threads can
     160// be destroyed, and we do not want to use a LIST_FOREACH()
    157161////////////////////////////////////////////////////////////////////////////////////////////
    158162// @ core    : local pointer on the core descriptor.
     
    166170    process_t    * process;
    167171    scheduler_t  * sched;
    168     bool_t         last;
     172    uint32_t       threads_nr;   // number of threads in scheduler list
     173    ltid_t         ltid;         // thread local index
     174    uint32_t       count;        // number of threads in local process
    169175
    170176    // get pointer on scheduler
    171177    sched = &core->scheduler;
    172178
    173     // get pointer on user threads root
     179    ////// scan user threads to handle both ACK and DELETE requests
    174180    root = &sched->u_root;
    175 
    176     // We use a while to scan the user threads, to control the iterator increment,
    177     // because some threads will be destroyed, and we want not use a LIST_FOREACH()
    178 
    179     // initialise list iterator
    180181    iter = root->next;
    181 
    182     // scan all user threads
    183182    while( iter != root )
    184183    {
     
    210209            process = thread->process;
    211210
    212                 // release FPU if required
    213                 if( thread->core->fpu_owner == thread )  thread->core->fpu_owner = NULL;
    214 
    215             // take lock protecting sheduler state
     211            // get thread ltid
     212            ltid = LTID_FROM_TRDID( thread->trdid);
     213
     214            // take the lock protecting th_tbl[]
     215            rwlock_wr_acquire( &process->th_lock );
     216
     217            // take the lock protecting scheduler state
    216218            busylock_acquire( &sched->lock );
    217219
    218220            // update scheduler state
    219             uint32_t threads_nr = sched->u_threads_nr;
     221            threads_nr = sched->u_threads_nr;
    220222            sched->u_threads_nr = threads_nr - 1;
    221223            list_unlink( &thread->sched_list );
     
    236238            }
    237239
    238             // release lock protecting scheduler state
     240            // release the lock protecting scheduler state
    239241            busylock_release( &sched->lock );
    240242
    241             // delete thread descriptor
    242             last = thread_destroy( thread );
     243            // get number of threads in local process
     244            count = process->th_nr;
     245
     246// check th_nr value
     247assert( (count > 0) , "process th_nr cannot be 0\n" );
     248
     249            // remove thread from process th_tbl[]
     250            process->th_tbl[ltid] = NULL;
     251            process->th_nr = count - 1;
     252 
     253            // release the lock protecting th_tbl[]
     254            rwlock_wr_release( &process->th_lock );
     255
     256            // release memory allocated for thread descriptor
     257            thread_destroy( thread );
    243258
    244259#if DEBUG_SCHED_HANDLE_SIGNALS
    245260uint32_t cycle = (uint32_t)hal_get_cycles();
    246261if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
    247 printk("\n[DBG] %s : thread %x in process %x on core[%x,%d] deleted / cycle %d\n",
    248 __FUNCTION__ , thread->trdid , process->pid , local_cxy , thread->core->lid , cycle );
     262printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
     263__FUNCTION__ , process->pid , thread->trdid , local_cxy , thread->core->lid , cycle );
    249264#endif
    250             // destroy process descriptor if no more threads
    251             if( last )
     265            // destroy process descriptor if last thread
     266            if( count == 1 )
    252267            {
    253268                // delete process   
     
    262277            }
    263278        }
     279    }  // end user threads
     280
     281    ////// scan kernel threads for DELETE only
     282    root = &sched->k_root;
     283    iter = root->next;
     284    while( iter != root )
     285    {
     286        // get pointer on thread
     287        thread = LIST_ELEMENT( iter , thread_t , sched_list );
     288
     289        // increment iterator
     290        iter = iter->next;
     291
     292        // handle REQ_DELETE only if target thread != calling thread
     293        if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
     294        {
     295
     296// check process descriptor is local kernel process
     297assert( ( thread->process == &process_zero ) , "illegal process descriptor\n");
     298
     299            // get thread ltid
     300            ltid = LTID_FROM_TRDID( thread->trdid);
     301
     302            // take the lock protecting th_tbl[]
     303            rwlock_wr_acquire( &process_zero.th_lock );
     304
     305            // take lock protecting scheduler state
     306            busylock_acquire( &sched->lock );
     307
     308            // update scheduler state
     309            threads_nr = sched->k_threads_nr;
     310            sched->k_threads_nr = threads_nr - 1;
     311            list_unlink( &thread->sched_list );
     312            if( sched->k_last == &thread->sched_list )
     313            {
     314                if( threads_nr == 1 )
     315                {
     316                    sched->k_last = NULL;
     317                }
     318                else if( sched->k_root.next == &thread->sched_list )
     319                {
     320                    sched->k_last = sched->k_root.pred;
     321                }
     322                else
     323                {
     324                    sched->k_last = sched->k_root.next;
     325                }
     326            }
     327
     328            // release lock protecting scheduler state
     329            busylock_release( &sched->lock );
     330
     331            // get number of threads in local kernel process
     332            count = process_zero.th_nr;
     333
     334// check th_nr value
     335assert( (count > 0) , "kernel process th_nr cannot be 0\n" );
     336
     337            // remove thread from process th_tbl[]
     338            process_zero.th_tbl[ltid] = NULL;
     339            process_zero.th_nr = count - 1;
     340 
     341            // release the lock protecting th_tbl[]
     342            rwlock_wr_release( &process_zero.th_lock );
     343
     344            // delete thread descriptor
     345            thread_destroy( thread );
     346
     347#if DEBUG_SCHED_HANDLE_SIGNALS
     348uint32_t cycle = (uint32_t)hal_get_cycles();
     349if( DEBUG_SCHED_HANDLE_SIGNALS < cycle )
     350printk("\n[DBG] %s : thread[%x,%x] on core[%x,%d] deleted / cycle %d\n",
     351__FUNCTION__ , process_zero.pid , thread->trdid , local_cxy , thread->core->lid , cycle );
     352#endif
     353        }
    264354    }
    265355} // end sched_handle_signals()
     
    268358// This static function is called by the sched_yield function when the RFC_FIFO
    269359// associated to the core is not empty.
    270 // It checks if it exists an idle (blocked) RPC thread for this core, and unblock
    271 // it if found. It creates a new RPC thread if no idle RPC thread is found.
     360// It searches for an idle RPC thread for this core, and unblocks it if found.
     361// It creates a new RPC thread if no idle RPC thread is found.
    272362////////////////////////////////////////////////////////////////////////////////////////////
    273363// @ sched   : local pointer on scheduler.
     
    285375    {
    286376        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    287         if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
    288         {
    289             // exit loop
     377
     378        if( (thread->type == THREAD_RPC) &&
     379            (thread->blocked == THREAD_BLOCKED_IDLE ) )
     380        {
    290381            found = true;
    291382            break;
     
    303394        if ( error )
    304395        {
    305             printk("\n[WARNING] in %s : no memory to create a RPC thread in cluster %x\n",
     396            printk("\n[ERROR] in %s : no memory to create a RPC thread in cluster %x\n",
    306397            __FUNCTION__, local_cxy );
    307398        }
     
    317408uint32_t cycle = (uint32_t)hal_get_cycles();
    318409if( DEBUG_SCHED_RPC_ACTIVATE < cycle )
    319 printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / cycle %d\n",
    320 __FUNCTION__, thread->trdid, local_cxy, lid, cycle );
     410printk("\n[DBG] %s : new RPC thread %x created for core[%x,%d] / total %d / cycle %d\n",
     411__FUNCTION__, thread->trdid, local_cxy, lid, LOCAL_CLUSTER->rpc_threads[lid], cycle );
    321412#endif
    322413        }
     
    476567        busylock_release( &sched->lock );
    477568
    478 #if DEBUG_SCHED_YIELD
     569#if (DEBUG_SCHED_YIELD & 1)
    479570if( sched->trace )
    480571printk("\n[DBG] %s : core[%x,%d] / cause = %s\n"
     
    519610    remote_busylock_acquire( lock_xp );
    520611
    521     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    522     local_cxy , core->lid, sched->current, (uint32_t)hal_get_cycles() );
     612    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
     613    local_cxy , core->lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
     614    (uint32_t)hal_get_cycles() );
    523615
    524616    // display kernel threads
     
    564656"illegal cluster %x\n", cxy );
    565657
    566 // check lid
    567658assert( (lid < hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->cores_nr ) ) ),
    568659"illegal core index %d\n", lid );
     
    590681    remote_busylock_acquire( lock_xp );
    591682
     683    // get rpc_threads
     684    uint32_t rpcs = hal_remote_l32( XPTR( cxy , &LOCAL_CLUSTER->rpc_threads[lid] ) );
     685 
    592686    // display header
    593     nolock_printk("\n***** threads on core[%x,%d] / current %x / cycle %d\n",
    594     cxy , lid, current, (uint32_t)hal_get_cycles() );
     687    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
     688    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
    595689
    596690    // display kernel threads
Note: See TracChangeset for help on using the changeset viewer.