source: trunk/kernel/kern/thread.c @ 134

Last change on this file since 134 was 101, checked in by alain, 7 years ago

euh...

File size: 27.4 KB
RevLine 
[1]1/*
2 * thread.c -  implementation of thread operations (user & kernel)
3 *
4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
[23]5 *         Alain Greiner (2016,2017)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
[5]9 * This file is part of ALMOS-MKH.
[1]10 *
[5]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
[1]12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
[5]15 * ALMOS-MKH is distributed in the hope that it will be useful, but
[1]16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
[5]21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
[1]22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[1]26#include <hal_types.h>
27#include <hal_context.h>
28#include <hal_irqmask.h>
29#include <hal_special.h>
30#include <hal_remote.h>
31#include <memcpy.h>
32#include <printk.h>
33#include <cluster.h>
34#include <process.h>
35#include <scheduler.h>
36#include <dev_icu.h>
37#include <core.h>
38#include <list.h>
39#include <xlist.h>
40#include <page.h>
41#include <kmem.h>
42#include <ppm.h>
43#include <thread.h>
44
45//////////////////////////////////////////////////////////////////////////////////////
46// Extern global variables
47//////////////////////////////////////////////////////////////////////////////////////
48
49extern process_t      process_zero;
50
51//////////////////////////////////////////////////////////////////////////////////////
[16]52// This function returns a printable string for the thread type.
[1]53//////////////////////////////////////////////////////////////////////////////////////
[5]54char * thread_type_str( uint32_t type )
55{
[16]56    if     ( type == THREAD_USER   ) return "USER";
57    else if( type == THREAD_RPC    ) return "RPC";
58    else if( type == THREAD_DEV    ) return "DEV";
59    else if( type == THREAD_KERNEL ) return "KERNEL";
60    else if( type == THREAD_IDLE   ) return "IDLE";
[5]61    else                             return "undefined";
62}
63
[1]64/////////////////////////////////////////////////////////////////////////////////////
[14]65// This static function allocates physical memory for a thread descriptor.
66// It can be called by the three functions:
[1]67// - thread_user_create()
[14]68// - thread_user_fork()
[1]69// - thread_kernel_create()
70/////////////////////////////////////////////////////////////////////////////////////
[14]71// @ return pointer on thread descriptor if success / return NULL if failure.
[1]72/////////////////////////////////////////////////////////////////////////////////////
[14]73static thread_t * thread_alloc()
[1]74{
[23]75        page_t       * page;   // pointer on page descriptor containing thread descriptor
76        kmem_req_t     req;    // kmem request
[1]77
78        // allocates memory for thread descriptor + kernel stack
79        req.type  = KMEM_PAGE;
[14]80        req.size  = CONFIG_THREAD_DESC_ORDER;
[1]81        req.flags = AF_KERNEL | AF_ZERO;
82        page      = kmem_alloc( &req );
83
[14]84    // return pointer on new thread descriptor
[23]85        if( page == NULL ) return NULL;
[53]86    else               return (thread_t *)ppm_page2vaddr( page );
[23]87} 
[1]88
[14]89/////////////////////////////////////////////////////////////////////////////////////
[23]90// This static function releases the physical memory for a thread descriptor.
[53]91// It is called by the three functions:
[23]92// - thread_user_create()
93// - thread_user_fork()
94// - thread_kernel_create()
95/////////////////////////////////////////////////////////////////////////////////////
96// @ thread  : pointer on thread descriptor.
97/////////////////////////////////////////////////////////////////////////////////////
98static void thread_release( thread_t * thread )
99{
100    kmem_req_t   req;
101
102    req.type  = KMEM_PAGE;
[53]103    req.ptr   = ppm_vaddr2page( thread );
[23]104    kmem_free( &req );
105}
106
/////////////////////////////////////////////////////////////////////////////////////
// This static function initializes a thread descriptor (kernel or user).
// It can be called by the four functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
// - thread_user_init()
/////////////////////////////////////////////////////////////////////////////////////
// @ thread       : pointer on thread descriptor
// @ process      : pointer on process descriptor.
// @ type         : thread type.
// @ func         : pointer on thread entry function.
// @ args         : pointer on thread entry function arguments.
// @ core_lid     : target core local index.
// @ u_stack_base : stack base (user thread only)
// @ u_stack_size : stack size (user thread only)
// @ return 0 if success / return EINVAL if no TRDID could be allocated.
/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_init( thread_t      * thread,
                            process_t     * process,
                            thread_type_t   type,
                            void          * func,
                            void          * args,
                            lid_t           core_lid,
                            intptr_t        u_stack_base,
                            uint32_t        u_stack_size )
{
    error_t        error;
    trdid_t        trdid;      // allocated thread identifier

    cluster_t    * local_cluster = LOCAL_CLUSTER;

    // register new thread in process descriptor, and get a TRDID
    // (th_lock protects the per-process thread table)
    spinlock_lock( &process->th_lock );
    error = process_register_thread( process, thread , &trdid );
    spinlock_unlock( &process->th_lock );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
        return EINVAL;
    }

    // Initialize new thread descriptor
    thread->trdid           = trdid;
    thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;
    thread->core            = &local_cluster->core_tbl[core_lid];
    thread->process         = process;

    // no local lock is held yet by this new thread
    thread->local_locks     = 0;
    list_root_init( &thread->locks_root );

    // no remote lock is held yet by this new thread
    thread->remote_locks    = 0;
    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );

    // user stack is (0,0) for kernel threads;
    // the kernel stack occupies the thread descriptor page itself
    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread;
    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->signals         = 0;            // no pending signal
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no fork required
    thread->fork_cxy        = 0;

    // thread is created blocked : the caller must explicitly unblock it
    thread->blocked = THREAD_BLOCKED_GLOBAL;

    // reset children list
    xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
    thread->children_nr = 0;

    // reset sched list and brothers list
    list_entry_init( &thread->sched_list );
    xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialise signature (checked later to detect descriptor corruption)
    thread->signature = THREAD_SIGNATURE;

    // update local DQDT
    dqdt_local_update_threads( 1 );

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    return 0;

} // end thread_init()
[1]203
[14]204
[1]205/////////////////////////////////////////////////////////
[23]206error_t thread_user_create( pid_t             pid,
207                            void            * start_func,
208                            void            * start_arg,
[1]209                            pthread_attr_t  * attr,
[23]210                            thread_t       ** new_thread )
[1]211{
212    error_t        error;
213        thread_t     * thread;       // pointer on created thread descriptor
214    process_t    * process;      // pointer to local process descriptor
215    lid_t          core_lid;     // selected core local index
[23]216    vseg_t       * vseg;         // stack vseg
[1]217
[23]218    thread_dmsg("\n[INFO] %s : enters for process %x\n", __FUNCTION__ , pid );
[5]219
[23]220    // get process descriptor local copy
221    process = process_get_local_copy( pid );
[1]222
[23]223    if( process == NULL )
224    {
225                printk("\n[ERROR] in %s : cannot get process descriptor %x\n",
226               __FUNCTION__ , pid );
227        return ENOMEM;
228    }
229
[1]230    // select a target core in local cluster
[23]231    if( attr->attributes & PT_ATTR_CORE_DEFINED ) core_lid = attr->lid;
232    else                                          core_lid = cluster_select_local_core();
[1]233
234    // check core local index
[23]235    if( core_lid >= LOCAL_CLUSTER->cores_nr )
236    {
237            printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
238               __FUNCTION__ , core_lid );
239       
240        return EINVAL;
241    }
[1]242
[23]243    // allocate a stack from local VMM
244    vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
[1]245
[23]246    if( vseg == NULL );
247    {
248            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
249                return ENOMEM;
250    } 
251
[14]252    // allocates memory tor thread descriptor
253    thread = thread_alloc();
[1]254
[23]255    if( thread == NULL )
256    {
257            printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ );
258        vmm_remove_vseg( vseg );
259        return ENOMEM;
260    }
[14]261
262    // initializes thread descriptor
263    error = thread_init( thread,
264                         process,
265                         THREAD_USER,
[23]266                         start_func,
267                         start_arg,
[14]268                         core_lid,
[23]269                         vseg->min,
270                         vseg->max - vseg->min );
[14]271
[23]272    if( error ) 
[14]273    {
[23]274            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
275        vmm_remove_vseg( vseg );
276        thread_release( thread );
[14]277        return EINVAL;
278    }
279
280    // set LOADABLE flag
[1]281    thread->flags = THREAD_FLAG_LOADABLE;
[14]282
283    // set DETACHED flag if required
[23]284    if( attr->attributes & PT_ATTR_DETACH ) thread->flags |= THREAD_FLAG_DETACHED;
[1]285
286    // allocate & initialise CPU context
287        error = hal_cpu_context_create( thread ); 
288
[23]289    if( error ) 
290    {
291            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
292        vmm_remove_vseg( vseg );
293        thread_release( thread );
294        return ENOMEM;
295    }
296
[1]297    // allocate & initialise FPU context
298    error = hal_fpu_context_create( thread ); 
[23]299
300    if( error )
301    {
302            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
303        vmm_remove_vseg( vseg );
304        thread_release( thread );
305        return ENOMEM;
306    }
307
[5]308    thread_dmsg("\n[INFO] %s : exit / trdid = %x / process %x / core = %d\n", 
309                __FUNCTION__ , thread->trdid , process->pid , core_lid );
[1]310
311    *new_thread = thread;
312        return 0;
[14]313
[1]314} // end thread_user_create()
315
316
[23]317//////////////////////////////////////////////
318error_t thread_user_fork( process_t * process,
319                          thread_t ** new_thread )
[1]320{
321    error_t        error;
[14]322        thread_t     * thread;       // pointer on new thread descriptor
[1]323    lid_t          core_lid;     // selected core local index
[23]324        vseg_t       * vseg;         // stack vseg
[1]325
[14]326    thread_dmsg("\n[INFO] %s : enters\n", __FUNCTION__ );
[5]327
[23]328    // allocate a stack from local VMM
329    vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
330
331    if( vseg == NULL );
332    {
333            printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
334                return ENOMEM;
335    } 
336
[1]337    // select a target core in local cluster
338    core_lid = cluster_select_local_core();
339
340    // get pointer on calling thread descriptor
341    thread_t * this = CURRENT_THREAD;
342
[14]343    // allocated memory for new thread descriptor
344    thread = thread_alloc();
[1]345
[23]346    if( thread == NULL )
347    {
348        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
349        vmm_remove_vseg( vseg );
350        return ENOMEM;
351    }
[14]352
353    // initializes thread descriptor
354    error = thread_init( thread,
355                         process,
356                         THREAD_USER,
357                         this->entry_func,
358                         this->entry_args,
359                         core_lid,
[23]360                         vseg->min,
361                         vseg->max - vseg->min );
[14]362
[23]363    if( error )
[14]364    {
[23]365            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
366        vmm_remove_vseg( vseg );
367        thread_release( thread );
[14]368        return EINVAL;
369    }
370
[1]371    // set ATTACHED flag if set in this thread
[14]372    if( this->flags & THREAD_FLAG_DETACHED ) thread->flags = THREAD_FLAG_DETACHED;
[1]373
374    // allocate & initialise CPU context from calling thread
[14]375        error = hal_cpu_context_copy( thread , this ); 
[1]376
[23]377    if( error )
378    {
379            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
380        vmm_remove_vseg( vseg );
381        thread_release( thread );
382        return ENOMEM;
383    }
384
[1]385    // allocate & initialise FPU context from calling thread
[14]386        error = hal_fpu_context_copy( thread , this ); 
[1]387
[23]388    if( error )
389    {
390            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
391        vmm_remove_vseg( vseg );
392        thread_release( thread );
393        return ENOMEM;
394    }
395
396    thread_dmsg("\n[INFO] %s : exit / thread %x for process %x on core %d in cluster %x\n", 
[14]397                 __FUNCTION__, thread->trdid, process->pid, core_lid, local_cxy );
[1]398
[14]399    *new_thread = thread;
[1]400        return 0;
[5]401
[1]402} // end thread_user_fork()
403
404
405
406/////////////////////////////////////////////////////////
407error_t thread_kernel_create( thread_t     ** new_thread,
408                              thread_type_t   type,
409                              void          * func, 
410                              void          * args, 
411                                              lid_t           core_lid )
412{
413    error_t        error;
[14]414        thread_t     * thread;       // pointer on new thread descriptor
415        kmem_req_t     req;          // kmem request (for release)
[1]416
[14]417    thread_dmsg("\n[INFO] %s : enters for type %s in cluster %x\n",
[5]418                __FUNCTION__ , thread_type_str( type ) , local_cxy );
[1]419
[5]420    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) || 
421              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
422              __FUNCTION__ , "illegal thread type" );
[1]423
[5]424    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , 
425            __FUNCTION__ , "illegal core_lid" );
[1]426
[14]427    // allocated memory for new thread descriptor
428    thread = thread_alloc();
429
430    if( thread == NULL ) return ENOMEM;
431
432    // initializes thread descriptor
433    error = thread_init( thread,
434                         &process_zero,
435                         type,
436                         func,
437                         args,
438                         core_lid,
439                         0 , 0 );  // no user stack for a kernel thread
440
441    if( error ) // release allocated memory for thread descriptor
[1]442    {
[14]443            req.type  = KMEM_PAGE;
[53]444        req.ptr   = ppm_vaddr2page( thread );
[14]445        kmem_free( &req );
446        return EINVAL;
[1]447    }
448
[14]449
[1]450    // allocate & initialise CPU context
[14]451        hal_cpu_context_create( thread ); 
[1]452
[14]453    thread_dmsg("\n[INFO] %s : exit in cluster %x / trdid = %x / core_lid = %d\n", 
454                 __FUNCTION__ , local_cxy , thread->trdid , core_lid );
[1]455
[14]456    *new_thread = thread; 
[1]457        return 0;
[5]458
[1]459} // end thread_kernel_create()
460
[14]461///////////////////////////////////////////////////
462error_t thread_kernel_init( thread_t      * thread,
463                            thread_type_t   type,
464                            void          * func, 
465                            void          * args, 
466                                            lid_t           core_lid )
467{
468    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) || 
469              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
470              __FUNCTION__ , "illegal thread type" );
[1]471
[14]472    if( core_lid >= LOCAL_CLUSTER->cores_nr ) 
473    {
474        printk("\n[PANIC] in %s : illegal core_lid / cores = %d / lid = %d / cxy = %x\n", 
475               __FUNCTION__ , LOCAL_CLUSTER->cores_nr , core_lid , local_cxy );
476        hal_core_sleep();
477    }
478
479    error_t  error = thread_init( thread,
480                                  &process_zero,
481                                  type,
482                                  func,
483                                  args,
484                                  core_lid,
485                                  0 , 0 );   // no user stack for a kernel thread
486
487    // allocate & initialize CPU context if success
488    if( error == 0 ) hal_cpu_context_create( thread );
489     
490    return error;
491
492}  // end thread_kernel_init()
493
[1]494///////////////////////////////////////////////////////////////////////////////////////
495// TODO: check that all memory dynamically allocated during thread execution
496// has been released, using a cache of mmap and malloc requests. [AG]
497///////////////////////////////////////////////////////////////////////////////////////
498void thread_destroy( thread_t * thread )
499{
500        uint32_t     tm_start;
501        uint32_t     tm_end;
[60]502    reg_t        state;
[1]503
504    process_t  * process    = thread->process;
505    core_t     * core       = thread->core;
506
[5]507    thread_dmsg("\n[INFO] %s : enters for thread %x in process %x / type = %s\n",
508                __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) );
[1]509
[5]510    assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );
511
512    assert( (thread->local_locks == 0) , __FUNCTION__ , "all local locks not released" );
[1]513   
[5]514    assert( (thread->remote_locks == 0) , __FUNCTION__ , "all remote locks not released" );
515
[101]516        tm_start = hal_get_cycles();
[1]517
518    // update intrumentation values
519    uint32_t pgfaults = thread->info.pgfault_nr;
520    uint32_t u_errors = thread->info.u_err_nr;
521    uint32_t m_errors = thread->info.m_err_nr;
522
523        process->vmm.pgfault_nr += pgfaults;
524        process->vmm.u_err_nr   += u_errors;
525        process->vmm.m_err_nr   += m_errors;
526
527    // release memory allocated for CPU context and FPU context
528        hal_cpu_context_destroy( thread );
529        hal_fpu_context_destroy( thread );
530       
531    // release FPU if required
532    // TODO This should be done before calling thread_destroy()
533        hal_disable_irq( &state );
534        if( core->fpu_owner == thread )
535        {
536                core->fpu_owner = NULL;
537                hal_fpu_disable();
538        }
539        hal_restore_irq( state );
540
541    // remove thread from process th_tbl[]
542    // TODO This should be done before calling thread_destroy()
543    ltid_t ltid = LTID_FROM_TRDID( thread->trdid );
544
545        spinlock_lock( &process->th_lock );
546        process->th_tbl[ltid] = XPTR_NULL;
547        process->th_nr--;
548        spinlock_unlock( &process->th_lock );
549       
[23]550    // update local DQDT
551    dqdt_local_update_threads( -1 );
552
[1]553    // invalidate thread descriptor
554        thread->signature = 0;
555
556    // release memory for thread descriptor
[23]557    thread_release( thread );
[1]558
[101]559        tm_end = hal_get_cycles();
[1]560
[5]561        thread_dmsg("\n[INFO] %s : exit for thread %x in process %x / duration = %d\n",
562                       __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start );
[1]563
564}  // end thread_destroy()
565
566
567/////////////////////////////////////////////////
568void thread_child_parent_link( xptr_t  xp_parent,
569                               xptr_t  xp_child )
570{
571    // get extended pointers on children list root
572    cxy_t      parent_cxy = GET_CXY( xp_parent );   
573    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
574    xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );
575
576    // get extended pointer on children list entry
577    cxy_t      child_cxy  = GET_CXY( xp_child );   
578    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
579    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
580
581    // set the link
582    xlist_add_first( root , entry );
583    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
584} 
585
586///////////////////////////////////////////////////
587void thread_child_parent_unlink( xptr_t  xp_parent,
588                                 xptr_t  xp_child )
589{
590    // get extended pointer on children list lock
591    cxy_t      parent_cxy = GET_CXY( xp_parent );   
592    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
593    xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );
594
595    // get extended pointer on children list entry
596    cxy_t      child_cxy  = GET_CXY( xp_child );   
597    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
598    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );
599
600    // get the lock
601    remote_spinlock_lock( lock );
602
603    // remove the link
604    xlist_unlink( entry );
605    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );
606   
607    // release the lock
608    remote_spinlock_unlock( lock );
609}
610
611/////////////////////////////////////////////////
612inline void thread_set_signal( thread_t * thread,
613                               uint32_t   mask )
614{
615    hal_atomic_or( &thread->signals , mask );
616}
617 
618///////////////////////////////////////////////////
619inline void thread_reset_signal( thread_t * thread,
620                                 uint32_t   mask )
621{
622    hal_atomic_and( &thread->signals , ~mask );
623}
624 
625//////////////////////////////////
626inline bool_t thread_is_joinable()
627{
628    thread_t * this = CURRENT_THREAD;
629    return( (this->brothers_list.next != XPTR_NULL) &&
630            (this->brothers_list.pred != XPTR_NULL) );
631}
632
633//////////////////////////////////
634inline bool_t thread_is_runnable()
635{
636    thread_t * this = CURRENT_THREAD;
637    return( this->blocked == 0 );
638}
639
640////////////////////////////////
641inline bool_t thread_can_yield()
642{
643    thread_t * this = CURRENT_THREAD;
644    return ( (this->local_locks == 0) && (this->remote_locks == 0) );
645}
646
647///////////////////////////
648bool_t thread_check_sched()
649{
650        thread_t * this = CURRENT_THREAD;
651
652    // check locks count
653        if( (this->local_locks != 0) || (this->remote_locks != 0) ) return false;
654
655    // compute elapsed time, taking into account 32 bits register wrap
656    uint32_t elapsed;
[101]657    uint32_t time_now   = hal_get_cycles();
[1]658    uint32_t time_last  = this->time_last_check;
659    if( time_now < time_last ) elapsed = (0xFFFFFFFF - time_last) + time_now;
660        else                       elapsed = time_now - time_last;
661
662    // update thread time
663    this->time_last_check = time_now;
664
665        // check elapsed time
666        if( elapsed < CONFIG_CORE_CHECK_EVERY ) return false;
667    else                                    return true;
668}
669
670/////////////////////
671error_t thread_exit()
672{
[60]673    reg_t      sr_save;
[1]674
675        thread_t * this = CURRENT_THREAD;
676
677    // test if this thread can be descheduled
678        if( !thread_can_yield() )
679        {
680        printk("ERROR in %s : thread %x in process %x on core %d in cluster %x\n"
681               " did not released all locks\n",
682               __FUNCTION__ , this->trdid , this->process->pid ,
683               CURRENT_CORE->lid , local_cxy );
684        return EINVAL;
685    }
686
687    if( this->flags & THREAD_FLAG_DETACHED )
688    {
689        // if detached set signal and set blocking cause atomically
690        hal_disable_irq( &sr_save );
691        thread_set_signal( this , THREAD_SIG_KILL );
692        thread_block( this , THREAD_BLOCKED_EXIT );
693        hal_restore_irq( sr_save );
694    }
695    else 
696    {
697        // if attached, set blocking cause
698        thread_block( this , THREAD_BLOCKED_EXIT );
699    }
700
701    // deschedule
702    sched_yield();
703    return 0;
704
705} // end thread_exit()
706
707/////////////////////////////////////
708void thread_block( thread_t * thread,
709                   uint32_t   cause )
710{
711    // set blocking cause
712    hal_atomic_or( &thread->blocked , cause );
713
714}  // end thread_block()
715
716////////////////////////////////////
717void thread_unblock( xptr_t   thread,
718                    uint32_t cause )
719{
720    // get thread cluster and local pointer
721    cxy_t      cxy = GET_CXY( thread ); 
722    thread_t * ptr = (thread_t *)GET_PTR( thread );
723
724    // reset blocking cause
725    hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
726
727}  // end thread_unblock()
728
729/////////////////////////////////////
730void thread_kill( thread_t * target )
731{
732    // set SIG_KILL signal in target thread descriptor
733    thread_set_signal( target , THREAD_SIG_KILL );
734
735    // set the global blocked bit in target thread descriptor.
736    thread_block( target , THREAD_BLOCKED_GLOBAL );
737
738    // send an IPI to reschedule the target thread core.
739    dev_icu_send_ipi( local_cxy , target->core->lid );
740
741}  // end thread_kill()
742
743
[14]744///////////////////////
745void thread_idle_func()
[1]746{
[68]747
748#if CONFIG_IDLE_DEBUG
[14]749    lid_t  lid = CURRENT_CORE->lid;
[68]750#endif
[14]751
[1]752    while( 1 )
753    {
[50]754        idle_dmsg("\n[INFO] %s : core[%x][%d] goes to sleep at cycle %d\n",
[101]755                    __FUNCTION__ , local_cxy , lid , hal_get_cycles() );
[1]756
757        // force core to sleeping state
758        hal_core_sleep();
759
[50]760        idle_dmsg("\n[INFO] %s : core[%x][%d] wake up at cycle %d\n",
[101]761                    __FUNCTION__ , local_cxy , lid , hal_get_cycles() );
[1]762
[14]763                // acknowledge IRQ
764        dev_icu_irq_handler();
765
766        // force scheduling
[1]767        sched_yield();
768   }
769}  // end thread_idle()
770
/////////////////////////////////////////////////
// Placeholder : should update the user-mode execution time of a thread.
// Currently only prints a warning.
void thread_user_time_update( thread_t * thread )
{
    // TODO
    printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}
[1]777
///////////////////////////////////////////////////
// Placeholder : should update the kernel-mode execution time of a thread.
// Currently only prints a warning.
void thread_kernel_time_update( thread_t * thread )
{
    // TODO
    printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}
784
////////////////////////////////////////////////
// Placeholder : should handle the pending signals of a thread.
// Currently only prints a warning.
void thread_signals_handle( thread_t * thread )
{
    // TODO
    printk("\n[WARNING] function %s not implemented\n", __FUNCTION__ );
}
791
/////////////////////////////////////////////////////////////////////////////////////
// This function returns an extended pointer on the thread descriptor identified
// by the (pid , trdid) couple. The target cluster is encoded in the TRDID.
// It returns XPTR_NULL if the process or the thread cannot be found.
/////////////////////////////////////
xptr_t thread_get_xptr( pid_t    pid,
                        trdid_t  trdid )
{
    cxy_t         target_cxy;          // target thread cluster identifier
    ltid_t        target_thread_ltid;  // target thread local index
    thread_t    * target_thread_ptr;   // target thread local pointer
    xptr_t        target_process_xp;   // extended pointer on target process descriptor
    process_t   * target_process_ptr;  // local pointer on target process descriptor
    pid_t         target_process_pid;  // target process identifier
    xlist_entry_t root;                // root of list of process in target cluster
    xptr_t        lock_xp;             // extended pointer on lock protecting  this list

    // get target cluster identifier and local thread identifier
    target_cxy         = CXY_FROM_TRDID( trdid );
    target_thread_ltid = LTID_FROM_TRDID( trdid );

    // get root of list of process descriptors in target cluster
    // (copied locally, since the list head lives in the remote cluster)
    hal_remote_memcpy( XPTR( local_cxy  , &root ),
                       XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ),
                       sizeof(xlist_entry_t) );

    // get extended pointer on lock protecting the list of processes
    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );

    // take the lock protecting the list of processes in target cluster
    remote_spinlock_lock( lock_xp );

    // loop on list of process in target cluster to find the PID process
    xptr_t  iter;
    bool_t  found = false;
    XLIST_FOREACH( XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_root ) , iter )
    {
        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
        target_process_ptr = (process_t *)GET_PTR( target_process_xp );
        target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
        if( target_process_pid == pid )
        {
            found = true;
            break;
        }
    }

    // release the lock protecting the list of processes in target cluster
    remote_spinlock_unlock( lock_xp );

    // check target thread found
    if( found == false )
    {
        return XPTR_NULL;
    }

    // get target thread local pointer from the process thread table
    // NOTE(review): th_tbl[] is read after the list lock has been released —
    // presumably protected elsewhere; verify against callers
    xptr_t xp = XPTR( target_cxy , &target_process_ptr->th_tbl[target_thread_ltid] );
    target_thread_ptr = (thread_t *)hal_remote_lpt( xp );

    if( target_thread_ptr == NULL )
    {
        return XPTR_NULL;
    }

    return XPTR( target_cxy , target_thread_ptr );

}  // end thread_get_xptr()
856
Note: See TracBrowser for help on using the repository browser.