source: trunk/kernel/kern/thread.c @ 8

Last change on this file since 8 was 5, checked in by alain, 8 years ago

Introduce the chdev_t structure in place of the device_t structure.

File size: 20.0 KB
/*
 * thread.c -  implementation of thread operations (user & kernel)
 *
 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
 *         Mohamed Lamine Karaoui (2015)
 *         Alain Greiner (2016)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <almos_config.h>
#include <hal_types.h>
#include <hal_context.h>
#include <hal_irqmask.h>
#include <hal_special.h>
#include <hal_remote.h>
#include <memcpy.h>
#include <printk.h>
#include <cluster.h>
#include <process.h>
#include <scheduler.h>
#include <dev_icu.h>
#include <core.h>
#include <list.h>
#include <xlist.h>
#include <page.h>
#include <kmem.h>
#include <ppm.h>
#include <thread.h>

//////////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////////

extern process_t      process_zero;

//////////////////////////////////////////////////////////////////////////////////////
//   global variables for display / must be consistent with enum in "thread.h"
//////////////////////////////////////////////////////////////////////////////////////

const char * thread_type_name[THREAD_TYPES_NR] =
{
        "USER",
        "RPC",
        "KERNEL",
        "IDLE",
};

//////////////////////////////////////////////////////////////////////////////////////
// This function returns a printable string for the thread type.
//////////////////////////////////////////////////////////////////////////////////////
char * thread_type_str( uint32_t type )
{
    if     ( type == THREAD_USER   ) return "THREAD_USER";
    else if( type == THREAD_RPC    ) return "THREAD_RPC";
    else if( type == THREAD_DEV    ) return "THREAD_DEV";
    else if( type == THREAD_KERNEL ) return "THREAD_KERNEL";
    else if( type == THREAD_IDLE   ) return "THREAD_IDLE";
    else                             return "undefined";
}

/////////////////////////////////////////////////////////////////////////////////////
// This static function makes the actual allocation and initialisation for a thread
// descriptor. It is called by the three functions:
// - thread_user_create()
// - thread_user_fork()
// - thread_kernel_create()
/////////////////////////////////////////////////////////////////////////////////////
// @ new_thread   : buffer for new thread pointer.
// @ process      : local pointer on process descriptor.
// @ type         : thread type.
// @ func         : local pointer on thread entry function.
// @ args         : local pointer on thread entry function arguments.
// @ core_lid     : target core local index.
// @ u_stack_base : user stack base address.
// @ u_stack_size : user stack size.
/////////////////////////////////////////////////////////////////////////////////////
static error_t thread_create( thread_t     ** new_thread,
                              process_t     * process,
                              thread_type_t   type,
                              void          * func,
                              void          * args,
                              lid_t           core_lid,
                              intptr_t        u_stack_base,
                              uint32_t        u_stack_size )
{
    error_t        error;
    thread_t     * thread;     // pointer on thread descriptor
    page_t       * page;       // pointer on page descriptor containing thread descriptor
    kmem_req_t     req;        // kmem request
    trdid_t        trdid;      // allocated thread identifier

    cluster_t    * local_cluster = LOCAL_CLUSTER;

    // allocate memory for thread descriptor + kernel stack
    req.type  = KMEM_PAGE;
    req.size  = CONFIG_THREAD_PAGE_ORDER;
    req.flags = AF_KERNEL | AF_ZERO;
    page      = kmem_alloc( &req );
    if( page == NULL ) return ENOMEM;

    // get pointer on new thread descriptor
    thread = (thread_t *)ppm_page2base( page );

    // register new thread in local process descriptor, and get a TRDID
    spinlock_lock( &process->th_lock );
    error = process_register_thread( process, thread , &trdid );
    spinlock_unlock( &process->th_lock );

    if( error )
    {
        // release memory allocated for thread descriptor
        req.type  = KMEM_PAGE;
        req.ptr   = page;
        kmem_free( &req );
        return EAGAIN;
    }

    // initialize new thread descriptor
    thread->trdid           = trdid;
    thread->type            = type;
    thread->quantum         = 0;            // TODO
    thread->ticks_nr        = 0;            // TODO
    thread->time_last_check = 0;
    thread->core            = &local_cluster->core_tbl[core_lid];
    thread->process         = process;
    thread->page            = page;

    thread->local_locks     = 0;
    list_root_init( &thread->locks_root );

    thread->remote_locks    = 0;
    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );

    thread->u_stack_base    = u_stack_base;
    thread->u_stack_size    = u_stack_size;
    thread->k_stack_base    = (intptr_t)thread;
    thread->k_stack_size    = CONFIG_PPM_PAGE_SIZE << CONFIG_THREAD_PAGE_ORDER;

    thread->entry_func      = func;         // thread entry point
    thread->entry_args      = args;         // thread function arguments
    thread->flags           = 0;            // all flags reset
    thread->signals         = 0;            // no pending signal
    thread->errno           = 0;            // no error detected
    thread->fork_user       = 0;            // no fork required
    thread->fork_cxy        = 0;

    // thread blocked
    thread->blocked = THREAD_BLOCKED_GLOBAL;

    // reset children list
    xlist_root_init( XPTR( local_cxy , &thread->children_root ) );
    thread->children_nr = 0;

    // reset sched list and brothers list
    list_entry_init( &thread->sched_list );
    xlist_entry_init( XPTR( local_cxy , &thread->brothers_list ) );

    // reset thread info
    memset( &thread->info , 0 , sizeof(thread_info_t) );

    // initialise signature
    thread->signature = THREAD_SIGNATURE;

    // update local DQDT
    dqdt_local_update_threads( 1 );

    // register new thread in core scheduler
    sched_register_thread( thread->core , thread );

    *new_thread = thread;
    return 0;

} // end thread_create()


/////////////////////////////////////////////////////////
error_t thread_user_create( thread_t       ** new_thread,
                            pthread_attr_t  * attr,
                            intptr_t          u_stack_base,
                            uint32_t          u_stack_size )
{
    error_t        error;
    thread_t     * thread;       // pointer on created thread descriptor
    process_t    * process;      // pointer to local process descriptor
    lid_t          core_lid;     // selected core local index

    thread_dmsg("\n[INFO] %s : enters\n",
                __FUNCTION__ );

    cluster_t    * local_cluster = LOCAL_CLUSTER;

    // select a target core in local cluster
    if( attr->flags & PT_FLAG_CORE_DEFINED ) core_lid = attr->lid;
    else                                     core_lid = cluster_select_local_core();

    // check core local index
    if( core_lid >= local_cluster->cores_nr ) return EINVAL;

    // get process descriptor local copy
    process = process_get_local_copy( attr->pid );
    if( process == NULL ) return ENOMEM;

    // make allocation / initialisation
    error = thread_create( &thread,
                           process,
                           THREAD_USER,
                           attr->entry_func,
                           attr->entry_args,
                           core_lid,
                           u_stack_base,
                           u_stack_size );
    if( error ) return ENOMEM;

    // set LOADABLE flag / set DETACHED flag if required
    thread->flags = THREAD_FLAG_LOADABLE;
    if( attr->flags & PT_FLAG_DETACH ) thread->flags |= THREAD_FLAG_DETACHED;

    // allocate & initialise CPU context
    error = hal_cpu_context_create( thread );
    if( error ) return ENOMEM;

    // allocate & initialise FPU context
    error = hal_fpu_context_create( thread );
    if( error ) return ENOMEM;

    thread_dmsg("\n[INFO] %s : exit / trdid = %x / process %x / core = %d\n",
                __FUNCTION__ , thread->trdid , process->pid , core_lid );

    *new_thread = thread;
    return 0;

} // end thread_user_create()
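
/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * It only uses the pthread_attr_t fields referenced above (pid, flags,
 * entry_func, entry_args); the stack values, the process variable, and the
 * user entry symbol are placeholders, and error handling is reduced to a
 * printk().
 *
 *   pthread_attr_t  attr;
 *   thread_t      * thread;
 *
 *   attr.pid        = process->pid;      // target process identifier (placeholder)
 *   attr.entry_func = (void *)user_main; // user entry point (placeholder)
 *   attr.entry_args = NULL;
 *   attr.flags      = PT_FLAG_DETACH;    // no PT_FLAG_CORE_DEFINED => core chosen by kernel
 *
 *   if( thread_user_create( &thread, &attr, u_stack_base, u_stack_size ) )
 *       printk("cannot create user thread\n");
 */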


/////////////////////////////////////////////////
error_t thread_user_fork( thread_t ** new_thread,
                          process_t * process,
                          intptr_t    u_stack_base,
                          uint32_t    u_stack_size )
{
    error_t        error;
    thread_t     * new;          // pointer on thread descriptor
    lid_t          core_lid;     // selected core local index

    thread_dmsg("\n[INFO] %s : enters\n",
                __FUNCTION__ );

    // select a target core in local cluster
    core_lid = cluster_select_local_core();

    // get pointer on calling thread descriptor
    thread_t * this = CURRENT_THREAD;

    // make allocation / initialisation
    error = thread_create( &new,
                           process,
                           THREAD_USER,
                           this->entry_func,
                           this->entry_args,
                           core_lid,
                           u_stack_base,
                           u_stack_size );
    if( error ) return ENOMEM;

    // set DETACHED flag if set in calling thread
    if( this->flags & THREAD_FLAG_DETACHED ) new->flags |= THREAD_FLAG_DETACHED;

    // allocate & initialise CPU context from calling thread
    error = hal_cpu_context_copy( new , this );
    if( error ) return ENOMEM;

    // allocate & initialise FPU context from calling thread
    error = hal_fpu_context_copy( new , this );
    if( error ) return ENOMEM;

    thread_dmsg("\n[INFO] %s : exit / trdid = %x / process %x / core = %d / cluster = %x\n",
                __FUNCTION__ , new->trdid , process->pid , core_lid , local_cxy );

    *new_thread = new;
    return 0;

} // end thread_user_fork()



/////////////////////////////////////////////////////////
error_t thread_kernel_create( thread_t     ** new_thread,
                              thread_type_t   type,
                              void          * func,
                              void          * args,
                              lid_t           core_lid )
{
    error_t        error;
    thread_t     * new;        // pointer on new thread descriptor

    thread_dmsg("\n[INFO] %s : enters for %s in cluster %x\n",
                __FUNCTION__ , thread_type_str( type ) , local_cxy );

    assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
              (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
              __FUNCTION__ , "illegal thread type" );

    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
            __FUNCTION__ , "illegal core_lid" );

    // make allocation / initialisation
    error = thread_create( &new,
                           &process_zero,
                           type,
                           func,
                           args,
                           core_lid,
                           0 , 0 );   // no user stack for a kernel thread
    if( error )
    {
        printk("\n[ERROR] in %s : cannot create thread\n", __FUNCTION__ );
        return ENOMEM;
    }

    // allocate & initialise CPU context
    hal_cpu_context_create( new );

    thread_dmsg("\n[INFO] %s : successful exit / trdid = %x / core = %d\n",
                 __FUNCTION__ , new->trdid , core_lid );

    *new_thread = new;
    return 0;

} // end thread_kernel_create()
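
/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * It creates an RPC kernel thread on core 0 of the local cluster; because
 * thread_create() sets THREAD_BLOCKED_GLOBAL in every new thread, the sketch
 * assumes the caller unblocks it once it is ready to run. The rpc_thread_func
 * symbol is a placeholder.
 *
 *   thread_t * rpc_thread;
 *
 *   if( thread_kernel_create( &rpc_thread, THREAD_RPC,
 *                             &rpc_thread_func, NULL, 0 ) == 0 )
 *   {
 *       thread_unblock( XPTR( local_cxy , rpc_thread ) , THREAD_BLOCKED_GLOBAL );
 *   }
 */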


///////////////////////////////////////////////////////////////////////////////////////
// TODO: check that all memory dynamically allocated during thread execution
// has been released, using a cache of mmap and malloc requests. [AG]
///////////////////////////////////////////////////////////////////////////////////////
void thread_destroy( thread_t * thread )
{
    uint32_t     tm_start;
    uint32_t     tm_end;
    uint32_t     state;

    process_t  * process    = thread->process;
    core_t     * core       = thread->core;

    thread_dmsg("\n[INFO] %s : enters for thread %x in process %x / type = %s\n",
                __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) );

    assert( (thread->children_nr == 0) , __FUNCTION__ , "still attached children" );

    assert( (thread->local_locks == 0) , __FUNCTION__ , "not all local locks released" );

    assert( (thread->remote_locks == 0) , __FUNCTION__ , "not all remote locks released" );

    tm_start = hal_time_stamp();

    // update instrumentation values
    uint32_t pgfaults = thread->info.pgfault_nr;
    uint32_t u_errors = thread->info.u_err_nr;
    uint32_t m_errors = thread->info.m_err_nr;

    process->vmm.pgfault_nr += pgfaults;
    process->vmm.u_err_nr   += u_errors;
    process->vmm.m_err_nr   += m_errors;

    // release memory allocated for CPU context and FPU context
    hal_cpu_context_destroy( thread );
    hal_fpu_context_destroy( thread );

    // release FPU if required
    // TODO This should be done before calling thread_destroy()
    hal_disable_irq( &state );
    if( core->fpu_owner == thread )
    {
        core->fpu_owner = NULL;
        hal_fpu_disable();
    }
    hal_restore_irq( state );

    // remove thread from process th_tbl[]
    // TODO This should be done before calling thread_destroy()
    ltid_t ltid = LTID_FROM_TRDID( thread->trdid );

    spinlock_lock( &process->th_lock );
    process->th_tbl[ltid] = XPTR_NULL;
    process->th_nr--;
    spinlock_unlock( &process->th_lock );

    // invalidate thread descriptor
    thread->signature = 0;

    // release memory for thread descriptor
    kmem_req_t   req;
    req.type     = KMEM_PAGE;
    req.ptr      = ppm_base2page( thread );
    kmem_free( &req );

    tm_end = hal_time_stamp();

    thread_dmsg("\n[INFO] %s : exit for thread %x in process %x / duration = %d\n",
                __FUNCTION__ , thread->trdid , process->pid , tm_end - tm_start );

}  // end thread_destroy()


/////////////////////////////////////////////////
void thread_child_parent_link( xptr_t  xp_parent,
                               xptr_t  xp_child )
{
    // get extended pointer on children list root
    cxy_t      parent_cxy = GET_CXY( xp_parent );
    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    xptr_t     root       = XPTR( parent_cxy , &parent_ptr->children_root );

    // get extended pointer on children list entry
    cxy_t      child_cxy  = GET_CXY( xp_child );
    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );

    // set the link
    xlist_add_first( root , entry );
    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
}

///////////////////////////////////////////////////
void thread_child_parent_unlink( xptr_t  xp_parent,
                                 xptr_t  xp_child )
{
    // get extended pointer on children list lock
    cxy_t      parent_cxy = GET_CXY( xp_parent );
    thread_t * parent_ptr = (thread_t *)GET_PTR( xp_parent );
    xptr_t     lock       = XPTR( parent_cxy , &parent_ptr->children_lock );

    // get extended pointer on children list entry
    cxy_t      child_cxy  = GET_CXY( xp_child );
    thread_t * child_ptr  = (thread_t *)GET_PTR( xp_child );
    xptr_t     entry      = XPTR( child_cxy , &child_ptr->brothers_list );

    // get the lock
    remote_spinlock_lock( lock );

    // remove the link
    xlist_unlink( entry );
    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , -1 );

    // release the lock
    remote_spinlock_unlock( lock );
}
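
/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * Both functions take extended pointers, so a parent thread in the local
 * cluster can be linked to a child thread living in a remote cluster.
 * The child_cxy / child_ptr values are placeholders obtained when the
 * child thread was created.
 *
 *   xptr_t xp_parent = XPTR( local_cxy , CURRENT_THREAD );
 *   xptr_t xp_child  = XPTR( child_cxy , child_ptr );
 *
 *   thread_child_parent_link( xp_parent , xp_child );    // at child creation
 *   ...
 *   thread_child_parent_unlink( xp_parent , xp_child );  // at child deletion
 */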

/////////////////////////////////////////////////
inline void thread_set_signal( thread_t * thread,
                               uint32_t   mask )
{
    hal_atomic_or( &thread->signals , mask );
}

///////////////////////////////////////////////////
inline void thread_reset_signal( thread_t * thread,
                                 uint32_t   mask )
{
    hal_atomic_and( &thread->signals , ~mask );
}

//////////////////////////////////
inline bool_t thread_is_joinable()
{
    thread_t * this = CURRENT_THREAD;
    return( (this->brothers_list.next != XPTR_NULL) &&
            (this->brothers_list.pred != XPTR_NULL) );
}

//////////////////////////////////
inline bool_t thread_is_runnable()
{
    thread_t * this = CURRENT_THREAD;
    return( this->blocked == 0 );
}

////////////////////////////////
inline bool_t thread_can_yield()
{
    thread_t * this = CURRENT_THREAD;
    return ( (this->local_locks == 0) && (this->remote_locks == 0) );
}

///////////////////////////
bool_t thread_check_sched()
{
    thread_t * this = CURRENT_THREAD;

    // check locks count
    if( (this->local_locks != 0) || (this->remote_locks != 0) ) return false;

    // compute elapsed time, taking into account 32 bits register wrap
    uint32_t elapsed;
    uint32_t time_now  = hal_time_stamp();
    uint32_t time_last = this->time_last_check;
    if( time_now < time_last ) elapsed = (0xFFFFFFFF - time_last) + time_now;
    else                       elapsed = time_now - time_last;

    // update thread time
    this->time_last_check = time_now;

    // check elapsed time
    if( elapsed < CONFIG_CORE_CHECK_EVERY ) return false;
    else                                    return true;
}
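
/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * thread_check_sched() is a cheap polling test: it returns true only when
 * the calling thread holds no local or remote lock and at least
 * CONFIG_CORE_CHECK_EVERY cycles have elapsed since the last check, so a
 * long-running kernel loop can bound scheduling latency as sketched below
 * (work_to_do and do_one_unit_of_work are placeholders).
 *
 *   while( work_to_do )
 *   {
 *       do_one_unit_of_work();
 *       if( thread_check_sched() ) sched_yield();
 *   }
 */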

/////////////////////
error_t thread_exit()
{
    uint32_t   sr_save;

    thread_t * this = CURRENT_THREAD;

    // check that this thread can be descheduled
    if( !thread_can_yield() )
    {
        printk("ERROR in %s : thread %x in process %x on core %d in cluster %x\n"
               " has not released all its locks\n",
               __FUNCTION__ , this->trdid , this->process->pid ,
               CURRENT_CORE->lid , local_cxy );
        return EINVAL;
    }

    if( this->flags & THREAD_FLAG_DETACHED )
    {
        // if detached, set the KILL signal and the blocking cause atomically
        hal_disable_irq( &sr_save );
        thread_set_signal( this , THREAD_SIG_KILL );
        thread_block( this , THREAD_BLOCKED_EXIT );
        hal_restore_irq( sr_save );
    }
    else
    {
        // if attached, only set the blocking cause
        thread_block( this , THREAD_BLOCKED_EXIT );
    }

    // deschedule
    sched_yield();
    return 0;

} // end thread_exit()

/////////////////////////////////////
void thread_block( thread_t * thread,
                   uint32_t   cause )
{
    // set blocking cause
    hal_atomic_or( &thread->blocked , cause );

}  // end thread_block()

/////////////////////////////////////
void thread_unblock( xptr_t   thread,
                     uint32_t cause )
{
    // get thread cluster and local pointer
    cxy_t      cxy = GET_CXY( thread );
    thread_t * ptr = (thread_t *)GET_PTR( thread );

    // reset blocking cause
    hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );

}  // end thread_unblock()

/////////////////////////////////////
void thread_kill( thread_t * target )
{
    // set SIG_KILL signal in target thread descriptor
    thread_set_signal( target , THREAD_SIG_KILL );

    // set the global blocked bit in target thread descriptor
    thread_block( target , THREAD_BLOCKED_GLOBAL );

    // send an IPI to force rescheduling on the target thread core
    dev_icu_send_ipi( local_cxy , target->core->lid );

}  // end thread_kill()
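
/*
 * Editor's note: illustrative sketch of the kill sequence, not part of the
 * original file. thread_kill() only marks the target and interrupts its core;
 * the actual cleanup is assumed (not shown in this file) to happen later, on
 * the target core, when the scheduler observes the THREAD_SIG_KILL signal and
 * eventually calls thread_destroy().
 *
 *   thread_t * target = ...;   // local pointer on the victim thread (placeholder)
 *   thread_kill( target );     // mark + block + IPI
 *   // from this point the victim carries THREAD_BLOCKED_GLOBAL and cannot run
 */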


/////////////////////////
void * thread_idle_func()
{
    lid_t  lid = CURRENT_CORE->lid;

    while( 1 )
    {
        thread_dmsg("\n[INFO] %s : core %d in cluster %x goes to sleeping state at cycle %d\n",
                    __FUNCTION__ , lid , local_cxy , hal_time_stamp() );

        // force core to sleeping state
        hal_core_sleep();

        thread_dmsg("\n[INFO] %s : core %d in cluster %x wake up at cycle %d\n",
                    __FUNCTION__ , lid , local_cxy , hal_time_stamp() );

        // force scheduling at wake-up
        sched_yield();
    }
}  // end thread_idle_func()