Changeset 409 for trunk/kernel


Ignore:
Timestamp:
Dec 20, 2017, 4:51:09 PM (7 years ago)
Author:
alain
Message:

Fix bugs in exec

Location:
trunk/kernel
Files:
39 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/fs/vfs.c

    r408 r409  
    335335////////////////////////////////////////////
    336336void vfs_inode_set_size( xptr_t    inode_xp,
    337                               uint32_t  size )
     337                         uint32_t  size )
    338338{
    339339    // get inode cluster and local pointer
     
    370370
    371371/////////////////////////////////////////
    372 xptr_t vfs_inode_owner( xptr_t inode_xp )
    373 {
    374     // get inode cluster and local pointer
    375     cxy_t         cxy = GET_CXY( inode_xp );
    376     vfs_inode_t * ptr = (vfs_inode_t *)GET_PTR( inode_xp );
    377 
    378     // get the main lock
    379     return remote_spinlock_owner( XPTR( cxy , &ptr->main_lock ) );
    380 }
    381 
    382 /////////////////////////////////////////
    383 void vfs_inode_display( xptr_t inode_xp )
     372void vfs_inode_get_name( xptr_t inode_xp,
     373                         char * name )
    384374{
    385375    cxy_t          inode_cxy;
     
    389379    vfs_dentry_t * dentry_ptr;
    390380   
    391     char           name[CONFIG_VFS_MAX_NAME_LENGTH];
    392 
    393381    // get inode cluster and local pointer
    394382    inode_cxy = GET_CXY( inode_xp );
     
    411399                           XPTR( dentry_cxy , &dentry_ptr->name ) );
    412400    }
    413 
    414     // display inode header
    415     printk("\n***** inode <%s> [%x in cluster %x]\n",
    416            name , GET_PTR(inode_xp) , GET_CXY(inode_xp) );
    417 
    418     // display children from xhtab
    419     xhtab_display( XPTR( inode_cxy , &inode_ptr->children ) );
    420 
    421 }  // end vfs_inode_display()
     401}  // end vfs_inode_get_name()
    422402
    423403////////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/fs/vfs.h

    r407 r409  
    421421
    422422/******************************************************************************************
    423  * This debug function returns the current owner of the inode main lock.
     423 * This debug function copies the name of a remote inode identified by the <inode_xp>
     424 * argument to a local buffer identified by the <name> argument.
     425 * The local buffer size must be at least CONFIG_VFS_MAX_NAME_LENGTH.
    424426 *****************************************************************************************
    425427 * @ inode_xp  : extended pointer on the remote inode.
    426  * @ return extended pointer on owner thread / return XPTR_NULL if lock not taken.
    427  *****************************************************************************************/
    428 xptr_t vfs_inode_owner( xptr_t inode_xp );
    429 
    430 /******************************************************************************************
    431  * This debug function diplays the name of the inode identified by the <inode_xp>
    432  * argument, and all children names for a directory.
    433  *****************************************************************************************
    434  * @ inode_xp  : extended pointer on the remote inode.
    435  *****************************************************************************************/
    436 void vfs_inode_display( xptr_t inode_xp );
     428 * @ name      : local buffer pointer.
     429 *****************************************************************************************/
     430void vfs_inode_get_name( xptr_t inode_xp,
     431                         char * name );
    437432
    438433
  • trunk/kernel/kern/cluster.c

    r408 r409  
    281281    lpid_t lpid       = LPID_FROM_PID( pid );
    282282
     283    pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
     284
    283285    // check pid argument
    284     if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || (owner_cxy != local_cxy) )
    285     {
    286         panic("illegal PID");
    287     }
    288 
    289     pmgr_t  * pm = &LOCAL_CLUSTER->pmgr;
     286    assert( (lpid < CONFIG_MAX_PROCESS_PER_CLUSTER) && (owner_cxy == local_cxy) ,
     287    __FUNCTION__ , "illegal PID" );
     288
     289    // check number of copies
     290    assert( (pm->copies_nr[lpid] == 0) ,
     291    __FUNCTION__ , "number of copies must be 0" ); 
    290292
    291293    // get the process manager lock
  • trunk/kernel/kern/cluster.h

    r408 r409  
    130130    int32_t           threads_var;     /*! threads number increment from last DQDT update */
    131131
    132         dqdt_node_t       dqdt_tbl[CONFIG_MAX_DQDT_DEPTH];     /*! embedded DQDT nodes        */
     132        dqdt_node_t       dqdt_tbl[CONFIG_DQDT_LEVELS_NR]; /*! embedded DQDT nodes in cluster */
    133133
    134134    // Local process manager
  • trunk/kernel/kern/core.c

    r408 r409  
    107107        ticks = core->ticks_nr++;
    108108
    109     // handle signals for all threads executing on this core
    110     sched_handle_signals( core );
    111 
    112         // handle scheduler
     109        // handle scheduler
    113110        if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( "TICK");
    114111
  • trunk/kernel/kern/core.h

    r367 r409  
    6060        struct thread_s   * fpu_owner;      /*! pointer on current FPU owner thread        */
    6161    uint32_t            rand_last;      /*! last computed random value                 */
     62
    6263        scheduler_t         scheduler;      /*! embedded private scheduler                 */
    6364
  • trunk/kernel/kern/do_syscall.c

    r408 r409  
    5454    sys_thread_join,        // 3
    5555    sys_thread_detach,      // 4
    56     sys_undefined,          // 5
     56    sys_thread_cancel,      // 5
    5757    sys_sem,                // 6
    5858    sys_condvar,            // 7
     
    110110        else if( index == SYS_THREAD_JOIN    ) return "THREAD_JOIN";      // 3
    111111        else if( index == SYS_THREAD_DETACH  ) return "THREAD_DETACH";    // 4
     112        else if( index == SYS_THREAD_CANCEL  ) return "THREAD_CANCEL";    // 5
    112113        else if( index == SYS_SEM            ) return "SEM";              // 6
    113114        else if( index == SYS_CONDVAR        ) return "CONDVAR";          // 7
     
    189190        error = syscall_tbl[service_num] ( arg0 , arg1 , arg2 , arg3 );
    190191
     192    // check kernel stack overflow
     193    assert( (this->signature == THREAD_SIGNATURE), __FUNCTION__, "kernel stack overflow\n" );
     194
    191195    // update kernel time
    192196        thread_kernel_time_update( this );
  • trunk/kernel/kern/kernel_init.c

    r408 r409  
    770770    thread->core = &LOCAL_CLUSTER->core_tbl[core_lid];
    771771
    772     // each core initializes locks_root" and "xlocks_root" in idle thread descriptor
     772#if CONFIG_LOCKS_DEBUG
    773773    list_root_init( &thread->locks_root );
    774774    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
     775#endif
    775776
    776777    // CP0 in I/O cluster initialises TXT0 chdev descriptor
  • trunk/kernel/kern/printk.h

    r408 r409  
    253253#endif
    254254
     255#if CONFIG_KILL_DEBUG
     256#define kill_dmsg(...)   if(hal_time_stamp() > CONFIG_KILL_DEBUG) printk(__VA_ARGS__)
     257#else
     258#define kill_dmsg(...)
     259#endif
     260
    255261#if CONFIG_KINIT_DEBUG
    256262#define kinit_dmsg(...)   if(hal_time_stamp() > CONFIG_KINIT_DEBUG) printk(__VA_ARGS__)
  • trunk/kernel/kern/process.c

    r408 r409  
    88 * Copyright (c) UPMC Sorbonne Universites
    99 *
    10  * This file is part of ALMOS-MKH..
     10 * This file is part of ALMOS-MKH.
    1111 *
    1212 * ALMOS-MKH is free software; you can redistribute it and/or modify it
     
    2828#include <hal_remote.h>
    2929#include <hal_uspace.h>
     30#include <hal_irqmask.h>
    3031#include <errno.h>
    3132#include <printk.h>
     
    4950#include <elf.h>
    5051#include <syscalls.h>
     52#include <signal.h>
    5153
    5254//////////////////////////////////////////////////////////////////////////////////////////
     
    124126    uint32_t    stderr_id;
    125127
    126 process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
    127 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     128process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / ppid = %x\n",
     129__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid , ppid );
    128130
    129131    // get model process cluster and local pointer
     
    136138    process->ref_xp = XPTR( local_cxy , process );
    137139
    138     // initialize vmm
     140    // initialize vmm as empty
    139141    vmm_init( process );
    140142
     
    142144__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
    143145
    144     // initialize fd_array (not for kernel)
     146    // initialize fd_array as empty
    145147    process_fd_init( process );
    146148
    147149    // create stdin / stdout / stderr pseudo-files
    148     if( ppid == 0 )                                        // process_init
     150    if( ppid == 0 )                                       // process_init
    149151    {
    150152        error1 = vfs_open( process,
     
    169171                           &stderr_id );
    170172    }
    171     else                                                  // other user process
     173    else                                                  // any other process
    172174    {
    173175        error1 = vfs_open( process,
     
    199201    "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id );
    200202
    201     // initialize specific files, cwd_lock, and fd_array
     203    // initialize specific inodes root and cwd
    202204    process->vfs_root_xp = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
    203205                                                         &model_ptr->vfs_root_xp ) );
    204206    process->vfs_cwd_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
    205207                                                         &model_ptr->vfs_cwd_xp ) );
    206     process->vfs_bin_xp  = (xptr_t)hal_remote_lwd( XPTR( model_cxy,
    207                                                          &model_ptr->vfs_bin_xp ) );
    208     vfs_file_count_up( process->vfs_root_xp );
    209     vfs_file_count_up( process->vfs_cwd_xp );
    210     vfs_file_count_up( process->vfs_bin_xp );
    211 
     208    vfs_inode_remote_up( process->vfs_root_xp );
     209    vfs_inode_remote_up( process->vfs_cwd_xp );
     210
     211    remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
     212
     213    // copy all open file descriptors (other than stdin / stdout / stderr)
    212214    process_fd_remote_copy( XPTR( local_cxy , &process->fd_array ),
    213215                            XPTR( model_cxy , &model_ptr->fd_array ) );
    214216
    215     remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
    216 
    217 process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n",
     217process_dmsg("\n[DBG] %s : core[%x,%d] / fd array for process %x\n",
    218218__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
    219219
     
    352352    remote_spinlock_unlock( copies_lock );
    353353
    354     // synchronize memory
    355354        hal_fence();
    356355
    357356    // From this point, the process descriptor is unreachable
    358357
    359     // close all open files and update dirty TODO [AG]
    360 
    361     // release signal manager TODO [AG]
     358    // FIXME close all open files and update dirty [AG]
    362359
    363360    // Decrease refcount for bin file, root file and cwd file
     
    374371}  // end process_destroy()
    375372
     373/////////////////////////////////////////////////
     374char * process_action_str( uint32_t action_type )
     375{
     376    if     ( action_type == BLOCK_ALL_THREADS   ) return "BLOCK";
     377    else if( action_type == UNBLOCK_ALL_THREADS ) return "UNBLOCK";
     378    else if( action_type == DELETE_ALL_THREADS  ) return "DELETE";
     379    else                                          return "undefined";
     380}
     381
     382////////////////////////////////////////////
     383void process_sigaction( process_t * process,
     384                        uint32_t    action_type )
     385{
     386    cxy_t              owner_cxy;         // owner cluster identifier
     387    lpid_t             lpid;              // process index in owner cluster
     388    cluster_t        * cluster;           // pointer on cluster manager
     389    xptr_t             root_xp;           // extended pointer on root of copies
     390    xptr_t             lock_xp;           // extended pointer on lock protecting copies
     391    xptr_t             client_xp;         // extended pointer on client thread
     392    uint32_t           rsp_count;         // number of expected responses
     393    xptr_t             rsp_xp;            // extended pointer on responses counter
     394    xptr_t             iter_xp;           // iterator on copies list
     395    xptr_t             process_xp;        // extended pointer on process copy
     396    cxy_t              process_cxy;       // process copy cluster identifier
     397    process_t        * process_ptr;       // local pointer on process copy
     398
     399signal_dmsg("\n[DBG] %s : enter for signal %s to process %x in cluster %x\n",
     400__FUNCTION__ , process_action_str( action_type ) , process , local_cxy );
     401
     402    thread_t         * this = CURRENT_THREAD;
     403
     404    // get extended pointer on client thread and response counter
     405    client_xp = XPTR( local_cxy , this );
     406    rsp_xp    = XPTR( local_cxy , &rsp_count );
     407
     408    // get owner cluster identifier and process lpid
     409    owner_cxy = CXY_FROM_PID( process->pid );
     410    lpid      = LPID_FROM_PID( process->pid );
     411
     412    assert( (owner_cxy == local_cxy) , __FUNCTION__ , "illegal cluster\n" );
     413   
     414    // get local pointer on local cluster manager
     415    cluster = LOCAL_CLUSTER;
     416
     417    // get extended pointers on copies root, copies lock, and number of copies
     418    root_xp   = XPTR( local_cxy , &cluster->pmgr.copies_root[lpid] );
     419    lock_xp   = XPTR( local_cxy , &cluster->pmgr.copies_lock[lpid] );
     420
     421    // initialize responses number
     422    rsp_count = cluster->pmgr.copies_nr[lpid];
     423
     424    // take the lock protecting the copies
     425    remote_spinlock_lock( lock_xp );
     426
     427    // send RPCs to all process copies
     428    XLIST_FOREACH( root_xp , iter_xp )
     429    {
     430        process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     431        process_cxy = GET_CXY( process_xp );
     432        process_ptr = (process_t *)GET_PTR( process_xp );
     433
     434printk("\n    @@@ %s : process = %x / pid = %x / ppid = %x\n",
     435__FUNCTION__ , process_ptr , process_ptr->pid , process_ptr->ppid );
     436
     437        rpc_process_sigaction_client( process_cxy,
     438                                      process_ptr,
     439                                      action_type,
     440                                      rsp_xp,
     441                                      client_xp );
     442    }
     443   
     444    // release the lock protecting process copies
     445    remote_spinlock_unlock( lock_xp );
     446
     447    // block and deschedule to wait response
     448    thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
     449    sched_yield("BLOCKED on RPC");
     450
     451signal_dmsg("\n[DBG] %s : exit for signal %s to process %x in cluster %x\n",
     452__FUNCTION__ , process_action_str( action_type ) , process , local_cxy );
     453
     454}  // end process_sigaction()
     455
    376456////////////////////////////////////////
    377 void process_kill( process_t * process )
    378 {
    379     thread_t     * thread;    // pointer on current thead descriptor
    380     uint32_t       ltid;      // index in process th_tbl
    381     uint32_t       count;     // thread counter
    382 
    383 printk("\n[@@@] %s enter\n", __FUNCTION__ );
    384 
    385     // get lock protecting th_tbl[]
     457void process_block( process_t * process,
     458                    xptr_t      rsp_xp,
     459                    xptr_t      client_xp )
     460{
     461    thread_t          * target;         // pointer on target thread
     462    uint32_t            ltid;           // index in process th_tbl
     463    thread_t          * killer;         // killer thread pointer
     464    uint32_t            count;          // requests counter
     465    volatile uint32_t   sig_rsp_count;  // responses counter
     466    cxy_t               client_cxy;     // client thread cluster identifier
     467    thread_t          * client_ptr;     // client thread pointer
     468    core_t            * client_core;    // client thread core pointer
     469
     470    // get local killer thread pointer
     471    killer = CURRENT_THREAD;
     472
     473signal_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
     474__FUNCTION__ , process->pid , local_cxy );
     475
     476    // get lock protecting process th_tbl[]
    386477    spinlock_lock( &process->th_lock );
    387478
    388     // first loop on threads to send the THREAD_SIG_KILL signal to all process threads
    389     // we use both "ltid" and "count" indexes, because it can exist "holes" in th_tbl
    390     for( ltid = 0 , count = 0  ;
    391          (ltid < CONFIG_THREAD_MAX_PER_CLUSTER) && (count < process->th_nr) ;
     479    // initialize local responses counter
     480    sig_rsp_count = process->th_nr;
     481
     482    // loop on process threads to block and deschedule all threads in cluster
     483    // we use both "ltid" and "count" because it can exist "holes" in th_tbl
     484    for( ltid = 0 , count = 0 ; count < process->th_nr ; ltid++ )
     485    {
     486        target = process->th_tbl[ltid];
     487
     488        if( target != NULL )             // thread found
     489        {
     490            count++;
     491
     492            // set signal in target thread descriptor
     493            thread_set_signal( target , (uint32_t *)sig_rsp_count );
     494
     495            // set the global blocked bit in target thread descriptor.
     496            thread_block( target , THREAD_BLOCKED_GLOBAL );
     497
     498            // - if the killer thread and the target thread are not on the same core
    499            //   we want the scheduler of target thread to acknowledge the signal
     500            //   to be sure that the target thread is descheduled
     501            // - if the killer thread and the target thread are on the same core
     502            //   we simply decrement the response counter.
     503            if( killer->core->lid != target->core->lid )
     504            {
     505                dev_pic_send_ipi( local_cxy , target->core->lid );
     506            }
     507            else                                                         
     508            {
     509                hal_atomic_add( (void *)&sig_rsp_count , -1 );
     510            }
     511        }
     512    }
     513
    514    // poll the responses counter
     515    while( 1 )
     516    {
     517        // exit loop when all responses received
     518        if ( sig_rsp_count == 0 ) break;
     519   
     520        // wait 1000 cycles before retry
     521        hal_fixed_delay( 1000 );
     522    }
     523
     524    // acknowledge client thread & unblock client thread if last response
     525    client_cxy  = GET_CXY( client_xp );
     526    client_ptr  = (thread_t *)GET_PTR( client_xp );
     527    client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
     528    if( hal_remote_atomic_add( rsp_xp , -1 ) == 1 )
     529    {
     530        thread_unblock( client_xp , THREAD_BLOCKED_RPC);
     531        dev_pic_send_ipi( client_cxy , client_core->lid );
     532    }
     533
     534signal_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
     535__FUNCTION__ , process->pid , local_cxy , count );
     536
     537}  // end process_block()
     538
     539//////////////////////////////////////////
     540void process_unblock( process_t * process,
     541                      xptr_t      rsp_xp,
     542                      xptr_t      client_xp )
     543{
    544    thread_t          * target;        // pointer on target thread
     545    uint32_t            ltid;          // index in process th_tbl
     546    thread_t          * killer;        // killer thread pointer
     547    uint32_t            req_count;     // requests counter
     548    cxy_t               client_cxy;    // client thread cluster identifier
     549    thread_t          * client_ptr;    // client thread pointer
     550    core_t            * client_core;   // client thread core pointer
     551
     552    // get local killer thread pointer
     553    killer = CURRENT_THREAD;
     554
     555signal_dmsg("\n[DBG] %s : enter for process %x in cluster %x\n",
     556__FUNCTION__ , process->pid , local_cxy );
     557
     558    // get lock protecting process th_tbl[]
     559    spinlock_lock( &process->th_lock );
     560
     561    // loop on process threads to unblock all threads in cluster
     562    // we use both "ltid" and "req_count" because it can exist "holes" in th_tbl
     563    for( ltid = 0 , req_count = 0 ;
     564         req_count < process->th_nr ;
    392565         ltid++ )
    393566    {
     567        target = process->th_tbl[ltid];
     568
     569        if( target != NULL )             // thread found
     570        {
     571            req_count++;
     572
     573            // reset the global blocked bit in target thread descriptor.
     574            thread_unblock( XPTR( local_cxy , target ) , THREAD_BLOCKED_GLOBAL );
     575        }
     576    }
     577
     578    // acknowledge client thread & unblock client thread if last response
     579    client_cxy  = GET_CXY( client_xp );
     580    client_ptr  = (thread_t *)GET_PTR( client_xp );
     581    client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
     582    if( hal_remote_atomic_add( rsp_xp , -1 ) == 1 )
     583    {
     584        thread_unblock( client_xp , THREAD_BLOCKED_RPC);
     585        dev_pic_send_ipi( client_cxy , client_core->lid );
     586    }
     587
     588signal_dmsg("\n[DBG] %s : exit for process %x in cluster %x / %d threads blocked\n",
     589__FUNCTION__ , process->pid , local_cxy , req_count );
     590
     591}  // end process_unblock()
     592
     593/////////////////////////////////////////
     594void process_delete( process_t * process,
     595                     xptr_t      rsp_xp,
     596                     xptr_t      client_xp )
     597{
     598    thread_t          * thread;        // pointer on target thread
     599    uint32_t            ltid;          // index in process th_tbl
     600    uint32_t            count;         // request counter
     601    pid_t               pid;           // process PID
     602    cxy_t               client_cxy;    // client thread cluster identifier
     603    thread_t          * client_ptr;    // client thread pointer
     604    core_t            * client_core;   // client thread core pointer
     605
     606    // get process PID
     607    pid = process->pid;
     608
     609signal_dmsg("\n[DBG] %s : enter for process %x in cluster %x at cycle %d\n",
     610__FUNCTION__ , pid , local_cxy , (uint32_t)hal_get_cycles() );
     611
     612    // loop on threads to release memory allocated to threads
     613    for( ltid = 0 , count = 0  ; count < process->th_nr ; ltid++ )
     614    {
    394615        thread = process->th_tbl[ltid];
    395616
    396         if( thread != NULL )
     617        if( thread != NULL )             // thread found
    397618        {
    398             thread_kill( thread );
    399619            count++;
     620
     621            // detach thread from parent if attached
     622            if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
     623            thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
     624
     625            // detach thread from process
     626            process_remove_thread( thread );
     627
     628            // remove thread from scheduler
     629            sched_remove_thread( thread );
     630
     631            // release memory allocated to thread
     632            thread_destroy( thread );
    400633        }
    401634    }
    402635
    403 printk("\n[@@@] %s : %d signal(s) sent\n", __FUNCTION__, count );
    404 
    405     // second loop on threads to wait acknowledge from scheduler,
    406     // unlink thread from process and parent thread, and release thread descriptor
    407     for( ltid = 0 , count = 0  ;
    408          (ltid < CONFIG_THREAD_MAX_PER_CLUSTER) && (count < process->th_nr) ;
    409          ltid++ )
    410     {
    411         thread = process->th_tbl[ltid];
    412 
    413         if( thread != NULL )
    414         {
    415 
    416 printk("\n[@@@] %s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
    417 
    418             // poll the THREAD_SIG_KILL bit until reset
    419             while( thread->signals & THREAD_SIG_KILL ) asm volatile( "nop" );
    420 
    421 printk("\n[@@@] %s exit polling\n", __FUNCTION__ );
    422 
    423             // detach target thread from parent if attached
    424             if( (thread->flags & THREAD_FLAG_DETACHED) != 0 )
    425             thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
    426 
    427             // unlink thread from process
    428             process_remove_thread( thread );
    429 
    430             // release memory for thread descriptor
    431             thread_destroy( thread );
    432 
    433             count++;
    434         }
    435     }
    436 
    437 printk("\n[@@@] %s : %d ack(s) received\n", __FUNCTION__, count );
    438 
    439     // release lock protecting th_tbl[]
    440     spinlock_unlock( &process->th_lock );
    441 
    442     // release memory allocated for process descriptor
    443     process_destroy( process );
    444 
    445 printk("\n[DBG] %s : core[%x,%d] exit\n",
    446 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
    447 
    448 }  // end process_kill()
     636    // release memory allocated to process descriptors
     637    // for all clusters other than the owner cluster
     638    if( local_cxy != CXY_FROM_PID( process->pid ) ) process_destroy( process );
     639
     640    // acknowledge client thread & unblock client thread if last response
     641    client_cxy  = GET_CXY( client_xp );
     642    client_ptr  = (thread_t *)GET_PTR( client_xp );
     643    client_core = (core_t *)hal_remote_lpt( XPTR( client_cxy , &client_ptr->core ) );
     644    if( hal_remote_atomic_add( rsp_xp , -1 ) == 1 )
     645    {
     646        thread_unblock( client_xp , THREAD_BLOCKED_RPC);
     647        dev_pic_send_ipi( client_cxy , client_core->lid );
     648    }
     649
     650signal_dmsg("\n[DBG] %s : exit for process %x in cluster %x at cycle %d\n",
     651__FUNCTION__ , pid , local_cxy , (uint32_t)hal_get_cycles() );
     652
     653}  // end process_delete()
    449654
    450655///////////////////////////////////////////////
     
    496701
    497702    return process_ptr;
    498 }
     703
     704}  // end process_get_local_copy()
    499705
    500706//////////////////////////////////////////////////////////////////////////////////////////
     
    621827        remote_spinlock_lock( XPTR( src_cxy , &src_ptr->lock ) );
    622828
    623     // loop on all entries in source process fd_array
    624     for( fd = 0 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
     829    // loop on all entries other than
     830    // the three first entries: stdin/stdout/stderr
     831    for( fd = 3 ; fd < CONFIG_PROCESS_FILE_MAX_NR ; fd++ )
    625832        {
    626833                entry = (xptr_t)hal_remote_lwd( XPTR( src_cxy , &src_ptr->array[fd] ) );
     
    724931    "parent process must be the reference process\n" );
    725932
    726 process_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
    727 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , hal_get_cycles() );
     933fork_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
     934__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid , (uint32_t)hal_get_cycles() );
    728935
    729936    // allocate a process descriptor
     
    736943    }
    737944
    738 process_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n",
    739  __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
     945fork_dmsg("\n[DBG] %s : core[%x,%d] child process descriptor allocated at cycle %d\n",
     946 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    740947
    741948    // allocate a child PID from local cluster
     
    749956    }
    750957
    751 process_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n",
    752  __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , hal_get_cycles() );
     958fork_dmsg("\n[DBG] %s : core[%x, %d] child process PID allocated = %x at cycle %d\n",
     959 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, new_pid , (uint32_t)hal_get_cycles() );
    753960
    754961    // initializes child process descriptor from parent process descriptor
     
    758965                            parent_process_xp );
    759966
    760 process_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
     967fork_dmsg("\n[DBG] %s : core[%x, %d] child process initialised at cycle %d\n",
    761968__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
    762969
     
    773980    }
    774981
    775 process_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
    776 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
     982fork_dmsg("\n[DBG] %s : core[%x, %d] child process VMM copied at cycle %d\n",
     983__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    777984
    778985    // create child thread descriptor from parent thread descriptor
     
    789996    }
    790997
    791 process_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n",
    792 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
     998fork_dmsg("\n[DBG] %s : core[%x,%d] child thread created at cycle %d\n",
     999__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    7931000
    7941001    // update parent process GPT to set Copy_On_Write for shared data vsegs
     
    8041011    }
    8051012
    806 process_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
    807 __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, hal_get_cycles() );
     1013fork_dmsg("\n[DBG] %s : core[%x,%d] COW set in parent_process at cycle %d\n",
     1014__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
    8081015
    8091016    // update children list in parent process
     
    8211028    *child_pid    = new_pid;
    8221029
     1030fork_dmsg("\n[DBG] %s : core[%x,%d] exit at cycle %d\n",
     1031__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, (uint32_t)hal_get_cycles() );
     1032
    8231033    return 0;
    8241034
    8251035}  // end process_make_fork()
     1036
      1037/*  deprecated because we don't want to destroy the existing process descriptor
    8261038
    8271039/////////////////////////////////////////////////////
     
    8411053    pid  = exec_info->pid;
    8421054
    843     // check local cluster is old process owner
     1055    // check local cluster is process owner
    8441056    assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
    8451057    "local cluster %x is not owner for process %x\n", local_cxy, pid );
     
    8761088        }
    8771089
    878 exec_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs / process %x / path = %s\n",
    879 __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path );
     1090exec_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
     1091__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
    8801092
    8811093    // select a core in local cluster to execute the main thread
     
    9081120                   XPTR( local_cxy , &new->brothers_list ) );
    9091121
    910     // FIXME request destruction of old process copies and threads in all clusters
     1122    // request destruction of old process copies and threads in all clusters
     1123    process_sigaction( old , SIGKILL );
    9111124
    9121125    // activate new thread
     
    9201133}  // end process_make_exec()
    9211134
     1135*/
     1136
     1137/////////////////////////////////////////////////////
     1138error_t process_make_exec( exec_info_t  * exec_info )
     1139{
     1140    char           * path;                    // pathname to .elf file
     1141    process_t      * process;                 // local pointer on old process
     1142    pid_t            pid;                     // old process identifier
     1143    thread_t       * thread;                  // pointer on new main thread
     1144    pthread_attr_t   attr;                    // main thread attributes
     1145    lid_t            lid;                     // selected core local index
     1146        error_t          error;
     1147
     1148        // get .elf pathname and PID from exec_info
     1149        path = exec_info->path;
     1150    pid  = exec_info->pid;
     1151
     1152    // check local cluster is process owner
     1153    assert( (CXY_FROM_PID( pid ) == local_cxy), __FUNCTION__,
     1154    "local cluster %x is not owner for process %x\n", local_cxy, pid );
     1155
     1156exec_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x / path = %s\n",
     1157__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid , path );
     1158
     1159    // get process local pointer
     1160    process = (process_t *)cluster_get_local_process_from_pid( pid );
     1161   
     1162    assert( (process != NULL ) , __FUNCTION__ ,
     1163    "process %x not found in cluster %x\n", pid , local_cxy );
     1164
     1165    // reset the existing vmm
     1166    vmm_destroy( process );
     1167
     1168exec_dmsg("\n[DBG] %s : core[%x,%d] VMM cleared\n",
     1169__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
     1170
     1171    // block all existing process threads
     1172    process_sigaction( process , BLOCK_ALL_THREADS );
     1173
     1174    // kill all existing threads and process descriptors (other than owner)
     1175    process_sigaction( process , DELETE_ALL_THREADS );
     1176
     1177    // check no threads
     1178    assert( (process->th_nr == 0) , __FUNCTION__ , "no threads at this point" );
     1179
     1180exec_dmsg("\n[DBG] %s : core[%x,%d] all threads deleted\n",
     1181__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
     1182
     1183    // re-initialize VMM
     1184    vmm_init( process );
     1185
     1186    // register "code" and "data" vsegs as well as entry-point and vfs_bin_xp
     1187    // in VMM, using information contained in the elf file.
     1188        if( elf_load_process( path , process ) )
     1189        {
     1190                printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n",
     1191                __FUNCTION__, pid , path );
     1192        process_destroy( process );
     1193        return -1;
     1194        }
     1195
     1196exec_dmsg("\n[DBG] %s : core[%x,%d] new vsegs registered / path = %s\n",
     1197__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
     1198
     1199// @@@
     1200vmm_display( process , true );
     1201// @@@
     1202
     1203    // select a core in local cluster to execute the new main thread
     1204    lid  = cluster_select_local_core();
     1205
     1206    // initialize pthread attributes for new main thread
     1207    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
     1208    attr.cxy        = local_cxy;
     1209    attr.lid        = lid;
     1210
     1211    // create and initialize thread descriptor
     1212        error = thread_user_create( pid,
     1213                                (void *)process->vmm.entry_point,
     1214                                exec_info->args_pointers,
     1215                                &attr,
     1216                                &thread );
     1217        if( error )
     1218        {
     1219                printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n",
     1220                       __FUNCTION__, pid , path );
     1221        process_destroy( process );
     1222        return -1;
     1223        }
     1224
     1225exec_dmsg("\n[DBG] %s : core[%x,%d] created main thread %x for new process %x\n",
     1226__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid );
     1227
     1228    // activate new thread
     1229        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
     1230
     1231exec_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",
     1232__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path  );
     1233
     1234        return 0;
     1235
     1236}  // end process_make_exec()
     1237
     1238////////////////////////////////////////////
     1239void process_make_kill( process_t * process,
     1240                        uint32_t    sig_id )
     1241{
     1242    // this function must be executed by a thread running in owner cluster
     1243    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
     1244    "must execute in owner cluster" );
     1245
     1246    // analyse signal type
     1247    switch( sig_id )
     1248    {
     1249        case SIGSTOP:     // block all threads
     1250        {
     1251            process_sigaction( process , BLOCK_ALL_THREADS );
     1252        }
     1253        break;
     1254        case SIGCONT:     // unblock all threads
     1255        {
     1256            process_sigaction( process , UNBLOCK_ALL_THREADS );
     1257        }
     1258        break;
     1259        case SIGKILL:  // block all threads, then delete all threads
     1260        {
     1261            process_sigaction( process , BLOCK_ALL_THREADS );
     1262            process_sigaction( process , DELETE_ALL_THREADS );
     1263            process_destroy( process );
     1264        }
     1265        break;
     1266    }
     1267}  // end process_make_kill()
     1268
     1269////////////////////////////////////////////
     1270void process_make_exit( process_t * process,
     1271                        uint32_t    status )
     1272{
     1273    // this function must be executed by a thread running in owner cluster
     1274    assert( (CXY_FROM_PID( process->pid ) == local_cxy) , __FUNCTION__ ,
     1275    "must execute in owner cluster" );
     1276
     1277    // block all threads in all clusters
     1278    process_sigaction( process , BLOCK_ALL_THREADS );
     1279
     1280    // delete all threads in all clusters
     1281    process_sigaction( process , DELETE_ALL_THREADS );
     1282
     1283    // delete local process descriptor
     1284    process_destroy( process );
     1285
     1286}  // end process_make_exit()
     1287
    9221288//////////////////////////
    9231289void process_init_create()
    9241290{
    925     exec_info_t   exec_info;     // structure to be passed to process_make_exec()
    926     process_t   * process;       // local pointer on process_init descriptor
    927     pid_t         pid;           // process_init identifier
    928     error_t       error;
    929 
    930 process_dmsg("\n[DBG] %s : enters in cluster %x\n",
    931 __FUNCTION__ , local_cxy );
     1291    process_t      * process;       // local pointer on process_init descriptor
     1292    pid_t            pid;           // process_init identifier
     1293    thread_t       * thread;        // local pointer on main thread
     1294    pthread_attr_t   attr;          // main thread attributes
     1295    lid_t            lid;           // selected core local index for main thread
     1296    error_t          error;
     1297
     1298kinit_dmsg("\n[DBG] %s :  core[%x,%d] enters\n",
     1299__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
    9321300
    9331301    // allocates memory for process descriptor from local cluster
     
    9361304    {
    9371305                printk("\n[PANIC] in %s : no memory for process descriptor in cluster %x\n",
    938                 __FUNCTION__, local_cxy );
    939     }
    940 
    941     // get new PID from local cluster
     1306                __FUNCTION__, local_cxy  );
     1307    }
     1308
     1309    // get PID from local cluster
    9421310    error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid );
    9431311    if( error )
     
    9451313                printk("\n[PANIC] in %s : cannot allocate PID in cluster %x\n",
    9461314                __FUNCTION__, local_cxy );
    947     }
    948 
    949     // initialise the process desciptor (parent is local kernel process)
    950     process_reference_init( process,
     1315        process_destroy( process );
     1316    }
     1317
     1318    assert( (LPID_FROM_PID(pid) == 1) , __FUNCTION__ , "LPID must be 1 for process_init" );
     1319
     1320    // initialize process descriptor / parent is local process_zero
     1321    process_reference_init( process,
    9511322                            pid,
    952                             process_zero.pid,
     1323                            0,
    9531324                            XPTR( local_cxy , &process_zero ) );
    9541325
    955     // initialize the exec_info structure
    956     exec_info.pid          = pid;
    957     exec_info.args_nr      = 0;
    958     exec_info.envs_nr      = 0;
    959     strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH );
    960 
    961     // update process descriptor and create thread descriptor
    962         error = process_make_exec( &exec_info );
    963 
     1326kinit_dmsg("\n[DBG] %s : core[%x,%d] / process initialised\n",
     1327__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid );
     1328
     1329    // register "code" and "data" vsegs as well as entry-point
     1330    // in process VMM, using information contained in the elf file.
     1331        if( elf_load_process( CONFIG_PROCESS_INIT_PATH , process ) )
     1332        {
     1333                printk("\n[PANIC] in %s : cannot access .elf file / path = %s\n",
     1334                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
     1335        process_destroy( process );
     1336        }
     1337
     1338kinit_dmsg("\n[DBG] %s : core[%x,%d] vsegs registered / path = %s\n",
     1339__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, CONFIG_PROCESS_INIT_PATH );
     1340
     1341    // select a core in local cluster to execute the main thread
     1342    lid  = cluster_select_local_core();
     1343
     1344    // initialize pthread attributes for main thread
     1345    attr.attributes = PT_ATTR_DETACH | PT_ATTR_CLUSTER_DEFINED | PT_ATTR_CORE_DEFINED;
     1346    attr.cxy        = local_cxy;
     1347    attr.lid        = lid;
     1348
     1349    // create and initialize thread descriptor
     1350        error = thread_user_create( pid,
     1351                                (void *)process->vmm.entry_point,
     1352                                NULL,
     1353                                &attr,
     1354                                &thread );
    9641355        if( error )
    965     {
    966                 printk("\n[PANIC] in %s : cannot exec %s in cluster %x\n",
    967                 __FUNCTION__, CONFIG_PROCESS_INIT_PATH , local_cxy );
    968     }
    969 
    970 process_dmsg("\n[DBG] %s : exit in cluster %x\n",
    971 __FUNCTION__ , local_cxy );
    972                
     1356        {
     1357                printk("\n[PANIC] in %s : cannot create main thread / path = %s\n",
     1358                __FUNCTION__, CONFIG_PROCESS_INIT_PATH );
     1359        process_destroy( process );
     1360        }
     1361
     1362    // activate thread
     1363        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
     1364
    9731365    hal_fence();
    9741366
     1367kinit_dmsg("\n[DBG] %s : core[%x,%d] exit / main thread = %x\n",
     1368__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, thread );
     1369
    9751370}  // end process_init_create()
    9761371
  • trunk/kernel/kern/process.h

    r408 r409  
    5454
    5555/*********************************************************************************************
      56 * This enum defines the actions that can be executed by the process_sigaction() function.
     57 ********************************************************************************************/
     58
     59enum process_sigactions
     60{
     61    BLOCK_ALL_THREADS,
     62    UNBLOCK_ALL_THREADS,
     63    DELETE_ALL_THREADS,
     64};
     65
     66/*********************************************************************************************
    5667 * This structure defines an array of extended pointers on the open file descriptors
    5768 * for a given process. We use an extended pointer because the open file descriptor
     
    7687 * - The PID 16 LSB bits contain the LPID (Local Process Index)
    7788 * - The PID 16 MSB bits contain the owner cluster CXY.
    78  * In each cluster, the process manager allocates LPID values for the process that are
    79  * allocated to this cluster.
    80  * The process descriptor for a PID process is replicated in all clusters containing
    81  * at least one thread of the PID process, with the following rules :
    82  *
     89 * In each cluster, the process manager allocates  the LPID values for the process that
     90 * are owned by this cluster.
     91 * The process descriptor is replicated in all clusters containing at least one thread
     92 * of the PID process, with the following rules :
    8393 * 1) The <pid>, <ppid>, <ref_xp>, <vfs_root_xp>, <vfs_bin_xp>  fields are defined
    8494 *    in all process descriptor copies.
    8595 * 2) The <vfs_cwd_xp> and associated <cwd_lock>, that can be dynamically modified,
    8696 *    are only defined in the reference process descriptor.
    87  * 2) The <vmm>, containing the list of registered vsegs, and the page table, are only
    88  *    complete in the reference process cluster, other copies are read-only caches.
     97 * 2) The <vmm>, containing the VSL (list of registered vsegs), and the GPT (generic
     98 *    page table), are only complete in the reference process cluster, other copies
     99 *    are actually use as read-only caches.
    89100 * 3) the <fd_array>, containing extended pointers on the open file descriptors, is only
    90101 *    complete in the reference process cluster, other copies are read-only caches.
     
    95106 * 6) The <brothers_list>, <local_list>, <copies_list>, <th_tbl>, <th_nr>, <th_lock> fields
    96107 *    are defined in all process descriptors copies.
    97  * 7) The <sig_mgr> field is only defined in the reference cluster. TODO
    98108 ********************************************************************************************/
    99109
     
    130140
    131141    remote_spinlock_t sync_lock;        /*! lock protecting sem,mutex,barrier,condvar lists */
    132 
    133         sig_mgr_t         sig_mgr;          /*! embedded signal manager TODO [AG]               */
    134142}
    135143process_t;
     
    137145/*********************************************************************************************
    138146 * This structure defines the information required by the process_make_exec() function
    139  * to create a new reference process descriptor, and the associated main thread,
    140  * in the parent process owner cluster.
     147 * to create a new reference process descriptor, and the associated main thread.
    141148 ********************************************************************************************/
    142149
     
    176183/*********************************************************************************************
    177184 * This function allocates memory and initializes the "process_init" descriptor and the
    178  * associated "thread_init" descriptor in the local cluster. It is called once at the end
    179  * of the kernel initialisation procedure, by the local kernel process.
     185 * associated "thread_init" descriptor. It is called once at the end of the kernel
     186 * initialisation procedure, by the kernel process in cluster_IO.
    180187 * The "process_init" is the first user process, and all other user processes will be forked
    181188 * from this process. The code executed by "process_init" is stored in a .elf file, whose
    182  * pathname is defined by the CONFIG_PROCESS_INIT_PATH argument.
    183  * Practically, it builds the exec_info structure, and calls the process_make_exec()
    184  * function, that make the real job.
     189 * pathname is defined by the CONFIG_PROCESS_INIT_PATH configuration variable.
     190 * The process_init streams are defined  by the CONFIG_INIT_[STDIN/STDOUT/STDERR] variables.
     191 * Its local process identifier is 1, and parent process is the local kernel process_zero.
    185192 ********************************************************************************************/
    186193void process_init_create();
     
    200207 * descriptor, defined by the <model_xp> argument. The <process> descriptor, the <pid>, and
    201208 * the <ppid> arguments must be previously defined by the caller.
    202  * It can be called by three functions, depending on the process type:
    203  * 1) if "process" is the user "process_init", the parent is the kernel process. It is
     209 * It can be called by two functions, depending on the process type:
     210 * 1) if "process" is the "process_init", the parent is the kernel process. It is
    204211 *    called once, by the process_init_create() function in cluster[xmax-1][ymax-1].
    205212 * 2) if the caller is the process_make_fork() function, the model is generally a remote
    206213 *    process, that is also the parent process.
     214
    207215 * 3) if the caller is the process_make_exec() function, the model is always a local process,
    208  *    but the parent is the parent of the model process.
    209  *
     216 *    and the parent is the parent of the model process. DEPRECATED [AG]
     217
    210218 * The following fields are initialised (for all process but process_zero).
    211219 * - It set the pid / ppid / ref_xp fields.
    212  * - It initializes an empty VMM (no vsegs registered in VSL and GPT).
     220 * - It initializes the VMM (register the kentry, args, envs vsegs in VSL)
    213221 * - It initializes the FDT, defining the three pseudo files STDIN / STDOUT / STDERR.
    214222 * - It set the root_xp, bin_xp, cwd_xp fields.
     
    251259
    252260/*********************************************************************************************
    253  * This function kills a user process in a given cluster.
    254  * It can be directly called in the reference cluster, or it can be called through the
    255  * PROCESS_KILL RPC.
    256  * - In a first loop, it set the THREAD_SIG_KILL signal to all threads of process.
    257  * - In a second loop, it wait, for each thread the reset of the THREAD_SIG_KILL signal
    258  *   by the scheduler, and completes the thread descriptor destruction.
      261 * This function returns a printable string defining the action for process_sigaction().
     262 *********************************************************************************************
     263 * @ action_type   : BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS
     264 * @ return a string pointer.
     265 ********************************************************************************************/
     266char * process_action_str( uint32_t action_type );
     267
     268/*********************************************************************************************
     269 * This function allows any thread running in any cluster to block, unblock  or delete
      270 * all threads of a given process identified by the <process> argument, depending on the
      271 * <action_type> argument.
     272 * It can be called by the sys_kill() or sys_exit() functions to handle the "kill" & "exit"
     273 * system calls, or by the process_make_exec() function to handle the "exec" system call.
     274 * It must be executed in the owner cluster for the target process (using the relevant RPC
      275 * (RPC_PROCESS_SIGNAL or RPC_PROCESS_EXEC) if the client thread is not running in the
     276 * owner cluster.
     277 * It uses the multicast, non blocking, RPC_PROCESS_KILL to send the signal to all process
     278 * copies in parallel, block & deschedule when all signals have been sent, and finally
     279 * returns only when all responses have been received and the operation is completed.
    259280 *********************************************************************************************
    260281 * @ process     : pointer on the process descriptor.
    261  ********************************************************************************************/
    262 void process_kill( process_t * process );
     282 * @ action_type   : BLOCK_ALL_THREADS / UNBLOCK_ALL_THREADS / DELETE_ALL_THREADS
     283 ********************************************************************************************/
     284void process_sigaction( process_t * process,
     285                        uint32_t    action_type );
     286
     287/*********************************************************************************************
     288 * This function blocks all threads of a given user process in a given cluster.
     289 * It is always called by a local RPC thread, through the multicast RPC_PROCESS_KILL.
      290 * It loops on all local threads of the process, requesting the relevant schedulers to
     291 * block and deschedule these threads, using IPI if required. The threads are not detached
     292 * from the scheduler, and not detached from the local process.
     293 * It acknowledges the client thread in the owner cluster only when all process threads
     294 * are descheduled and blocked on the BLOCKED_GLOBAL condition, using the <rsp_xp> argument.
     295 *********************************************************************************************
     296 * @ process     : pointer on the target process descriptor.
     297 * @ rsp_xp      : extended pointer on the response counter.
      298 * @ client_xp   : extended pointer on client thread descriptor.
     299 ********************************************************************************************/
     300void process_block( process_t * process,
     301                    xptr_t      rsp_xp,
     302                    xptr_t      client_xp );
     303
     304/*********************************************************************************************
     305 * This function unblocks all threads of a given user process in a given cluster.
     306 * It is always called by a local RPC thread, through the multicast RPC_PROCESS_KILL.
     307 * It loops on local threads of the process, to reset the BLOCKED_GLOBAL bit in all threads.
     308 * It acknowledges directly the client thread in the owner cluster when this is done,
     309 * using the <rsp_xp> argument.
     310 *********************************************************************************************
     311 * @ process     : pointer on the process descriptor.
     312 * @ rsp_xp      : extended pointer on the response counter.
     313 * # client_xp   : extended pointer on client thread descriptor.
     314 ********************************************************************************************/
     315void process_unblock( process_t * process,
     316                      xptr_t      rsp_xp,
     317                      xptr_t      client_xp );
     318
     319/*********************************************************************************************
      320 * This function deletes all thread descriptors of a given user process in a given cluster.
      321 * It is always called by a local RPC thread, through the multicast RPC_PROCESS_KILL.
      322 * It detaches all process threads from the scheduler, detaches the threads from the local
      323 * process, and releases the local memory allocated to thread descriptors (including the
      324 * associated structures such as CPU and FPU context). Finally, it releases the memory
      325 * allocated to the local process descriptor itself, but only when the local cluster
      326 * is NOT the process owner (i.e. it holds only a copy). It acknowledges directly the
      327 * client thread in the owner cluster, using the <rsp_xp> argument.
     328 *********************************************************************************************
     329 * @ process     : pointer on the process descriptor.
     330 * @ rsp_xp      : extended pointer on the response counter.
     331 * # client_xp   : extended pointer on client thread descriptor.
     332 ********************************************************************************************/
     333void process_delete( process_t * process,
     334                     xptr_t      rsp_xp,
     335                     xptr_t      client_xp );
    263336
    264337/*********************************************************************************************
     
    274347
    275348/*********************************************************************************************
    276  * This function implements the exec() system call, and is called by the sys_exec() function.
    277  * It is also called by the process_init_create() function to build the "init" process.
     349 * This function implements the "exec" system call, and is called by the sys_exec() function.
    278350 * The "new" process keep the "old" process PID and PPID, all open files, and env variables,
    279351 * the vfs_root and vfs_cwd, but build a brand new memory image (new VMM from the new .elf).
    280  * It actually creates a "new" reference process descriptor, saves all relevant information
    281  * from the "old" reference process descriptor to the "new" process descriptor.
     352 * It actually creates a "new" reference process descriptor, and copies all relevant
     353 * information from the "old" process descriptor to the "new" process descriptor.
    282354 * It completes the "new" process descriptor, from information found in the <exec_info>
    283355 * structure (defined in the process.h file), that must be built by the caller.
    284356 * It creates and initializes the associated main thread. It finally destroys all copies
    285  * of the "old" process in all clusters, and all the old associated threads.
     357 * of the "old" process in all clusters, and destroys all old associated threads.
    286358 * It is executed in the local cluster, that becomes both the "owner" and the "reference"
    287359 * cluster for the "new" process.
     
    293365
    294366/*********************************************************************************************
    295  * This function implement the fork() system call, and is called by the sys_fork() function.
     367 * This function implements the "fork" system call, and is called by the sys_fork() function.
    296368 * It allocates memory and initializes a new "child" process descriptor, and the
    297369 * associated "child" thread descriptor in the local cluster. This function can involve
    298370 * up to three different clusters :
    299371 * - the local (child) cluster can be any cluster defined by the sys_fork function.
    300  * - the parent cluster must be the reference clusterfor the parent process.
    301  * - the client cluster containing the thread requestingthe fork can be any cluster.
     372 * - the parent cluster must be the reference cluster for the parent process.
     373 * - the client cluster containing the thread requesting the fork can be any cluster.
    302374 * The new "child" process descriptor is initialised from informations found in the "parent"
    303375 * reference process descriptor, containing the complete process description.
     
    315387                            pid_t            * child_pid,
    316388                            struct thread_s ** child_thread_ptr );
     389
     390/*********************************************************************************************
      391 * This function implements the "exit" system call, and is called by the sys_exit() function.
     392 * It must be executed by a thread running in the calling process owner cluster.
     393 * It uses twice the multicast RPC_PROCESS_SIGNAL to first block all process threads
     394 * in all clusters, and then delete all thread  and process descriptors.
     395 *********************************************************************************************
     396 * @ process  : pointer on process descriptor in owner cluster.
     397 * @ status   : exit return value.
     398 ********************************************************************************************/
     399void process_make_exit( process_t * process,
     400                        uint32_t    status );
     401
     402/*********************************************************************************************
     403 * This function implement the "kill" system call, and is called by the sys_kill() function.
     404 * It must be executed by a thread running in the target process owner cluster.
     405 * Only the SIGKILL, SIGSTOP, and SIGCONT signals are supported.
     406 * User defined handlers are not supported.
     407 * It uses once or twice the multicast RPC_PROCESS_SIGNAL to block, unblock or delete
     408 * all process threads in all clusters, and then delete process descriptors.
     409 *********************************************************************************************
     410 * @ process  : pointer on process descriptor in owner cluster.
     411 * @ sig_id   : signal type.
     412 ********************************************************************************************/
     413void process_make_kill( process_t * process,
     414                        uint32_t    sig_id );
     415
    317416
    318417/********************   File Management Operations   ****************************************/
     
    376475
    377476/*********************************************************************************************
    378  * This function copies all non-zero entries from a remote <src_xp> fd_array,
    379  * embedded in a process descriptor, to another remote <dst_xp> fd_array, embedded
    380  * in another process descriptor. The calling thread can be running in any cluster.
     477 * This function copies all non-zero entries (other than the three first stdin/stdout/stderr)
     478 * from a remote <src_xp> fd_array, embedded in a process descriptor, to another remote
     479 * <dst_xp> fd_array, embedded in another process descriptor.
     480 * The calling thread can be running in any cluster.
    381481 * It takes the remote lock protecting the <src_xp> fd_array during the copy.
    382482 * For each involved file descriptor, the refcount is incremented.
  • trunk/kernel/kern/rpc.c

    r408 r409  
    4949{
    5050    &rpc_pmem_get_pages_server,         // 0
    51     &rpc_process_make_exec_server,      // 1
    52     &rpc_process_make_fork_server,      // 2
    53     &rpc_process_kill_server,           // 3
    54     &rpc_thread_user_create_server,     // 4
    55     &rpc_thread_kernel_create_server,   // 5
    56     &rpc_signal_rise_server,            // 6                       
    57     &rpc_undefined,                     // 7
    58     &rpc_undefined,                     // 8
    59     &rpc_undefined,                     // 9
     51    &rpc_pmem_release_pages_server,     // 1
     52    &rpc_process_make_exec_server,      // 2
     53    &rpc_process_make_fork_server,      // 3
     54    &rpc_process_make_exit_server,      // 4
     55    &rpc_process_make_kill_server,      // 5
     56    &rpc_thread_user_create_server,     // 6
     57    &rpc_thread_kernel_create_server,   // 7
     58    &rpc_thread_kill_server,            // 8                       
     59    &rpc_process_sigaction_server,      // 9
    6060
    6161    &rpc_vfs_inode_create_server,       // 10 
     
    8888}
    8989
    90 /////////////////////////////////////////////////////////////////////////////////////////
    91 // [0]           Marshaling functions attached to RPC_PMEM_GET_PAGES
     90/***************************************************************************************/
     91/************ Generic functions supporting RPCs : client side **************************/
     92/***************************************************************************************/
     93
     94///////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// This generic client-side function posts the RPC descriptor <rpc> in the RPC fifo of
// the remote cluster <server_cxy>, then sends an IPI to that cluster.
// If <block> is true it waits for RPC completion: busy-waiting when the calling thread
// is the IDLE thread or cannot yield, blocking/descheduling otherwise.
// NOTE(review): the fifo-full retry loop busy-spins when thread_can_yield() is false —
// presumably acceptable during kernel_init; confirm against full source.
///////////////////////////////////////////////////////////////////////////////////////
void rpc_send( cxy_t        server_cxy,
               rpc_desc_t * rpc,
               bool_t       block )
{
    error_t    error;

    thread_t * this = CURRENT_THREAD;
    core_t   * core = this->core;

    // register client thread pointer and core lid in RPC descriptor
    // (used by the server to unblock the client and target the completion IPI)
    rpc->thread    = this;
    rpc->lid       = core->lid;

    // build an extended pointer on the RPC descriptor
    xptr_t   desc_xp = XPTR( local_cxy , rpc );

    // get local pointer on rpc_fifo in remote cluster, with the
    // assumption that local pointers are identical in all clusters
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

    // try to post an item in remote fifo
    // deschedule and retry if remote fifo full
    do
    {
        error = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ),
                                      (uint64_t )desc_xp );
        if ( error )
        {
            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
            __FUNCTION__ , local_cxy , server_cxy );

            if( thread_can_yield() ) sched_yield("RPC fifo full");
        }
    }
    while( error );

    // make the posted descriptor visible before the IPI
    hal_fence();

    // send IPI to the remote core corresponding to the client core
    dev_pic_send_ipi( server_cxy , core->lid );

    // wait RPC completion if blocking
    // - busy waiting policy during kernel_init, or if threads cannot yield
    // - block and deschedule in all other cases
    if ( block )
    {
        if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting
        {

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n"
"        rpc = %d / server = %x / cycle %d\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
rpc->index , server_cxy , hal_time_stamp() );

            // server decrements rpc->response; poll until it reaches zero
            while( rpc->response ) hal_fixed_delay( 100 );

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );

        }
        else                                                              // block & deschedule
        {

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n"
"        rpc = %d / server = %x / cycle %d\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
rpc->index , server_cxy , hal_time_stamp() );

            // the server thread unblocks this thread when the last response arrives
            thread_block( this , THREAD_BLOCKED_RPC );
            sched_yield("BLOCKED on RPC");

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );

        }

        // check response available
        assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" );

        // acknowledge the IPI sent by the server
        dev_pic_ack_ipi();
    }

}  // end rpc_send()
     179
     180
     181/***************************************************************************************/
     182/************ Generic functions supporting RPCs : server side **************************/
     183/***************************************************************************************/
     184
     185////////////////
///////////////////////////////////////////////////////////////////////////////////////
// This generic server-side function is called on IPI reception. If the local RPC fifo
// is not empty and no RPC thread currently owns it, it makes sure at least one
// non-blocked RPC thread exists on this core (creating one if needed), then
// unconditionally deschedules the interrupted thread so an RPC thread can run.
// IRQs are disabled for the whole check and restored after resume.
///////////////////////////////////////////////////////////////////////////////////////
void rpc_check()
{
    error_t         error;
    thread_t      * thread;
    uint32_t        sr_save;

    bool_t          found    = false;
    thread_t      * this     = CURRENT_THREAD;
    core_t        * core     = this->core;
    scheduler_t   * sched    = &core->scheduler;
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );

    // interrupted thread not preemptable during RPC check
    hal_disable_irq( &sr_save );

    // check RPC FIFO not empty and no RPC thread handling it
    if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    {
        // search one non blocked RPC thread in this core's kernel-thread list
        list_entry_t * iter;
        LIST_FOREACH( &sched->k_root , iter )
        {
            thread = LIST_ELEMENT( iter , thread_t , sched_list );
            if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )
            {
                found = true;
                break;
            }
        }

        // create new RPC thread if not found
        if( found == false )
        {
            error = thread_kernel_create( &thread,
                                          THREAD_RPC,
                                          &rpc_thread_func,
                                          NULL,
                                          this->core->lid );
            if( error )
            {
                // creation failure is non-fatal: the request stays in the fifo
                printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n",
                __FUNCTION__ , local_cxy );
            }
            else
            {
                // unblock created RPC thread
                thread->blocked = 0;

                // update core descriptor counter
                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );

grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / cycle %d\n",
__FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );

            }
        }
    }

grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );

    // interrupted thread deschedules always
    sched_yield("IPI received");

grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );

    // interrupted thread restores IRQs after resume
    hal_restore_irq( sr_save );

} // end rpc_check()
     260
     261
     262//////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Entry point of the kernel RPC server threads. Runs an "infinite" external loop:
// take ownership of the local RPC fifo, serve up to CONFIG_RPC_PENDING_MAX requests
// in the internal loop (dispatching each through the rpc_server[] table), signal the
// client on last response, then yield. Commits suicide when the cluster already has
// CONFIG_RPC_THREADS_MAX RPC threads. Runs with IRQs disabled (not preemptable).
///////////////////////////////////////////////////////////////////////////////////////
void rpc_thread_func()
{
    uint32_t     count;       // handled RPC requests counter
    error_t      empty;       // local RPC fifo state
    xptr_t       desc_xp;     // extended pointer on RPC request
    cxy_t        desc_cxy;    // RPC request cluster (client)
    rpc_desc_t * desc_ptr;    // RPC request local pointer
    uint32_t     index;       // RPC request index
    uint32_t     responses;   // number of responses received by client
    thread_t   * thread_ptr;  // local pointer on client thread
    lid_t        core_lid;    // local index of client core

    // makes RPC thread not preemptable
    hal_disable_irq( NULL );

    thread_t      * this     = CURRENT_THREAD;
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

    // two embedded loops:
    // - external loop : "infinite" RPC thread
    // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
    while(1)  // external loop
    {
        // try to take RPC_FIFO ownership
        if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
        {
            // initializes RPC requests counter
            count = 0;

            // acknowledge local IPI
            dev_pic_ack_ipi();

            // exit internal loop in three cases:
            // - RPC fifo is empty
            // - ownership has been lost (because descheduling)
            // - max number of RPCs is reached
            while( 1 )  // internal loop
            {
                empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );

                if ( empty == 0 ) // one RPC request found
                {
                    // get client cluster and pointer on RPC descriptor
                    desc_cxy = (cxy_t)GET_CXY( desc_xp );
                    desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp );

                    // get rpc index from RPC descriptor
                    index = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );

grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n",
__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );

                    // call the relevant server function
                    rpc_server[index]( desc_xp );

grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n",
__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );

                    // increment handled RPC counter
                    count++;

                    // decrement response counter in RPC descriptor
                    responses = hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1);

                    // unblock client thread and send IPI to client core if last response
                    if( responses == 1 )
                    {
                        // get pointer on client thread and unblock it
                        thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread));
                        thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC );

                        // make the unblock visible before the IPI
                        hal_fence();

                        // get client core lid and send IPI
                        core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid));
                        dev_pic_send_ipi( desc_cxy , core_lid );
                    }
                }

                // check exit condition
                if( local_fifo_is_empty( rpc_fifo )  ||
                    (rpc_fifo->owner != this->trdid) ||
                    (count >= CONFIG_RPC_PENDING_MAX) ) break;
            } // end internal loop

            // release rpc_fifo ownership if not lost
            if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
        }

        // suicide if too many RPC threads in cluster
        if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
        {

grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n",
__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );

            // update RPC threads counter
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );

            // suicide
            thread_kill( this );
        }

grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n",
__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );

        // deschedule without blocking
        sched_yield("RPC fifo empty or too much work");

grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n",
__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );

    } // end external loop

} // end rpc_thread_func()
     379
     380
     381/////////////////////////////////////////////////////////////////////////////////////////
     382// [0]           Marshaling functions attached to RPC_PMEM_GET_PAGES (blocking)
    92383/////////////////////////////////////////////////////////////////////////////////////////
    93384
     
    97388                                page_t  ** page )      // out
    98389{
    99     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    100     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    101     CURRENT_THREAD->core->lid , hal_time_stamp() );
     390rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     391__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     392CURRENT_THREAD->core->lid , hal_time_stamp() );
    102393
    103394    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    112403
    113404    // register RPC request in remote RPC fifo (blocking function)
    114     rpc_send_sync( cxy , &rpc );
     405    rpc_send( cxy , &rpc  , true );
    115406
    116407    // get output arguments from RPC descriptor
    117408    *page = (page_t *)(intptr_t)rpc.args[1];
    118409
    119     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    120     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    121     CURRENT_THREAD->core->lid , hal_time_stamp() );
     410rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     411__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     412CURRENT_THREAD->core->lid , hal_time_stamp() );
    122413}
    123414
     
    125416void rpc_pmem_get_pages_server( xptr_t xp )
    126417{
    127     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    128     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    129     CURRENT_THREAD->core->lid , hal_time_stamp() );
     418rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     419__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     420CURRENT_THREAD->core->lid , hal_time_stamp() );
    130421
    131422    // get client cluster identifier and pointer on RPC descriptor
     
    134425
    135426    // get input arguments from client RPC descriptor
    136     uint32_t order = hal_remote_lw( XPTR( cxy , &desc->args[0] ) );
     427    uint32_t order = (uint32_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) );
    137428   
    138429    // call local pmem allocator
     
    142433    hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    143434
    144     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    145     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    146     CURRENT_THREAD->core->lid , hal_time_stamp() );
    147 }
    148 
    149 /////////////////////////////////////////////////////////////////////////////////////////
    150 // [1]           Marshaling functions attached to RPC_PROCESS_MAKE_EXEC
     435rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     436__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     437CURRENT_THREAD->core->lid , hal_time_stamp() );
     438}
     439
     440/////////////////////////////////////////////////////////////////////////////////////////
     441// [1]       Marshaling functions attached to RPC_PMEM_RELEASE_PAGES (blocking)
     442/////////////////////////////////////////////////////////////////////////////////////////
     443
     444//////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// This blocking client-side function requests the remote cluster <cxy> to release
// the physical page <page> to its local kmem allocator.
// <page> is an input argument (the previous "// out" tag was incorrect).
// Must be called from a cluster other than <cxy>.
///////////////////////////////////////////////////////////////////////////////////////
void rpc_pmem_release_pages_client( cxy_t     cxy,
                                    page_t  * page )      // in
{
rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );

    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index    = RPC_PMEM_RELEASE_PAGES;
    rpc.response = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)page;

    // register RPC request in remote RPC fifo (blocking function)
    rpc_send( cxy , &rpc  , true );

rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );
}
     469
     470///////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Server side of RPC_PMEM_RELEASE_PAGES: extracts the page pointer from the RPC
// descriptor located in the client cluster, and releases the page to the local
// kmem allocator. <xp> is the extended pointer on the client RPC descriptor.
///////////////////////////////////////////////////////////////////////////////////////
void rpc_pmem_release_pages_server( xptr_t xp )
{
rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );

    // get client cluster identifier and pointer on RPC descriptor
    cxy_t        cxy  = (cxy_t)GET_CXY( xp );
    rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );

    // get input arguments from client RPC descriptor
    // (the page pointer is local to this server cluster)
    page_t * page = (page_t *)(intptr_t)hal_remote_lwd( XPTR( cxy , &desc->args[0] ) );

    // release memory to local pmem
    kmem_req_t req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );
}
     494
     495/////////////////////////////////////////////////////////////////////////////////////////
     496// [2]           Marshaling functions attached to RPC_PROCESS_MAKE_EXEC (blocking)
    151497/////////////////////////////////////////////////////////////////////////////////////////
    152498
     
    156502                                   error_t     * error )   // out
    157503{
    158     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    159     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    160     CURRENT_THREAD->core->lid , hal_time_stamp() );
     504rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     505__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     506CURRENT_THREAD->core->lid , hal_time_stamp() );
    161507
    162508    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    171517
    172518    // register RPC request in remote RPC fifo (blocking function)
    173     rpc_send_sync( cxy , &rpc );
     519    rpc_send( cxy , &rpc  , true );
    174520
    175521    // get output arguments from RPC descriptor
    176522    *error  = (error_t)rpc.args[1];     
    177523
    178     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    179     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    180     CURRENT_THREAD->core->lid , hal_time_stamp() );
     524rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     525__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     526CURRENT_THREAD->core->lid , hal_time_stamp() );
    181527}
    182528
     
    184530void rpc_process_make_exec_server( xptr_t xp )
    185531{
     532rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     533__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     534CURRENT_THREAD->core->lid , hal_time_stamp() );
     535
    186536    exec_info_t * ptr;       // local pointer on remote exec_info structure
    187537    exec_info_t   info;      // local copy of exec_info structure
    188538    error_t       error;     // local error error status
    189 
    190     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    191     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    192     CURRENT_THREAD->core->lid , hal_time_stamp() );
    193539
    194540    // get client cluster identifier and pointer on RPC descriptor
     
    210556    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    211557
    212     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    213     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    214     CURRENT_THREAD->core->lid , hal_time_stamp() );
    215 }
    216 
    217 /////////////////////////////////////////////////////////////////////////////////////////
    218 // [2]           Marshaling functions attached to RPC_PROCESS_MAKE_FORK
     558rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     559__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     560CURRENT_THREAD->core->lid , hal_time_stamp() );
     561}
     562
     563/////////////////////////////////////////////////////////////////////////////////////////
     564// [3]           Marshaling functions attached to RPC_PROCESS_MAKE_FORK (blocking)
    219565/////////////////////////////////////////////////////////////////////////////////////////
    220566
     
    227573                                   error_t   * error )              // out
    228574{
    229     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    230     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    231     CURRENT_THREAD->core->lid , hal_time_stamp() );
     575rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     576__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     577CURRENT_THREAD->core->lid , hal_time_stamp() );
    232578
    233579    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    243589
    244590    // register RPC request in remote RPC fifo (blocking function)
    245     rpc_send_sync( cxy , &rpc );
     591    rpc_send( cxy , &rpc  , true );
    246592
    247593    // get output arguments from RPC descriptor
     
    250596    *error             = (error_t)rpc.args[4];     
    251597
    252     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    253     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    254     CURRENT_THREAD->core->lid , hal_time_stamp() );
     598rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     599__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     600CURRENT_THREAD->core->lid , hal_time_stamp() );
    255601}
    256602
     
    258604void rpc_process_make_fork_server( xptr_t xp )
    259605{
     606rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     607__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     608CURRENT_THREAD->core->lid , hal_time_stamp() );
     609
    260610    xptr_t     ref_process_xp;     // extended pointer on reference parent process
    261611    xptr_t     parent_thread_xp;   // extended pointer on parent thread
     
    264614    error_t    error;              // local error status
    265615
    266     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    267     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    268     CURRENT_THREAD->core->lid , hal_time_stamp() );
    269 
    270616    // get client cluster identifier and pointer on RPC descriptor
    271617    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    287633    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    288634
    289     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    290     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    291     CURRENT_THREAD->core->lid , hal_time_stamp() );
    292 }
    293 
    294 /////////////////////////////////////////////////////////////////////////////////////////
    295 // [3]           Marshaling functions attached to RPC_PROCESS_KILL
     635rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     636__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     637CURRENT_THREAD->core->lid , hal_time_stamp() );
     638}
     639
     640/////////////////////////////////////////////////////////////////////////////////////////
     641// [4]      Marshaling functions attached to RPC_PROCESS_MAKE_EXIT (blocking)
    296642/////////////////////////////////////////////////////////////////////////////////////////
    297643
    298644///////////////////////////////////////////////////
    299 void rpc_process_kill_client( process_t * process )
    300 {
    301     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    302     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    303     CURRENT_THREAD->core->lid , hal_time_stamp() );
    304 
    305     // only reference cluster can send this RPC
    306     assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__ ,
    307             "caller must be reference process cluster\n");
    308 
    309     // get local process index in reference cluster
    310     lpid_t lpid = LPID_FROM_PID( process->pid );
    311 
    312     // get local process manager pointer
    313     pmgr_t * pmgr = &LOCAL_CLUSTER->pmgr;
    314 
    315     // get number of copies
    316     uint32_t copies = pmgr->copies_nr[lpid];
    317 
    318     // initialise RPC descriptor
    319     rpc_desc_t  rpc;
    320     rpc.index    = RPC_PROCESS_KILL;
    321     rpc.response = copies;
    322     rpc.args[0]  = (uint64_t)process->pid;
    323 
    324     // loop on list of copies to send RPC
    325     xptr_t  iter;
    326     XLIST_FOREACH( XPTR( local_cxy , &pmgr->copies_root[lpid] ) , iter )
    327     {
    328         // get cluster_identifier for current copy
    329         cxy_t  target_cxy = GET_CXY( iter );
    330 
    331         // register RPC request in remote RPC fifo ... but the reference
    332         if( target_cxy != local_cxy ) rpc_send_sync( target_cxy , &rpc );
    333     }
    334 
    335     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    336     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    337     CURRENT_THREAD->core->lid , hal_time_stamp() );
///////////////////////////////////////////////////////////////////////////////////////
// This blocking client-side function requests the owner cluster <cxy> to execute
// process_make_exit() for <process> with exit code <status>.
// Must be called from a cluster other than <cxy>.
///////////////////////////////////////////////////////////////////////////////////////
void rpc_process_make_exit_client( cxy_t       cxy,
                                   process_t * process,
                                   uint32_t    status )
{
rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );

    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

    // initialise RPC descriptor header
    rpc_desc_t  rpc;
    rpc.index    = RPC_PROCESS_MAKE_EXIT;
    rpc.response = 1;

    // set input arguments in RPC descriptor
    rpc.args[0] = (uint64_t)(intptr_t)process;
    rpc.args[1] = (uint64_t)status;

    // register RPC request in remote RPC fifo (blocking function)
    rpc_send( cxy , &rpc , true );

rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );
}
// NOTE(review): the closing brace above is an unchanged context line (new line 670)
// whose glyph was lost in the changeset rendering — verify against the full source.
    339671
    340 /////////////////////////////////////////
    341 void rpc_process_kill_server( xptr_t xp )
    342 {
    343     pid_t       pid;
    344     process_t * process;
    345 
    346     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    347     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    348     CURRENT_THREAD->core->lid , hal_time_stamp() );
     672//////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Server side of RPC_PROCESS_MAKE_EXIT: extracts the <process> pointer and <status>
// value from the RPC descriptor in the client cluster, and calls the local
// process_make_exit() kernel function. <xp> is the extended pointer on the client
// RPC descriptor.
///////////////////////////////////////////////////////////////////////////////////////
void rpc_process_make_exit_server( xptr_t xp )
{
rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );

    process_t * process;
    uint32_t    status;

    // get client cluster identifier and pointer on RPC descriptor
    // NOTE(review): the client_cxy declaration is an unchanged context line elided by
    // the changeset view; it is inferred from its use below — verify against full source.
    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
    rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );

    // get arguments from RPC descriptor
    process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    status  = (uint32_t)             hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );

    // call local kernel function
    process_make_exit( process , status );

rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
CURRENT_THREAD->core->lid , hal_time_stamp() );
}
    374697
    375 
    376 /////////////////////////////////////////////////////////////////////////////////////////
    377 // [4]           Marshaling functions attached to RPC_THREAD_USER_CREATE               
     698/////////////////////////////////////////////////////////////////////////////////////////
     699// [5]      Marshaling functions attached to RPC_PROCESS_MAKE_KILL (blocking)
     700/////////////////////////////////////////////////////////////////////////////////////////
     701
     702///////////////////////////////////////////////////
     703void rpc_process_make_kill_client( cxy_t       cxy,
     704                                   process_t * process,
     705                                   uint32_t    sig_id )
     706{
     707rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     708__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     709CURRENT_THREAD->core->lid , hal_time_stamp() );
     710
     711    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     712
     713    // initialise RPC descriptor header
     714    rpc_desc_t  rpc;
     715    rpc.index    = RPC_PROCESS_MAKE_KILL;
     716    rpc.response = 1;
     717
     718    // set input arguments in RPC descriptor 
     719    rpc.args[0] = (uint64_t)(intptr_t)process;
     720    rpc.args[1] = (uint64_t)sig_id;
     721
     722    // register RPC request in remote RPC fifo (blocking function)
     723    rpc_send( cxy , &rpc , true );
     724
     725rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     726__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     727CURRENT_THREAD->core->lid , hal_time_stamp() );
     728
     729
     730//////////////////////////////////////////////
     731void rpc_process_make_kill_server( xptr_t xp )
     732{
     733rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     734__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     735CURRENT_THREAD->core->lid , hal_time_stamp() );
     736
     737    process_t * process;
     738    uint32_t    sig_id;
     739
     740    // get client cluster identifier and pointer on RPC descriptor
     741    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     742    rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     743
     744    // get arguments from RPC descriptor
     745    process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     746    sig_id  = (uint32_t)             hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     747
     748    // call local kernel function
     749    process_make_exit( process , sig_id );
     750
     751rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     752__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     753CURRENT_THREAD->core->lid , hal_time_stamp() );
     754}
     755
     756/////////////////////////////////////////////////////////////////////////////////////////
     757// [6]           Marshaling functions attached to RPC_THREAD_USER_CREATE (blocking)               
    378758/////////////////////////////////////////////////////////////////////////////////////////
    379759
     
    387767                                    error_t        * error )      // out
    388768{
    389     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    390     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    391     CURRENT_THREAD->core->lid , hal_time_stamp() );
     769rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     770__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     771CURRENT_THREAD->core->lid , hal_time_stamp() );
    392772
    393773    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    404784    rpc.args[3] = (uint64_t)(intptr_t)attr;
    405785
    406     // register RPC request in remote RPC fifo
    407     rpc_send_sync( cxy , &rpc );
     786    // register RPC request in remote RPC fifo (blocking function)
     787    rpc_send( cxy , &rpc , true );
    408788
    409789    // get output arguments from RPC descriptor
     
    411791    *error     = (error_t)rpc.args[5];
    412792
    413     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    414     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    415     CURRENT_THREAD->core->lid , hal_time_stamp() );
     793rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     794__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     795CURRENT_THREAD->core->lid , hal_time_stamp() );
    416796}
    417797
     
    419799void rpc_thread_user_create_server( xptr_t xp )
    420800{
     801rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     802__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     803CURRENT_THREAD->core->lid , hal_time_stamp() );
     804
    421805    pthread_attr_t * attr_ptr;   // pointer on attributes structure in client cluster
    422806    pthread_attr_t   attr_copy;  // attributes structure  copy in server cluster
     
    428812    void           * start_arg;
    429813    error_t          error;
    430 
    431     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    432     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    433     CURRENT_THREAD->core->lid , hal_time_stamp() );
    434814
    435815    // get client cluster identifier and pointer on RPC descriptor
     
    462842    hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
    463843
    464     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    465     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    466     CURRENT_THREAD->core->lid , hal_time_stamp() );
    467 }
    468 
    469 /////////////////////////////////////////////////////////////////////////////////////////
    470 // [5]           Marshaling functions attached to RPC_THREAD_KERNEL_CREATE
     844rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     845__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     846CURRENT_THREAD->core->lid , hal_time_stamp() );
     847}
     848
     849/////////////////////////////////////////////////////////////////////////////////////////
     850// [7]           Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)
    471851/////////////////////////////////////////////////////////////////////////////////////////
    472852
     
    479859                                      error_t * error )      // out
    480860{
    481     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    482     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    483     CURRENT_THREAD->core->lid , hal_time_stamp() );
     861rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     862__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     863CURRENT_THREAD->core->lid , hal_time_stamp() );
    484864
    485865    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     
    495875    rpc.args[2] = (uint64_t)(intptr_t)args;
    496876   
    497     // register RPC request in remote RPC fifo
    498     rpc_send_sync( cxy , &rpc );
     877    // register RPC request in remote RPC fifo (blocking function)
     878    rpc_send( cxy , &rpc , true );
    499879
    500880    // get output arguments from RPC descriptor
     
    502882    *error     = (error_t)rpc.args[4];
    503883
    504     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    505     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    506     CURRENT_THREAD->core->lid , hal_time_stamp() );
     884rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     885__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     886CURRENT_THREAD->core->lid , hal_time_stamp() );
    507887}
    508888
     
    510890void rpc_thread_kernel_create_server( xptr_t xp )
    511891{
     892rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     893__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     894CURRENT_THREAD->core->lid , hal_time_stamp() );
     895
    512896    thread_t       * thread_ptr;  // local pointer on thread descriptor
    513897    xptr_t           thread_xp;   // extended pointer on thread descriptor
     
    515899    error_t          error;   
    516900
    517     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    518     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    519     CURRENT_THREAD->core->lid , hal_time_stamp() );
    520 
    521901    // get client cluster identifier and pointer on RPC descriptor
    522902    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     
    539919    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
    540920
    541     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    542     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    543     CURRENT_THREAD->core->lid , hal_time_stamp() );
    544 }
    545 
    546 /////////////////////////////////////////////////////////////////////////////////////////
    547 // [6]           Marshaling functions attached to RPC_SIGNAL_RISE
     921rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     922__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     923CURRENT_THREAD->core->lid , hal_time_stamp() );
     924}
     925
     926/////////////////////////////////////////////////////////////////////////////////////////
     927// [8]           Marshaling functions attached to RPC_THREAD_KILL (blocking)
    548928/////////////////////////////////////////////////////////////////////////////////////////
    549929
    550930/////////////////////////////////////////////
    551 void rpc_signal_rise_client( cxy_t       cxy,
    552                              process_t * process,    // in
    553                              uint32_t    sig_id )    // in
    554 {
    555     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    556     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    557     CURRENT_THREAD->core->lid , hal_time_stamp() );
    558 
    559     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
    560 
    561     // initialise RPC descriptor header
    562     rpc_desc_t  rpc;
    563     rpc.index    = RPC_SIGNAL_RISE;
     931void rpc_thread_kill_client( cxy_t       cxy,
     932                             thread_t  * thread )    // in
     933{
     934rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     935__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     936CURRENT_THREAD->core->lid , hal_time_stamp() );
     937
     938    // this RPC can be called in local cluster
     939
     940    // initialise RPC descriptor header
     941    rpc_desc_t  rpc;
     942    rpc.index    = RPC_THREAD_KILL;
    564943    rpc.response = 1;
    565944
    566945    // set input arguments in RPC descriptor
    567     rpc.args[0] = (uint64_t)(intptr_t)process;
    568     rpc.args[1] = (uint64_t)sig_id;
     946    rpc.args[0] = (uint64_t)(intptr_t)thread;
    569947   
    570     // register RPC request in remote RPC fifo
    571     rpc_send_sync( cxy , &rpc );
    572 
    573     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    574     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    575     CURRENT_THREAD->core->lid , hal_time_stamp() );
     948    // register RPC request in remote RPC fifo (blocking function)
     949    rpc_send( cxy , &rpc , true );
     950
     951rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     952__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     953CURRENT_THREAD->core->lid , hal_time_stamp() );
    576954}
    577955
    578956////////////////////////////////////////                             
    579 void rpc_signal_rise_server( xptr_t xp )
    580 {
    581     process_t  * process;  // local pointer on process descriptor
    582     uint32_t     sig_id;   // signal index
    583 
    584     rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    585     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    586     CURRENT_THREAD->core->lid , hal_time_stamp() );
     957void rpc_thread_kill_server( xptr_t xp )
     958{
     959rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     960__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     961CURRENT_THREAD->core->lid , hal_time_stamp() );
     962
     963    thread_t  * thread;  // local pointer on process descriptor
    587964
    588965    // get client cluster identifier and pointer on RPC descriptor
     
    591968
    592969    // get attributes from RPC descriptor
    593     process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    594     sig_id  = (uint32_t)             hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     970    thread = (thread_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    595971
    596972    // call local kernel function
    597     signal_rise( process , sig_id );
    598 
    599     rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    600     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    601     CURRENT_THREAD->core->lid , hal_time_stamp() );
    602 }
    603 
    604 /////////////////////////////////////////////////////////////////////////////////////////
    605 // [10]          Marshaling functions attached to RPC_VFS_INODE_CREATE
     973    thread_kill( thread );
     974
     975rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     976__FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     977CURRENT_THREAD->core->lid , hal_time_stamp() );
     978}
     979
     980
     981/////////////////////////////////////////////////////////////////////////////////////////
     982// [9]     Marshaling functions attached to RPC_PROCESS_KILL  (multicast / non blocking)
     983/////////////////////////////////////////////////////////////////////////////////////////
     984
     985///////////////////////////////////////////////////
     986void rpc_process_sigaction_client( cxy_t       cxy,
     987                                   process_t * process,        // in
     988                                   uint32_t    sigaction,      // in
     989                                   xptr_t      rsp_xp,         // in
     990                                   xptr_t      client_xp )     // in
     991{
     992signal_dmsg("\n[DBG] %s : enter for %s / thread %x on core[%x,%d] / cycle %d\n",
     993__FUNCTION__ , process_action_str( sigaction ) , CURRENT_THREAD ,
     994local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() );
     995
     996    // initialise RPC descriptor header
     997    rpc_desc_t  rpc;
     998    rpc.index    = RPC_PROCESS_SIGACTION;
     999
     1000    // set input arguments in RPC descriptor 
     1001    rpc.args[0] = (uint64_t)(intptr_t)process;
     1002    rpc.args[1] = (uint64_t)sigaction;
     1003    rpc.args[2] = (uint64_t)rsp_xp;
     1004    rpc.args[3] = (uint64_t)client_xp;
     1005
     1006    // register RPC request in remote RPC fifo (non blocking)
     1007    rpc_send( cxy , &rpc , false );
     1008
     1009signal_dmsg("\n[DBG] %s : exit for %s / thread %x on core[%x,%d] / cycle %d\n",
     1010__FUNCTION__ , process_action_str( sigaction ) , CURRENT_THREAD ,
     1011local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() );
     1012
     1013
     1014//////////////////////////////////////////////
     1015void rpc_process_sigaction_server( xptr_t xp )
     1016{
     1017    process_t * process;
     1018    uint32_t    action; 
     1019    xptr_t      rsp_xp;
     1020    xptr_t      client_xp;
     1021
     1022    // get client cluster identifier and pointer on RPC descriptor
     1023    cxy_t        client_cxy  = (cxy_t)GET_CXY( xp );
     1024    rpc_desc_t * desc        = (rpc_desc_t *)GET_PTR( xp );
     1025
     1026    // get arguments from RPC descriptor
     1027    process   = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
     1028    action    = (uint32_t)             hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     1029    rsp_xp    = (xptr_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
     1030    client_xp = (xptr_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[3] ) );
     1031   
     1032signal_dmsg("\n[DBG] %s : enter for %s / thread %x on core[%x,%d] / cycle %d\n",
     1033__FUNCTION__ , process_action_str( action ) , CURRENT_THREAD ,
     1034local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() );
     1035
     1036    // call relevant kernel function
     1037    if      (action == DELETE_ALL_THREADS  ) process_delete ( process , rsp_xp , client_xp );
     1038    else if (action == BLOCK_ALL_THREADS   ) process_block  ( process , rsp_xp , client_xp );
     1039    else if (action == UNBLOCK_ALL_THREADS ) process_unblock( process , rsp_xp , client_xp );
     1040
     1041signal_dmsg("\n[DBG] %s : exit for %s / thread %x on core[%x,%d] / cycle %d\n",
     1042__FUNCTION__ , process_action_str( action ) , CURRENT_THREAD ,
     1043local_cxy , CURRENT_THREAD->core->lid , hal_time_stamp() );
     1044}
     1045
     1046/////////////////////////////////////////////////////////////////////////////////////////
     1047// [10]          Marshaling functions attached to RPC_VFS_INODE_CREATE  (blocking)
    6061048/////////////////////////////////////////////////////////////////////////////////////////
    6071049
     
    6411083
    6421084    // register RPC request in remote RPC fifo (blocking function)
    643     rpc_send_sync( cxy , &rpc );
     1085    rpc_send( cxy , &rpc , true );
    6441086
    6451087    // get output values from RPC descriptor
     
    7051147
    7061148/////////////////////////////////////////////////////////////////////////////////////////
    707 // [11]          Marshaling functions attached to RPC_VFS_INODE_DESTROY
     1149// [11]          Marshaling functions attached to RPC_VFS_INODE_DESTROY  (blocking)
    7081150/////////////////////////////////////////////////////////////////////////////////////////
    7091151
     
    7271169   
    7281170    // register RPC request in remote RPC fifo (blocking function)
    729     rpc_send_sync( cxy , &rpc );
     1171    rpc_send( cxy , &rpc , true );
    7301172
    7311173    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    7591201
    7601202/////////////////////////////////////////////////////////////////////////////////////////
    761 // [12]          Marshaling functions attached to RPC_VFS_DENTRY_CREATE
     1203// [12]          Marshaling functions attached to RPC_VFS_DENTRY_CREATE  (blocking)
    7621204/////////////////////////////////////////////////////////////////////////////////////////
    7631205
     
    7871229
    7881230    // register RPC request in remote RPC fifo (blocking function)
    789     rpc_send_sync( cxy , &rpc );
     1231    rpc_send( cxy , &rpc , true );
    7901232
    7911233    // get output values from RPC descriptor
     
    8411283
    8421284/////////////////////////////////////////////////////////////////////////////////////////
    843 // [13]          Marshaling functions attached to RPC_VFS_DENTRY_DESTROY
     1285// [13]          Marshaling functions attached to RPC_VFS_DENTRY_DESTROY  (blocking)
    8441286/////////////////////////////////////////////////////////////////////////////////////////
    8451287
     
    8641306   
    8651307    // register RPC request in remote RPC fifo (blocking function)
    866     rpc_send_sync( cxy , &rpc );
     1308    rpc_send( cxy , &rpc , true );
    8671309
    8681310    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    8971339
    8981340/////////////////////////////////////////////////////////////////////////////////////////
    899 // [14]          Marshaling functions attached to RPC_VFS_FILE_CREATE
     1341// [14]          Marshaling functions attached to RPC_VFS_FILE_CREATE  (blocking)
    9001342/////////////////////////////////////////////////////////////////////////////////////////
    9011343
     
    9231365
    9241366    // register RPC request in remote RPC fifo (blocking function)
    925     rpc_send_sync( cxy , &rpc );
     1367    rpc_send( cxy , &rpc , true );
    9261368
    9271369    // get output values from RPC descriptor
     
    9691411
    9701412/////////////////////////////////////////////////////////////////////////////////////////
    971 // [15]          Marshaling functions attached to RPC_VFS_FILE_DESTROY
     1413// [15]          Marshaling functions attached to RPC_VFS_FILE_DESTROY  (blocking)
    9721414/////////////////////////////////////////////////////////////////////////////////////////
    9731415
     
    9911433   
    9921434    // register RPC request in remote RPC fifo (blocking function)
    993     rpc_send_sync( cxy , &rpc );
     1435    rpc_send( cxy , &rpc , true );
    9941436
    9951437    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    10231465
    10241466/////////////////////////////////////////////////////////////////////////////////////////
    1025 // [16]          Marshaling functions attached to RPC_VFS_INODE_LOAD 
     1467// [16]          Marshaling functions attached to RPC_VFS_INODE_LOAD   (blocking)
    10261468/////////////////////////////////////////////////////////////////////////////////////////
    10271469
     
    10501492
    10511493    // register RPC request in remote RPC fifo (blocking function)
    1052     rpc_send_sync( cxy , &rpc );
     1494    rpc_send( cxy , &rpc , true );
    10531495
    10541496    // get output values from RPC descriptor
     
    10991541
    11001542/////////////////////////////////////////////////////////////////////////////////////////
    1101 // [17]          Marshaling functions attached to RPC_VFS_MAPPER_LOAD_ALL
     1543// [17]          Marshaling functions attached to RPC_VFS_MAPPER_LOAD_ALL  (blocking)
    11021544/////////////////////////////////////////////////////////////////////////////////////////
    11031545
     
    11221564
    11231565    // register RPC request in remote RPC fifo (blocking function)
    1124     rpc_send_sync( cxy , &rpc );
     1566    rpc_send( cxy , &rpc , true );
    11251567
    11261568    // get output values from RPC descriptor
     
    11611603
    11621604/////////////////////////////////////////////////////////////////////////////////////////
    1163 // [18]          Marshaling functions attached to RPC_FATFS_GET_CLUSTER
     1605// [18]          Marshaling functions attached to RPC_FATFS_GET_CLUSTER  (blocking)
    11641606/////////////////////////////////////////////////////////////////////////////////////////
    11651607
     
    11891631
    11901632    // register RPC request in remote RPC fifo
    1191     rpc_send_sync( cxy , &rpc );
     1633    rpc_send( cxy , &rpc , true );
    11921634
    11931635    // get output argument from rpc descriptor
     
    12351677
    12361678/////////////////////////////////////////////////////////////////////////////////////////
    1237 // [20]          Marshaling functions attached to RPC_VMM_GET_VSEG
     1679// [20]          Marshaling functions attached to RPC_VMM_GET_VSEG  (blocking)
    12381680/////////////////////////////////////////////////////////////////////////////////////////
    12391681
     
    12611703
    12621704    // register RPC request in remote RPC fifo (blocking function)
    1263     rpc_send_sync( cxy , &rpc );
     1705    rpc_send( cxy , &rpc , true );
    12641706
    12651707    // get output argument from rpc descriptor
     
    13081750
    13091751/////////////////////////////////////////////////////////////////////////////////////////
    1310 // [21]          Marshaling functions attached to RPC_VMM_GET_PTE
     1752// [21]          Marshaling functions attached to RPC_VMM_GET_PTE  (blocking)
    13111753/////////////////////////////////////////////////////////////////////////////////////////
    13121754
     
    13371779
    13381780    // register RPC request in remote RPC fifo (blocking function)
    1339     rpc_send_sync( cxy , &rpc );
     1781    rpc_send( cxy , &rpc , true );
    13401782
    13411783    // get output argument from rpc descriptor
     
    13861828
    13871829/////////////////////////////////////////////////////////////////////////////////////////
    1388 // [22]          Marshaling functions attached to RPC_KCM_ALLOC
     1830// [22]          Marshaling functions attached to RPC_KCM_ALLOC  (blocking)
    13891831/////////////////////////////////////////////////////////////////////////////////////////
    13901832
     
    14081850    rpc.args[0] = (uint64_t)kmem_type;
    14091851
    1410     // register RPC request in remote RPC fifo
    1411     rpc_send_sync( cxy , &rpc );
     1852    // register RPC request in remote RPC fifo (blocking function)
     1853    rpc_send( cxy , &rpc , true );
    14121854
    14131855    // get output arguments from RPC descriptor
     
    14491891
    14501892/////////////////////////////////////////////////////////////////////////////////////////
    1451 // [23]          Marshaling functions attached to RPC_KCM_FREE
     1893// [23]          Marshaling functions attached to RPC_KCM_FREE  (blocking)
    14521894/////////////////////////////////////////////////////////////////////////////////////////
    14531895
     
    14721914    rpc.args[1] = (uint64_t)kmem_type;
    14731915
    1474     // register RPC request in remote RPC fifo
    1475     rpc_send_sync( cxy , &rpc );
     1916    // register RPC request in remote RPC fifo (blocking function)
     1917    rpc_send( cxy , &rpc , true );
    14761918
    14771919    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    15401982
    15411983    // register RPC request in remote RPC fifo (blocking function)
    1542     rpc_send_sync( cxy , &rpc );
     1984    rpc_send( cxy , &rpc , true );
    15431985
    15441986    // get output values from RPC descriptor
     
    16082050
    16092051/////////////////////////////////////////////////////////////////////////////////////////
    1610 // [25]          Marshaling functions attached to RPC_MAPPER_GET_PAGE
     2052// [25]          Marshaling functions attached to RPC_MAPPER_GET_PAGE (blocking)
    16112053/////////////////////////////////////////////////////////////////////////////////////////
    16122054
     
    16332075
    16342076    // register RPC request in remote RPC fifo (blocking function)
    1635     rpc_send_sync( cxy , &rpc );
     2077    rpc_send( cxy , &rpc , true );
    16362078
    16372079    // get output values from RPC descriptor
     
    16702112
    16712113/////////////////////////////////////////////////////////////////////////////////////////
    1672 // [26]          Marshaling functions attached to RPC_VMM_CREATE_VSEG
     2114// [26]          Marshaling functions attached to RPC_VMM_CREATE_VSEG (blocking)
    16732115/////////////////////////////////////////////////////////////////////////////////////////
    16742116
     
    17072149
    17082150    // register RPC request in remote RPC fifo (blocking function)
    1709     rpc_send_sync( cxy , &rpc );
     2151    rpc_send( cxy , &rpc , true );
    17102152
    17112153    // get output values from RPC descriptor
     
    17572199
    17582200/////////////////////////////////////////////////////////////////////////////////////////
    1759 // [27]          Marshaling functions attached to RPC_SCHED_DISPLAY
     2201// [27]          Marshaling functions attached to RPC_SCHED_DISPLAY (blocking)
    17602202/////////////////////////////////////////////////////////////////////////////////////////
    17612203
     
    17792221
    17802222    // register RPC request in remote RPC fifo (blocking function)
    1781     rpc_send_sync( cxy , &rpc );
     2223    rpc_send( cxy , &rpc , true );
    17822224
    17832225    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    18092251
    18102252/////////////////////////////////////////////////////////////////////////////////////////
    1811 // [28]          Marshaling functions attached to RPC_VMM_SET_COW
     2253// [28]          Marshaling functions attached to RPC_VMM_SET_COW (blocking)
    18122254/////////////////////////////////////////////////////////////////////////////////////////
    18132255
     
    18312273
    18322274    // register RPC request in remote RPC fifo (blocking function)
    1833     rpc_send_sync( cxy , &rpc );
     2275    rpc_send( cxy , &rpc , true );
    18342276
    18352277    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     
    18622304}
    18632305
/***************************************************************************************/
/************ Generic functions supporting RPCs : client side **************************/
/***************************************************************************************/

/////////////////////////////////////////////////////////////////////////////////////////
// This blocking function is executed by the client thread in the client cluster.
// It registers the RPC descriptor <rpc> (allocated and initialised by the caller,
// typically on the caller's stack) in the RPC fifo of the <server_cxy> cluster,
// sends an IPI to the server core with the same lid as the client core, and waits
// for RPC completion (i.e. until the server resets the rpc->response field):
// - busy waiting when the caller is the IDLE thread or holds locks (cannot yield),
// - block on THREAD_BLOCKED_RPC and deschedule in all other cases.
// NOTE(review): assumes the rpc_fifo local pointer is identical in all clusters
// (stated by the comment below) — relies on identical kernel layout per cluster.
/////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////
void rpc_send_sync( cxy_t        server_cxy,
                    rpc_desc_t * rpc )
{
    error_t    error;

    thread_t * this = CURRENT_THREAD;
    core_t   * core = this->core;

    // register client thread pointer and core lid in RPC descriptor,
    // so the server can unblock the client and target the right core with its IPI
    rpc->thread    = this;
    rpc->lid       = core->lid;

    // build an extended pointer on the RPC descriptor
    xptr_t   desc_xp = XPTR( local_cxy , rpc );

    // get local pointer on rpc_fifo in remote cluster, with the
    // assumption that local pointers are identical in all clusters
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

    // try to post an item in remote fifo
    // deschedule and retry if remote fifo full
    do
    {
        error = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ),
                                      (uint64_t )desc_xp );
        if ( error )
        {
            printk("\n[WARNING] %s : cluster %x cannot post RPC to cluster %x\n",
            __FUNCTION__ , local_cxy , server_cxy );

            // yield to let the server drain its fifo, when yielding is legal
            if( thread_can_yield() ) sched_yield("RPC fifo full");
        }
    }
    while( error );
 
    // make the fifo write globally visible before the IPI is sent
    hal_fence();
       
    // send IPI to the remote core corresponding to the client core
    dev_pic_send_ipi( server_cxy , core->lid );

    // wait RPC completion:
    // - busy waiting policy during kernel_init, or if threads cannot yield
    // - block and deschedule in all other cases

    if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting
    {

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n"
"        rpc = %d / server = %x / cycle %d\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
rpc->index , server_cxy , hal_time_stamp() );

        // poll the response field written by the server thread
        while( rpc->response ) hal_fixed_delay( 100 );
   
grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );

    }
    else                                                              // block & deschedule
    {

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n"
"        rpc = %d / server = %x / cycle %d\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
rpc->index , server_cxy , hal_time_stamp() );

        // block first, then yield: the server's thread_unblock() will make
        // this thread runnable again once the response is written
        thread_block( this , THREAD_BLOCKED_RPC );
        sched_yield("client blocked on RPC");

grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );

    }

    // check response available
    assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" );

    // acknowledge the IPI sent by the server
    dev_pic_ack_ipi();
   
}  // end rpc_send_sync()
    1950 
    1951 
    1952 
/***************************************************************************************/
/************ Generic functions supporting RPCs : server side **************************/
/***************************************************************************************/

/////////////////////////////////////////////////////////////////////////////////////////
// This function is executed by the thread interrupted by an RPC-related IPI.
// With IRQs disabled, it checks the local RPC fifo: if the fifo is not empty and no
// RPC thread currently owns it, it searches the kernel scheduler list for a ready
// (non blocked) RPC thread, and creates a new one when none is found. It then always
// deschedules the interrupted thread, and restores IRQs when that thread resumes.
/////////////////////////////////////////////////////////////////////////////////////////
////////////////
void rpc_check()
{
    error_t         error;
    thread_t      * thread; 
    uint32_t        sr_save;

    bool_t          found    = false;
    thread_t      * this     = CURRENT_THREAD;
    core_t        * core     = this->core;
    scheduler_t   * sched    = &core->scheduler;
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );

    // interrupted thread not preemptable during RPC check
    hal_disable_irq( &sr_save );

    // check RPC FIFO not empty and no RPC thread handling it 
    if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
    {
        // search one non blocked RPC thread   
        list_entry_t * iter;
        LIST_FOREACH( &sched->k_root , iter )
        {
            thread = LIST_ELEMENT( iter , thread_t , sched_list );
            if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )
            {
                found = true;
                break;
            }
        }

        // create new RPC thread if not found   
        if( found == false )                   
        {
            // new RPC thread runs rpc_thread_func on the same core as the
            // interrupted thread
            error = thread_kernel_create( &thread,
                                          THREAD_RPC,
                                          &rpc_thread_func,
                                          NULL,
                                          this->core->lid );
            if( error )
            {
                // creation failure is non fatal : the fifo will be checked
                // again on the next IPI
                printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n",
                __FUNCTION__ , local_cxy );
            }
            else
            {
                // unblock created RPC thread
                thread->blocked = 0;

                // update core descriptor counter 
                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );

grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / cycle %d\n",
__FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );

            }
        }
    }

grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );

    // interrupted thread deschedule always           
    sched_yield("IPI received");

grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n",
__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );

    // interrupted thread restore IRQs after resume
    hal_restore_irq( sr_save );

} // end rpc_check()
    2032 
    2033 
/////////////////////////////////////////////////////////////////////////////////////////
// Entry point of the kernel RPC server thread.
// It runs with IRQs disabled, in an "infinite" external loop. On each iteration it
// tries to take ownership of the local RPC fifo; on success it consumes up to
// CONFIG_RPC_PENDING_MAX requests in an internal loop: for each request it calls the
// registered server function, atomically decrements the client's response counter,
// and — on the last expected response — unblocks the client thread and sends it an
// IPI. The thread exits (suicide) when the per-cluster RPC thread count reaches
// CONFIG_RPC_THREADS_MAX; otherwise it deschedules without blocking and retries.
/////////////////////////////////////////////////////////////////////////////////////////
//////////////////////
void rpc_thread_func()
{
    uint32_t     count;       // handled RPC requests counter
    error_t      empty;       // local RPC fifo state
    xptr_t       desc_xp;     // extended pointer on RPC request
    cxy_t        desc_cxy;    // RPC request cluster (client)
    rpc_desc_t * desc_ptr;    // RPC request local pointer
    uint32_t     index;       // RPC request index
    uint32_t     responses;   // number of responses received by client
    thread_t   * thread_ptr;  // local pointer on client thread
    lid_t        core_lid;    // local index of client core
 
    // makes RPC thread not preemptable
    hal_disable_irq( NULL );
 
    thread_t      * this     = CURRENT_THREAD;
    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;

    // two embedded loops:
    // - external loop : "infinite" RPC thread
    // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
 
    while(1)  // external loop
    {
        // try to take RPC_FIFO ownership
        // (test-and-set on rpc_fifo->owner with this thread's trdid)
        if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
        {
            // initializes RPC requests counter
            count = 0;

            // acknowledge local IPI
            dev_pic_ack_ipi();

            // exit internal loop in three cases:
            // - RPC fifo is empty
            // - ownership has been lost (because descheduling)
            // - max number of RPCs is reached
            while( 1 )  // internal loop
            {
                empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );

                if ( empty == 0 ) // one RPC request found
                {
                    // get client cluster and pointer on RPC descriptor
                    desc_cxy = (cxy_t)GET_CXY( desc_xp );
                    desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp );

                    // get rpc index from RPC descriptor
                    index = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );

grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n",
__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );

                    // call the relevant server function
                    rpc_server[index]( desc_xp );

grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n",
__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );

                    // increment handled RPC counter
                    count++;

                    // decrement response counter in RPC descriptor
                    // (atomic : several server threads may answer the same client)
                    responses = hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1);

                    // unblock client thread  and send IPI to client core if last response
                    // (hal_remote_atomic_add returns the value BEFORE decrement,
                    //  so 1 means this was the last expected response)
                    if( responses == 1 )
                    {
                        // get pointer on client thread and unblock it
                        thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread));
                        thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC );

                        hal_fence();

                        // get client core lid and send IPI
                        core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid));
                        dev_pic_send_ipi( desc_cxy , core_lid );
                    }
                }
       
                // check exit condition
                if( local_fifo_is_empty( rpc_fifo )  ||
                    (rpc_fifo->owner != this->trdid) ||
                    (count >= CONFIG_RPC_PENDING_MAX) ) break;
            } // end internal loop

            // release rpc_fifo ownership if not lost
            if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
        }

        // suicide if too many RPC threads in cluster
        if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
        {

grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n",
__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );

            // update RPC threads counter
            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );

            // suicide
            thread_exit();
        }

grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n",
__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );

        // deschedule without blocking
        sched_yield("RPC fifo empty or too much work");

grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n",
__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );

    } // end external loop

} // end rpc_thread_func()
    2151 
    2152 
     2306
  • trunk/kernel/kern/rpc.h

    r408 r409  
    3232#include <vseg.h>
    3333#include <remote_fifo.h>
     34#include <signal.h>
    3435
    3536/**** Forward declarations ****/
     
    6061{
    6162    RPC_PMEM_GET_PAGES         = 0,
    62     RPC_PROCESS_MAKE_EXEC      = 1,     
    63     RPC_PROCESS_MAKE_FORK      = 2,
    64     RPC_PROCESS_KILL           = 3,
    65     RPC_THREAD_USER_CREATE     = 4,
    66     RPC_THREAD_KERNEL_CREATE   = 5,
    67     RPC_SIGNAL_RISE            = 6,
     63    RPC_PMEM_RELEASE_PAGES     = 1,
     64    RPC_PROCESS_MAKE_EXEC      = 2,     
     65    RPC_PROCESS_MAKE_FORK      = 3,
     66    RPC_PROCESS_MAKE_EXIT      = 4,
     67    RPC_PROCESS_MAKE_KILL      = 5,
     68    RPC_THREAD_USER_CREATE     = 6,
     69    RPC_THREAD_KERNEL_CREATE   = 7,
     70    RPC_THREAD_KILL            = 8,
     71    RPC_PROCESS_SIGACTION      = 9,
    6872
    6973    RPC_VFS_INODE_CREATE       = 10,
     
    8690    RPC_SCHED_DISPLAY          = 27,
    8791    RPC_VMM_SET_COW            = 28,
     92
    8893    RPC_MAX_INDEX              = 30,
    8994}
     
    116121
    117122/***********************************************************************************
    118  * This blocking function executes on the client core.
    119  * It puts one RPC extended pointer in the remote fifo.
    120  * It sends an IPI if fifo is empty, and waits until RPC response available.
    121  * The RPC descriptor must be allocated in the caller's stack
    122  * and initialised by the caller.  Exit with a Panic message if remote fifo
    123  * is still full after (CONFIG_RPC_PUT_MAX_ITERATIONS) retries.
     123 * This function is executed by the client thread in the client cluster.
     124 * It puts one RPC descriptor defined by the <desc> argument in the remote fifo
     125 * defined by the <cxy> argument.  It sends an IPI to the server if fifo is empty.
     126 * The RPC descriptor must be allocated in the caller's stack, and initialised by
     127 * the caller. It exit with a Panic message if remote fifo is still full after
     128 * (CONFIG_RPC_PUT_MAX_ITERATIONS) retries.
     129 * - When the <block> argument is true, this function blocks and deschedule.
     130 *   It returns only when the server acknowledges the RPC by writing in the RPC
     131 *   "response" field, and unblocks the client.
     132 * - When the <block> argument is false, this function returns as soon as the RPC
     133 *   has been registered in the FIFO, and the server thread must directly signal
     134 *   completion to the client thread.
    124135 ***********************************************************************************
    125136 * @ cxy   : server cluster identifier
    126137 * @ desc  : local pointer on RPC descriptor in client cluster
    127  **********************************************************************************/
    128 void rpc_send_sync( cxy_t        cxy,   
    129                     rpc_desc_t * desc );
     138 * @ block : boolean true when blocking behaviour is required.
     139 **********************************************************************************/
     140void rpc_send( cxy_t        cxy,   
     141               rpc_desc_t * desc,
     142               bool_t       block );
    130143
    131144
     
    186199
    187200/***********************************************************************************
    188  * [1] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing
     201 * [1] The RPC_PMEM_RELEASE_PAGES release one or several pages to a remote cluster.
     202 ***********************************************************************************
     203 * @ cxy     : server cluster identifier
     204 * @ page    : [in] local pointer on page descriptor to release.
     205 **********************************************************************************/
     206void rpc_pmem_release_pages_client( cxy_t            cxy,
     207                                    struct page_s  * page );
     208
     209void rpc_pmem_release_pages_server( xptr_t xp );
     210
     211/***********************************************************************************
     212 * [2] The RPC_PROCESS_MAKE_EXEC creates a new process descriptor, from an existing
    189213 * process descriptor in a remote server cluster. This server cluster must be
    190214 * the owner cluster for the existing process. The new process descriptor is
     
    204228
    205229/***********************************************************************************
    206  * [2] The RPC_PROCESS_MAKE_FORK creates a "child" process descriptor, and the
     230 * [3] The RPC_PROCESS_MAKE_FORK creates a "child" process descriptor, and the
    207231 * associated "child" thread descriptor in a target remote cluster that can be
    208232 * any cluster.  The child process is initialized from informations found in the
     
    227251
    228252/***********************************************************************************
    229  * [3] The RPC_PROCESS_KILL is actually a multicast RPC sent by the reference cluster
    230  * to other clusters containing a process descriptor copy, to destroy these copies.
    231  ***********************************************************************************
    232  * @ process  : local pointer on target process.
    233  **********************************************************************************/
    234 void rpc_process_kill_client( struct process_s * process );
    235 
    236 void rpc_process_kill_server( xptr_t xp );
    237 
    238 /***********************************************************************************
    239  * [4] The RPC_THREAD_USER_CREATE creates an user thread in the server cluster,
     253 * [4] The RPC_PROCESS_MAKE_EXIT can be called by any thread to request the owner
     254 * cluster to execute the process_make_exit() function for a calling process.
     255 ***********************************************************************************
     256 * @ cxy      : server cluster identifier.
     257 * @ process  : local pointer on calling process in owner cluster.
     258 * @ status   : calling process exit status.
     259 **********************************************************************************/
     260void rpc_process_make_exit_client( cxy_t              cxy,
     261                                   struct process_s * process,
     262                                   uint32_t           status );
     263
     264void rpc_process_make_exit_server( xptr_t xp );
     265
     266/***********************************************************************************
     267 * [5] The RPC_PROCESS_MAKE_KILL can be called by any thread to request the owner
     268 * cluster to execute the process_make_kill() function for a target process.
     269 ***********************************************************************************
     270 * @ cxy      : server cluster identifier.
     271 * @ process  : local pointer on target process in owner cluster.
     272 * @ seg_id   : signal type (only SIGKILL / SIGSTOP / SIGCONT are supported).
     273 **********************************************************************************/
     274void rpc_process_make_kill_client( cxy_t              cxy,
     275                                   struct process_s * process,
     276                                   uint32_t           seg_id );
     277
     278void rpc_process_make_kill_server( xptr_t xp );
     279
     280/***********************************************************************************
     281 * [6] The RPC_THREAD_USER_CREATE creates an user thread in the server cluster,
    240282 * as specified by the arguments. It returns an extended pointer on the new
    241283 * thread descriptor in server cluster, and an error code.
     
    258300
    259301/***********************************************************************************
    260  * [5] The RPC_THREAD_KERNEL_CREATE creates a kernel thread in the server cluster,
     302 * [7] The RPC_THREAD_KERNEL_CREATE creates a kernel thread in the server cluster,
    261303 * as specified by the type, func and args arguments. It returns the local pointer
    262304 * on the thread descriptor in server cluster and an error code.
     
    280322
    281323/***********************************************************************************
    282  * [6] The RPC_SIGNAL_RISE ask a target cluster to register a given signal in
    283  * all threads descriptors of a given process.
    284  * It is used by the sys_kill() function.
     324 * [8] The RPC_THREAD_KILL ask a target cluster to kill a given thread descriptor.
     325 * It is called by the sys_thread_cancel() function for a remote thread.
    285326 ***********************************************************************************
    286327 * @ cxy       : server cluster identifier.
    287  * @ process   : [in]  local pointer on target process descriptor in server.
    288  * @ sig_id    : [in]  signal index.
    289  **********************************************************************************/
    290 void rpc_signal_rise_client( cxy_t              cxy,
    291                              struct process_s * process,
    292                              uint32_t           sig_id );
     328 * @ thread   : [in]  local pointer on target process descriptor in server.
     329 **********************************************************************************/
     330void rpc_thread_kill_client( cxy_t              cxy,
     331                             struct thread_s  * thread );
    293332                             
    294 void rpc_signal_rise_server( xptr_t xp );
     333void rpc_thread_kill_server( xptr_t xp );
     334
     335/***********************************************************************************
     336 * [9] The RPC_PROCESS_SIGACTION allows the owner cluster to request any other
     337 * cluster to execute a given sigaction (BLOCK / UNBLOCK / DELETE) for all threads
     338 * of a given process.
     339 *
     340 * WARNING : It is implemented as a NON BLOCKING multicast RPC, that can be sent
     341 * in parallel to all process copies. The various server threads must decrement the
     342 * responses counter defined by the <rsp_xp> argument, and the last server thread
     343 * must unblock the <client_xp> thread.
     344 ***********************************************************************************
     345 * @ cxy       : server cluster identifier.
     346 * @ process   : [in]  local pointer on target process in server cluster.
     347 * @ sigaction : [in]  action type (BLOCK / UNBLOCK / DELETE).
     348 * @ rsp_xp    : [in]  extended pointer on response counter.
     349 * @ client_xp : [in]  extended pointer on client thread.
     350 **********************************************************************************/
     351void rpc_process_sigaction_client( cxy_t               cxy,
     352                                   struct process_s  * process,
     353                                   uint32_t            sigaction,
     354                                   xptr_t              rsp_xp,
     355                                   xptr_t              client_xp );
     356                             
     357void rpc_process_sigaction_server( xptr_t xp );
    295358
    296359/***********************************************************************************
  • trunk/kernel/kern/scheduler.c

    r408 r409  
    5858    list_root_init( &sched->k_root );
    5959
     60    sched->sig_pending    = false;            // no pending signal
     61
    6062}  // end sched_init()
    6163
     
    7274    if( type == THREAD_USER )
    7375    {
    74         // register thread in scheduler user list
    7576        list_add_last( &sched->u_root , &thread->sched_list );
    7677        sched->u_threads_nr++;
    77 
    78         // initialize u_last field if first user thread
    7978        if( sched->u_last == NULL ) sched->u_last = &thread->sched_list;
    8079    }
    8180    else // kernel thread
    8281    {
    83         // register thread in scheduler kernel list
    8482        list_add_last( &sched->k_root , &thread->sched_list );
    8583        sched->k_threads_nr++;
    86 
    87         // initialize k_last field if first kernel thread
    8884        if( sched->k_last == NULL ) sched->k_last = &thread->sched_list;
    8985    }
     
    9288    spinlock_unlock( &sched->lock );
    9389
    94 }  // end sched_register()
     90}  // end sched_register_thread()
    9591
    9692/////////////////////////////////////////////
    9793void sched_remove_thread( thread_t * thread )
    9894{
    99     core_t       * core  = thread->core;
    100     scheduler_t  * sched = &core->scheduler;
    101     thread_type_t  type  = thread->type;
     95    scheduler_t * sched = &thread->core->scheduler;
     96    thread_type_t type  = thread->type;
    10297
    10398    // take lock protecting sheduler lists
     
    106101    if( type == THREAD_USER )
    107102    {
    108         // remove thread from user list
    109103        list_unlink( &thread->sched_list );
    110104        sched->u_threads_nr--;
    111 
    112         // reset the u_last field if list empty
    113105        if( sched->u_threads_nr == 0 ) sched->u_last = NULL;
    114106    }
    115     else // kernel thread
    116     {
    117         // remove thread from kernel list
     107    else // kernel thread
     108    {
    118109        list_unlink( &thread->sched_list );
    119110        sched->k_threads_nr--;
    120 
    121         // reset the k_last field if list empty
    122111        if( sched->k_threads_nr == 0 ) sched->k_last = NULL;
    123112    }
    124113
    125     // release lock
     114    // release lock 
    126115    spinlock_unlock( &sched->lock );
    127116
    128 }  // end sched_remove()
     117}  // end sched_remove_thread()
    129118
    130119//////////////////////////////////////////////
     
    214203}  // end sched_select()
    215204
    216 ///////////////////////////////////////////
    217 void sched_kill_thread( thread_t * thread )
    218 {
    219     // check locks
    220     if( thread_can_yield() == false )
    221     {
    222         panic("locks not released for thread %x in process %x on core[%x][%d]",
    223         thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
    224     }
    225 
    226     // remove thread from scheduler
    227     sched_remove_thread( thread );
    228 
    229     // reset the THREAD_SIG_KILL signal
    230     thread_reset_signal( thread , THREAD_SIG_KILL );
    231 
    232     // detached thread can suicide
    233     if( thread->signals & THREAD_SIG_SUICIDE )
    234     {
    235         assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
    236         "thread must be detached in case of suicide\n" );
    237 
    238         // remove thread from process
    239         process_remove_thread( thread );
    240 
    241         // release memory for thread descriptor
    242         thread_destroy( thread );
    243     }
    244 }  // end sched_kill_thread()
    245 
    246205//////////////////////////////////////////
    247206void sched_handle_signals( core_t * core )
     
    249208    list_entry_t * iter;
    250209    thread_t     * thread;
     210
    251211    scheduler_t  * sched = &core->scheduler;
    252 
    253 // signal_dmsg("\n@@@ %s enter at cycle %d\n",
    254 // __FUNCTION__ , hal_time_stamp() );
    255212
    256213    // take lock protecting threads lists
     
    261218    {
    262219        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    263         if( thread->signals ) // sched_kill_thread( thread );
    264         {
    265             printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
    266             __FUNCTION__, thread, thread->signals, hal_time_stamp() );
    267         }
    268     }
    269 
    270     // handle kernel threads
    271     LIST_FOREACH( &sched->k_root , iter )
    272     {
    273         thread = LIST_ELEMENT( iter , thread_t , sched_list );
    274         if( thread->signals )  // sched_kill_thread( thread );
    275         {
    276             printk("\n[WARNING] %s : thread %x has signal %x at cycle %d\n",
    277             __FUNCTION__, thread, thread->signals, hal_time_stamp() );
    278 
     220
     221        if( thread->flags & THREAD_FLAG_SIGNAL )  // thread has signal
     222        {
     223            // decrement response counter to acknowledge signal
     224            hal_atomic_add( thread->sig_rsp_count , -1 );
     225
     226            // reset signal
     227            thread_reset_signal( thread );
    279228        }
    280229    }
     
    283232    spinlock_unlock( &sched->lock );
    284233
    285 // signal_dmsg("\n@@@ %s exit at cycle %d\n",
    286 // __FUNCTION__ , hal_time_stamp() );
    287 
    288234} // end sched_handle_signals()
    289235
     
    293239    thread_t    * next;
    294240    thread_t    * current = CURRENT_THREAD;
    295     scheduler_t * sched   = &current->core->scheduler;
     241    core_t      * core    = current->core;
     242    scheduler_t * sched   = &core->scheduler;
    296243 
    297244#if( CONFIG_SCHED_DEBUG & 0x1 )
    298 if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
     245if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( core->lid );
    299246#endif
    300247
     
    319266    assert( (next->blocked == 0) || (next->type = THREAD_IDLE) , __FUNCTION__ ,
    320267    "next thread %x (%s) is blocked on core[%x,%d]\n",
    321     next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );
     268    next->trdid , thread_type_str(next->type) , local_cxy , core->lid );
    322269
    323270    // switch contexts and update scheduler state if next != current
     
    327274sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    328275"      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
    329 __FUNCTION__, local_cxy, current->core->lid, cause,
     276__FUNCTION__, local_cxy, core->lid, cause,
    330277current, thread_type_str(current->type), current->process->pid, current->trdid,
    331278next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
     
    352299sched_dmsg("\n[DBG] %s : core[%x,%d] / cause = %s\n"
    353300"      thread %x (%s) (%x,%x) continue / cycle %d\n",
    354 __FUNCTION__, local_cxy, current->core->lid, cause,
     301__FUNCTION__, local_cxy, core->lid, cause,
    355302current, thread_type_str(current->type), current->process->pid, current->trdid,
    356303(uint32_t)hal_get_cycles() );
    357304
    358305    }
     306
     307    // handle signals for all threads executing on this core.
     308    sched_handle_signals( core );
    359309
    360310    // exit critical section / restore SR from next thread context
  • trunk/kernel/kern/scheduler.h

    r408 r409  
    4949    struct thread_s * idle;         /*! pointer on idle thread                              */
    5050    struct thread_s * current;      /*! pointer on current running thread                   */
 51    bool_t            sig_pending;  /*! signal_handler must be called when true             */
    5152}
    5253scheduler_t;
     
    6667                            struct thread_s * thread );
    6768
    68 /********************************************************************************************* 
    69  *  This function removes a thread from the set of threads attached to a given core.
     69/*********************************************************************************************
      70 * This function removes a thread from its scheduler.
    7071 *********************************************************************************************
    7172 * @ thread  : local pointer on the thread descriptor.
     
    8788/*********************************************************************************************
    8889 * This function scan all threads attached to a given core scheduler, and executes
    89  * the relevant actions for pending signals, such as the THREAD_SIG_KILL signal.
     90 * the relevant actions for pending KILL or EXIT signals.
      91 * It is called by the sched_yield() function, with IRQ disabled.
    9092 *********************************************************************************************
    9193 * @ core    : local pointer on the core descriptor.
    9294 ********************************************************************************************/
    9395void sched_handle_signals( struct core_s * core );
    94 
    95 /*********************************************************************************************
    96  * This function is used by the scheduler of a given core to actually kill a thread that has
    97  * the SIG_KILL / SIG_SUICIDE signal set (following a thread_exit() or a thread_kill() event).
    98  * - It checks that the thread has released all locks => panic otherwise...
    99  * - It removes the thread from the scheduler.
    100  * - It reset the SIG_KILL signal to acknoledge the killer.
    101  * - In case of SIG_SUCIDE, it removes the detached thread from its process, and destroys it.
    102  *********************************************************************************************
    103  * @ thread  : local pointer on the thread descriptor.
    104  ********************************************************************************************/
    105 void sched_kill_thread( struct thread_s * thread );
    10696
    10797/*********************************************************************************************
     
    123113
    124114/*********************************************************************************************
      115 * This function unlinks a thread identified by the <thread> pointer from its process.
      116 * It is called by the sched_handle_signals() function when one EXIT or KILL signal is set,
      117 * and it implements the first step of a thread destruction, but it can also be directly called by a local killer thread.
      118 * - It detaches the thread from the scheduler.
      119 * - It detaches the thread from the process.
      120 * - It detaches the thread from the parent thread when the thread is attached.
      121 * - It destroys the thread descriptor.
      122 * - It acknowledges the killer thread in case of kill signal.
     123 *********************************************************************************************
     124 * @ thread   : pointer on thread to be killed.
     125 ********************************************************************************************/
     126void sched_kill_thread( struct thread_s * thread );
     127
     128/*********************************************************************************************
    125129 * This function display the internal state of the local core identified by its <lid>.
    126130 *********************************************************************************************
  • trunk/kernel/kern/signal.c

    r407 r409  
    22 * signal.c - signal-management related operations implementation
    33 *
    4  * Author  Alain Greiner    (2016,2017)
     4 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
     5 *         Mohamed Lamine Karaoui (2015)
     6 *         Alain Greiner    (2016,2017)
    57 *
    68 * Copyright (c) UPMC Sorbonne Universites
     
    2325
    2426#include <hal_types.h>
    25 #include <hal_atomic.h>
    2627#include <printk.h>
    27 #include <thread.h>
    28 #include <spinlock.h>
    2928#include <signal.h>
    30 
    31 //////////////////////////////////////
    32 void signal_rise( process_t * process,
    33                   uint32_t    sig_id )
    34 {
    35     // get the lock protecting the set of local threads
    36         spinlock_lock( &process->th_lock );
    37 
    38     // loop on local threads
    39         thread_t * thread;
    40         uint32_t   i;
    41         for( i = 0 ; i < process->th_nr ; i++ )
    42         {
    43                 thread = process->th_tbl[i];
    44                 hal_atomic_or( &thread->signals , (1 << sig_id) );
    45 
    46         signal_dmsg("\n[DBG] %s : thread %x in process %x received signal %d\n",
    47                     __FUNCTION__, thread->trdid , process->pid , sig_id );
    48         }
    49 
    50     // release the lock
    51         spinlock_unlock( &process->th_lock );
    52 
    53 }  // end signal_rise()
    5429
    5530/*
  • trunk/kernel/kern/signal.h

    r23 r409  
    7070
    7171#define SIG_DEFAULT_MASK         0xFFEEFFFF
    72 #define SIG_DEFAULT_STACK_SIZE   2048
     72
    7373
    7474/****  Forward declarations  ****/
  • trunk/kernel/kern/thread.c

    r408 r409  
    112112/////////////////////////////////////////////////////////////////////////////////////
    113113// This static function initializes a thread descriptor (kernel or user).
    114 // It can be called by the four functions:
     114// It can be called by the three functions:
    115115// - thread_user_create()
    116116// - thread_user_fork()
     
    164164
    165165    thread->local_locks     = 0;
    166     list_root_init( &thread->locks_root );
    167 
    168166    thread->remote_locks    = 0;
     167
     168#if CONFIG_LOCKS_DEBUG
     169    list_root_init( &thread->locks_root ); 
    169170    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
     171#endif
    170172
    171173    thread->u_stack_base    = u_stack_base;
     
    177179    thread->entry_args      = args;         // thread function arguments
    178180    thread->flags           = 0;            // all flags reset
    179     thread->signals         = 0;            // no pending signal
    180181    thread->errno           = 0;            // no error detected
    181182    thread->fork_user       = 0;            // no user defined placement for fork
    182183    thread->fork_cxy        = 0;            // user defined target cluster for fork
    183 
    184     // thread blocked
    185     thread->blocked = THREAD_BLOCKED_GLOBAL;
     184    thread->blocked         = THREAD_BLOCKED_GLOBAL;
    186185
    187186    // reset children list
     
    195194    // reset thread info
    196195    memset( &thread->info , 0 , sizeof(thread_info_t) );
     196
     197    // initializes join_lock
     198    remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );
    197199
    198200    // initialise signature
     
    296298        return EINVAL;
    297299    }
    298 
    299     // set LOADABLE flag
    300     thread->flags = THREAD_FLAG_LOADABLE;
    301300
    302301    // set DETACHED flag if required
     
    593592        uint32_t     tm_start;
    594593        uint32_t     tm_end;
    595     reg_t        state;
     594    reg_t        save_sr;
    596595
    597596    process_t  * process    = thread->process;
     
    614613    // release memory allocated for CPU context and FPU context
    615614        hal_cpu_context_destroy( thread );
    616         hal_fpu_context_destroy( thread );
     615        if ( thread->type == THREAD_USER ) hal_fpu_context_destroy( thread );
    617616       
    618617    // release FPU if required
    619618    // TODO This should be done before calling thread_destroy()
    620         hal_disable_irq( &state );
     619        hal_disable_irq( &save_sr );
    621620        if( core->fpu_owner == thread )
    622621        {
     
    624623                hal_fpu_disable();
    625624        }
    626         hal_restore_irq( state );
     625        hal_restore_irq( save_sr );
    627626
    628627    // remove thread from process th_tbl[]
     
    668667    xlist_add_first( root , entry );
    669668    hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->children_nr ) , 1 );
    670 }
     669
     670}  // end thread_child_parent_link()
    671671
    672672///////////////////////////////////////////////////
     
    693693    // release the lock
    694694    remote_spinlock_unlock( lock );
    695 }
     695
     696}  // thread_child_parent_unlink()
    696697
    697698/////////////////////////////////////////////////
    698699inline void thread_set_signal( thread_t * thread,
    699                                uint32_t   mask )
    700 {
    701     hal_atomic_or( &thread->signals , mask );
     700                               uint32_t * sig_rsp_count )
     701{
     702    reg_t    save_sr;   // for critical section
     703
      704    // get pointer on target thread scheduler
     705    scheduler_t * thread_sched = &thread->core->scheduler;
     706
     707    // wait scheduler ready to handle a new signal
     708    while( thread_sched->sig_pending ) asm volatile( "nop" );
     709   
     710    // enter critical section
     711    hal_disable_irq( &save_sr );
     712     
     713    // set signal in thread scheduler
     714    thread_sched->sig_pending = true;
     715
      716    // set signal in target thread "flags"
     717    hal_atomic_or( &thread->flags , THREAD_FLAG_SIGNAL );
     718
      719    // set pointer on responses counter in target thread
     720    thread->sig_rsp_count = sig_rsp_count;
     721   
     722    // exit critical section
     723    hal_restore_irq( save_sr );
     724
    702725    hal_fence();
    703 }
    704 
    705 ///////////////////////////////////////////////////
    706 inline void thread_reset_signal( thread_t * thread,
    707                                  uint32_t   mask )
    708 {
    709     hal_atomic_and( &thread->signals , ~mask );
     726
     727}  // thread_set_signal()
     728
     729////////////////////////////////////////////////////
     730inline void thread_reset_signal( thread_t * thread )
     731{
     732    reg_t    save_sr;   // for critical section
     733
     734    // get pointer on target thread scheduler
     735    scheduler_t * sched = &thread->core->scheduler;
     736
     737    // check signal pending in scheduler
     738    assert( sched->sig_pending , __FUNCTION__ , "no pending signal" );
     739   
     740    // enter critical section
     741    hal_disable_irq( &save_sr );
     742     
     743    // reset signal in scheduler
     744    sched->sig_pending = false;
     745
     746    // reset signal in thread "flags"
     747    hal_atomic_and( &thread->flags , ~THREAD_FLAG_SIGNAL );
     748
     749    // reset pointer on responses counter
     750    thread->sig_rsp_count = NULL;
     751   
     752    // exit critical section
     753    hal_restore_irq( save_sr );
     754
    710755    hal_fence();
    711 }
     756
     757}  // thread_reset_signal()
    712758
    713759////////////////////////////////
     
    760806}  // end thread_unblock()
    761807
    762 /////////////////////
    763 error_t thread_exit()
    764 {
    765     reg_t      sr_save;
    766 
    767         thread_t * this = CURRENT_THREAD;
    768 
    769     // test if this thread can be descheduled
    770         if( !thread_can_yield() )
    771         {
    772         printk("ERROR in %s : locks not released for thread %x in process %x on core[%x,%d]\n",
    773         __FUNCTION__, this->trdid, this->process->pid, local_cxy, this->core->lid );
    774         return EINVAL;
    775     }
    776 
    777     if( this->flags & THREAD_FLAG_DETACHED )
    778     {
    779         // if detached set signal and set blocking cause atomically
    780         hal_disable_irq( &sr_save );
    781         thread_set_signal( this , THREAD_SIG_KILL );
    782         thread_block( this , THREAD_BLOCKED_EXIT );
    783         hal_restore_irq( sr_save );
    784     }
    785     else
    786     {
    787         // if attached, set blocking cause
    788         thread_block( this , THREAD_BLOCKED_EXIT );
    789     }
    790 
    791     // deschedule
    792     sched_yield( "exit" );
    793     return 0;
    794 
    795 }  // end thread_exit()
    796 
    797808/////////////////////////////////////
    798809void thread_kill( thread_t * target )
    799810{
    800     // set SIG_KILL signal in target thread descriptor
    801     thread_set_signal( target , THREAD_SIG_KILL );
     811    volatile uint32_t  sig_rsp_count = 1;     // responses counter
     812
     813    thread_t * killer = CURRENT_THREAD;
     814
     815kill_dmsg("\n[DBG] %s : killer thread %x enter for target thread %x\n",
      816__FUNCTION__, killer->trdid , target->trdid );
    802817
    803818    // set the global blocked bit in target thread descriptor.
    804819    thread_block( target , THREAD_BLOCKED_GLOBAL );
    805820
    806     // send an IPI to schedule the target thread core.
    807     dev_pic_send_ipi( local_cxy , target->core->lid );
     821    // request target scheduler to deschedule the target thread
     822    // when killer thread is not running on same core as target thread
     823    if( killer->core->lid != target->core->lid )
     824    {
     825        // set signal in target thread descriptor and in target scheduler
     826        thread_set_signal( target , (uint32_t *)(&sig_rsp_count) );
     827
     828        // send an IPI to the target thread core.
     829        dev_pic_send_ipi( local_cxy , target->core->lid );
     830
     831        // poll the response
     832        while( 1 )
     833        {
     834            // exit when response received from scheduler
     835            if( sig_rsp_count == 0 )  break;
     836
     837            // deschedule without blocking
     838            hal_fixed_delay( 1000 );
     839        }
     840    }
     841
     842        // release FPU if required
     843        if( target->core->fpu_owner == target )  target->core->fpu_owner = NULL;
     844
     845    // detach thread from parent if attached
     846    if( (target->flags & THREAD_FLAG_DETACHED) == 0 )
     847    thread_child_parent_unlink( target->parent , XPTR( local_cxy , target ) );
     848
     849    // detach thread from process
     850    process_remove_thread( target );
     851
     852    // remove thread from scheduler
     853    sched_remove_thread( target );
     854
     855    // release memory allocated to target thread
     856    thread_destroy( target );
     857
      858kill_dmsg("\n[DBG] %s : killer thread %x exit for target thread %x\n",
      859__FUNCTION__, killer->trdid , target->trdid );
    808860
    809861}  // end thread_kill()
  • trunk/kernel/kern/thread.h

    r408 r409  
    4444
    4545/***************************************************************************************
    46  * These macros are used to compose or decompose global thread identifier (TRDID)
     46 * These macros are used to compose or decompose the global thread identifier (TRDID)
    4747 * to or from cluster identifier / local thread index (CXY , LTID)
    4848 **************************************************************************************/
     
    6969 **************************************************************************************/
    7070
    71 #define THREAD_FLAG_LOADABLE     0x0001  /*! This thread has not been executed yet    */
    72 #define THREAD_FLAG_DETACHED     0x0002  /*! This thread is detached from parent      */
    73 #define THREAD_FLAG_JOIN         0x0004  /*! Parent thread made a join                */
    74 #define THREAD_FLAG_EXIT         0x0008  /*! This thread made an exit                 */
    75 #define THREAD_FLAG_SCHED        0x0010  /*! Scheduling required for this thread      */
    76 
    77 /***************************************************************************************
    78  * This defines the masks associated to the thread signals.
    79  **************************************************************************************/
    80 
    81 #define THREAD_SIG_KILL          0x0001  /*! This thread killed by another thread     */
    82 #define THREAD_SIG_SUICIDE       0x0002  /*! This thread required exit                */
     71#define THREAD_FLAG_DETACHED     0x0001  /*! This thread is detached from parent      */
     72#define THREAD_FLAG_JOIN_DONE    0x0002  /*! Parent thread made a join                */
     73#define THREAD_FLAG_SCHED        0x0004  /*! Scheduling required for this thread      */
      74#define THREAD_FLAG_SIGNAL       0x0008  /*! Acknowledge of descheduling required     */
    8375
    8476/***************************************************************************************
     
    8981#define THREAD_BLOCKED_IO        0x0002  /*! thread wait IO operation completion      */
    9082#define THREAD_BLOCKED_MAPPER    0x0004  /*! thread wait mapper                       */
    91 #define THREAD_BLOCKED_JOIN      0x0008  /*! thread blocked in join / wait exit       */
    92 #define THREAD_BLOCKED_EXIT      0x0010  /*! thread blocked in exit / wait join       */
    93 #define THREAD_BLOCKED_KILL      0x0020  /*! thread received kill signal              */
    94 #define THREAD_BLOCKED_SEM       0x0040  /*! thread wait semaphore                    */
    95 #define THREAD_BLOCKED_PAGE      0x0080  /*! thread wait page access                  */
    96 #define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait POSIX (cond/mutex/barrier)   */
      83#define THREAD_BLOCKED_EXIT      0x0008  /*! thread blocked in exit / wait join       */
      84#define THREAD_BLOCKED_JOIN      0x0010  /*! thread blocked in join / wait exit       */
     85#define THREAD_BLOCKED_SEM       0x0020  /*! thread wait semaphore                    */
     86#define THREAD_BLOCKED_PAGE      0x0040  /*! thread wait page access                  */
     87#define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait (cond/mutex/barrier)         */
    9788#define THREAD_BLOCKED_RPC       0x0200  /*! thread wait RPC completion               */
    98 
    99 #define THREAD_BLOCKED_DEV_ISR   0x4000  /*! thread DEV wait ISR                      */
     89#define THREAD_BLOCKED_DEV_ISR   0x0400  /*! thread DEV wait ISR                      */
    10090
    10191/***************************************************************************************
     
    156146    xptr_t              parent;          /*! extended pointer on parent thread        */
    157147
    158     void              * exit_value;      /*! exit_value used in case of join          */
    159 
    160148        uint32_t            local_locks;         /*! number of local locks owned by thread    */
    161     list_entry_t        locks_root;      /*! root of local locks list                 */
    162 
    163     remote_spinlock_t * flags_lock;      /*! lock protecting the flags                */
    164 
    165         uint32_t            remote_locks;        /*! number of local locks owned by thread    */
    166     xlist_entry_t       xlocks_root;     /*! root of remote locks list                */
     149        uint32_t            remote_locks;        /*! number of remote locks owned by thread   */
     150
      151    remote_spinlock_t   join_lock;       /*! lock protecting the join/exit            */
     152    void              * join_value;      /*! exit_value used in case of join          */
     153    xptr_t              join_xp;         /*! extended pointer on joining thread       */
     154
     155    uint32_t          * sig_rsp_count;   /*! pointer on signal response counter       */
    167156
    168157        intptr_t            u_stack_base;    /*! user stack base address                  */
     
    173162
    174163    uint32_t            flags;           /*! bit vector of flags                      */
    175     uint32_t            signals;         /*! bit vector of (KILL / SUICIDE) signals   */
    176164    uint32_t            blocked;         /*! bit vector of blocking causes            */
    177165
     
    203191
    204192    xlist_entry_t       wait_list;       /*! member of threads blocked on same cond   */
     193
     194#if CONFIG_LOCKS_DEBUG
     195    list_entry_t        locks_root;      /*! root of list of locks taken              */
     196    xlist_entry_t       xlocks_root;     /*! root of xlist of remote locks taken      */
     197#endif
    205198
    206199        thread_info_t       info;            /*! embedded thread_info_t                   */
     
    311304
    312305/***************************************************************************************
    313  * This function releases the physical memory allocated for a thread descriptor
    314  * in the local cluster. It can be used for both an user and a kernel thread.
    315  * The physical memory dynamically allocated in the HEAP or MMAP zones by an user
    316  * thread will be released when the process is killed, and the page table flushed.
     306 * This function releases the physical memory allocated for a thread in a given cluster.
     307 * This include the thread descriptor itself, the associated CPU and FPU context, and
     308 * the physical memory allocated for an user thread local stack.
    317309 ***************************************************************************************
    318310 * @ thread  : pointer on the thread descriptor to release.
     
    353345
    354346/***************************************************************************************
    355  * This function atomically sets a signal in a thread descriptor.
     347 * This function is used by a killer thread running in the same cluster as a target
     348 * thread to request the scheduler of the target to call the thread_handle_signal()
     349 * at the next context switch, to confirm that the target thread is blocked and
     350 * not currently running. This function executes atomically the following actions :
      351 * - It sets the sig_pending flag in the target scheduler descriptor.
      352 * - It sets the SIG flag in the "flags" field of the target thread descriptor.
     353 * - It registers the responses counter pointer in the target thread descriptor.
     354 * The sig_pending flag is handled as a set/reset flip-flop by the killer thread
     355 * and by the target scheduler.
     356 ***************************************************************************************
     357 * @ target        : local pointer on target thread.
     358 * @ sig_rsp_count : local pointer on responses counter.
     359 **************************************************************************************/
     360void thread_set_signal( thread_t * thread,
     361                        uint32_t * sig_rsp_count );
     362
     363/***************************************************************************************
     364 * This function is used by the sched_handle_signal() function executed by a scheduler
     365 * to reset a pending signal in both a target <thread> descriptor, and in the target
     366 * thread scheduler.
    356367 ***************************************************************************************
    357368 * @ thread    : local pointer on target thread.
    358  *s released all locks @ mask      : mask on selected signal.
    359  **************************************************************************************/
    360 inline void thread_set_signal( thread_t * thread,
    361                                uint32_t   mask );
    362 
    363 /***************************************************************************************
    364  * This function resets a signal in a thread descriptor.
    365  ***************************************************************************************
    366  * @ thread    : local pointer on target thread.
    367  * @ mask      : mask on selected signal.
    368  **************************************************************************************/
    369 inline void thread_reset_signal( thread_t * thread,
    370                                  uint32_t   mask );
     369 **************************************************************************************/
     370void thread_reset_signal( thread_t * thread );
    371371
    372372/***************************************************************************************
     
    385385
    386386/***************************************************************************************
    387  * This function is used by the calling thread to suicide.
    388  * All locks must be previously released. The scenario depends on the DETACHED flag.
    389  * if detached :
    390  * 1) the calling thread sets the SIG_SUICIDE bit in the "signals" bit_vector,
    391  *    registers the BLOCKED_GLOBAL bit in the "blocked" bit_vector, and deschedule.
    392  * 2) the scheduler, detecting the SIG_SUICIDE bit, remove the thread from the
    393  *    scheduler list, remove the thread from its process, and destroys the thread.
    394  * if attached :
    395  * 1) the calling thread simply sets the BLOCKED_EXIT bit in the "blocked" bit vector
    396  *    and deschedule.
    397  * 2) The SIG_KILL bit and BLOCKED_SIGNAL bits are set by the parent thread when
    398  *    executing the pthread_join(), and detecting the BLOCKED_EXIT bit.
    399  *    The scenario is a standard kill as described below.
    400  ***************************************************************************************
    401  * @ returns 0 if success / returns EINVAL if locks_count is not zero.
    402  **************************************************************************************/
    403 error_t thread_exit();
    404 
    405 /***************************************************************************************
    406  * This function request to kill a local target thread, with the following scenario:
    407  * 1. This function set the BLOCKED_GLOBAL bit in target thread "blocked" bit_vector,
    408  *    set the SIG_KILL bit in target thread "signals" bit_vector, and send an IPI
    409  *    to the target thread core to force scheduling.
    410  * 2. The scheduler, detecting the SIG_KILL set, removes the thread from the scheduler
    411  *    list, and reset the SIG_KILL bit to acknowledge the killer.
    412  * 3. The caller of this function, (such as the process_kill() function), must poll
    413  *    SIG_KILL bit until reset, detach the thread from its parent if the thread is
    414  *    attached, remove the thread from its process, and destroys the thread.
    415  *
    416  * NOTE: The third step must be done by the caller to allows the process_kill()
    417  *       function to parallelize the work on all schedulers in a given cluster.
     387 * This function is called to handle the "pthread_cancel" system call.
     388 * It allows a killer thread to kill one single target thread.
     389 * The killer thread must be running in the same cluster as the target thread.
     390 * If not, the client thread must use the RPC_THREAD_KILL.
      391 * - When the killer thread is running on the same core as the target thread,
      392 *   this function simply detaches the target thread from the scheduler,
      393 *   detaches it from the parent thread if it is attached, detaches it from the
      394 *   local process descriptor, and releases all memory allocated to the thread.
      395 * - When the killer thread is running on a different core than the target thread,
      396 *   the killer sends a signal to the target thread scheduler requesting this
      397 *   scheduler to confirm that the target thread is blocked and not running.
     398 *   Then, it executes the same actions as described above.
    418399 ***************************************************************************************
    419400 * @ thread   : local pointer on the target thread.
  • trunk/kernel/libk/remote_rwlock.c

    r337 r409  
    4040    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->current ) , 0 );
    4141    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count )   , 0 );
     42
     43#if CONFIG_LOCKS_DEBUG
    4244    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
     45    xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
     46#endif
     47
    4348}
    4449
     
    5358    cxy_t             lock_cxy = GET_CXY( lock_xp );
    5459
    55     // get cluster and local pointer on local thread
     60    // get local pointer on local thread
    5661    thread_t          * thread_ptr = CURRENT_THREAD;
    5762
     
    8186    thread_ptr->remote_locks++;
    8287
     88#if CONFIG_LOCKS_DEBUG
     89    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     90                     XPTR( lock_cxy ,  &lock_ptr->list ) );
     91#endif
     92
    8393    // sync
    8494    hal_fence();
     
    115125    // decrement thread.remote_locks
    116126        thread_ptr->remote_locks--;
     127
     128#if CONFIG_LOCKS_DEBUG
      129    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     130#endif
    117131
    118132    // enable interrupts
     
    134148    cxy_t             lock_cxy = GET_CXY( lock_xp );
    135149
    136     // get cluster and local pointer on local thread
    137     cxy_t               thread_cxy = local_cxy;
     150    // get local pointer on local thread
    138151    thread_t          * thread_ptr = CURRENT_THREAD;
    139152
     
    142155    xptr_t              count_xp   = XPTR( lock_cxy   , &lock_ptr->count );
    143156    xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    144     xptr_t              owner_xp   = XPTR( lock_cxy   , &lock_ptr->owner );
    145     xptr_t              thread_xp  = XPTR( thread_cxy , thread_ptr );
    146157
    147158    // disable interrupts
     
    165176    }
    166177
    167     // register owner thread
    168     hal_remote_swd( owner_xp , thread_xp );
     178#if CONFIG_LOCKS_DEBUG
     179    hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     180                    XPTR( local_cxy , thread_ptr ) );
     181    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     182                     XPTR( lock_cxy  , &lock_ptr->list ) );
     183#endif   
    169184
    170185    // increment thread.remote_locks
     
    188203    thread_t          * thread_ptr = CURRENT_THREAD;
    189204
    190     // compute extended pointers on lock->ticket, lock->owner
      205    // compute extended pointer on lock->current
    191206    xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    192     xptr_t              owner_xp   = XPTR( lock_cxy   , &lock_ptr->owner );
    193207
    194208    // disable interrupts
    195209        hal_disable_irq( &mode );
    196210 
    197     // unregister owner thread, and release lock
    198     hal_remote_swd( owner_xp , XPTR_NULL );
      211#if CONFIG_LOCKS_DEBUG
     212    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     213    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     214#endif
     215
     216    // release lock
    199217    hal_remote_atomic_add( current_xp , 1 );
    200218
     
    217235    uint32_t     current;               // ticket index of current owner
    218236    uint32_t     count;                 // current number of reader threads
    219     xptr_t       owner;                 // extended pointer on writer thread
    220237
    221238    // get cluster and local pointer on remote_rwlock
     
    226243    current = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->current ) );
    227244    count   = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->count ) );
    228     owner   = hal_remote_lwd( XPTR( lock_cxy , &lock_ptr->owner ) );
    229 
    230     printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d / owner = %l\n",
    231            lock_xp , comment , ticket , current , count , owner );
     245
     246    printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d\n",
     247           lock_xp , comment , ticket , current , count );
    232248
    233249}  // end remote_rwlock_print()
  • trunk/kernel/libk/remote_rwlock.h

    r50 r409  
    4040 *   accesses before starting its own access.
    4141 * When the lock is taken by another thread, the new-comers use a busy waiting policy.
    42  *
    43  * It uses a busy-waiting policy if the lock is already allocated to another thread.
    4442 **************************************************************************************/
    4543
    4644typedef struct remote_rwlock_s
    4745{
    48     uint32_t     ticket;                /*! first free ticket index                   */
    49     uint32_t     current;               /*! ticket index of current owner             */
    50     uint32_t     count;                 /*! current number of reader threads          */
    51     xptr_t       owner;                 /*! extended pointer on writer thread         */
     46    uint32_t       ticket;          /*! first free ticket index                       */
     47    uint32_t       current;         /*! ticket index of current owner                 */
     48    uint32_t       count;           /*! current number of reader threads              */
     49
     50#if CONFIG_LOCKS_DEBUG
     51    xptr_t         owner;           /*! extended pointer on writer thread             */
     52    xlist_entry_t  list;            /*! member of list of remote locks taken by owner */
     53#endif
     54
    5255}
    5356remote_rwlock_t;
  • trunk/kernel/libk/remote_spinlock.c

    r408 r409  
    3838
    3939        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
     40
     41#if CONFIG_LOCKS_DEBUG
    4042        hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    4143        xlist_entry_init( XPTR( cxy , &ptr->list ) );
     44#endif
     45
    4246}
    4347
     
    5256        cxy_t               lock_cxy = GET_CXY( lock_xp );
    5357
    54         // get cluster and local pointer on local thread
    55         cxy_t               thread_cxy = local_cxy;
     58        // get local pointer on local thread
    5659        thread_t          * thread_ptr = CURRENT_THREAD;
    5760
     
    7376                thread_ptr->remote_locks++;
    7477
     78#if CONFIG_LOCKS_DEBUG
    7579                hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    76                             (uint64_t)XPTR( thread_cxy , thread_ptr) );
    77 
    78                 xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
    79                              XPTR( lock_cxy , &lock_ptr->list ) );
     80                                XPTR( thread_cxy , thread_ptr) );
     81                xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     82                                 XPTR( lock_cxy , &lock_ptr->list ) );
     83#endif
    8084
    8185                hal_restore_irq(mode);
     
    96100        cxy_t               lock_cxy = GET_CXY( lock_xp );
    97101
    98         // get cluster and local pointer on local thread
    99         cxy_t               thread_cxy = local_cxy;
     102        // get local pointer on local thread
    100103        thread_t          * thread_ptr = CURRENT_THREAD;
    101104
     
    118121        thread_ptr->remote_locks++;
    119122
    120         hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    121                         (uint64_t)XPTR( thread_cxy , thread_ptr) );
    122 
    123         xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
    124                          XPTR( lock_cxy , &lock_ptr->list ) );
     123#if CONFIG_LOCKS_DEBUG
     124        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     125                        XPTR( local_cxy , thread_ptr) );
     126        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     127                         XPTR( lock_cxy  , &lock_ptr->list ) );
     128#endif
    125129
    126130        // irq_state must be restored when lock is released
     
    140144        thread_t          * thread_ptr = CURRENT_THREAD;
    141145
     146#if CONFIG_LOCKS_DEBUG
    142147        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     148        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     149#endif
     150
    143151        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    144152        thread_ptr->remote_locks--;
    145         xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    146153
    147154    // deschedule if pending request
     
    163170        cxy_t               lock_cxy = GET_CXY( lock_xp );
    164171
    165     // get cluster and local pointer on calling thread
    166     cxy_t               thread_cxy = local_cxy;
     172    // get local pointer on calling thread
    167173    thread_t          * thread_ptr = CURRENT_THREAD;
    168174
     
    191197        thread_ptr->remote_locks++;
    192198
    193         hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    194                         (uint64_t)XPTR( thread_cxy , thread_ptr) );
    195 
    196         xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
    197                          XPTR( lock_cxy , &lock_ptr->list ) );
     199#if CONFIG_LOCKS_DEBUG
     200        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     201                        XPTR( local_cxy , thread_ptr) );
     202        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     203                         XPTR( lock_cxy  , &lock_ptr->list ) );
     204#endif
    198205
    199206        // enable interrupts
     
    211218        thread_t          * thread_ptr = CURRENT_THREAD;
    212219
     220#if CONFIG_LOCKS_DEBUG
    213221        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     222        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     223#endif
     224
    214225        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    215226        thread_ptr->remote_locks--;
    216         xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    217227
    218228    // deschedule if pending request
     
    220230}
    221231
    222 //////////////////////////////////////////////
    223 xptr_t remote_spinlock_owner( xptr_t lock_xp )
    224 {
    225     // get cluster and local pointer on remote_spinlock
    226     remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    227     cxy_t               lock_cxy = GET_CXY( lock_xp );
    228 
    229     return hal_remote_lw( XPTR( lock_cxy , &lock_ptr->owner ) );
    230 }
  • trunk/kernel/libk/remote_spinlock.h

    r101 r409  
    3333 * This structure defines a remote spinlock, that can be used to protect
    3434 * exclusive access to a trans-cluster shared resource. It can be taken by any
    35  * thread running in any cluster. All access functions use remote pointers,
    36  * and the owner thread is registrated as a remote pointer.
     35 * thread running in any cluster. All access functions use remote pointers.
     36 * The "owner" and "list" are optional fields used for debug.
     37 * It registers the list of all remote spinlocks taken by a given thread.
    3738 **************************************************************************************/
    3839
     
    4041{
    4142    volatile uint32_t     taken;       /*! free if 0 / taken if non zero             */
     43
     44#if CONFIG_LOCKS_DEBUG
    4245    xptr_t                owner;       /*! extended pointer on the owner thread      */
    4346    xlist_entry_t         list;        /*! list of all remote_lock taken by owner    */
     47#endif
     48
    4449}
    4550remote_spinlock_t;
     
    100105void remote_spinlock_unlock( xptr_t  lock_xp );
    101106
    102 /***************************************************************************************
    103  * This debug function returns the current owner of a remote spinlock.
    104  ***************************************************************************************
    105  * @ lock_xp    : extended pointer on the remote spinlock
    106  * @ return XPTR_NULL if not taken / return owner thread if lock already taken
    107  **************************************************************************************/
    108 xptr_t remote_spinlock_owner( xptr_t  lock_xp );
    109 
    110 
    111107#endif
  • trunk/kernel/libk/rwlock.c

    r337 r409  
    3737    lock->current = 0;
    3838    lock->count   = 0;
     39
     40#if CONFIG_LOCKS_DEBUG
    3941        lock->owner   = NULL;
     42    list_entry_init( &lock->list );
     43#endif
     44
    4045}
    4146
     
    6570    this->local_locks++;
    6671
     72#if CONFIG_LOCKS_DEBUG
     73    list_add_first( &this->locks_root , &lock->list );
     74#endif
     75
    6776    // consistency
    6877    hal_fence();
     
    8897    hal_atomic_add( &lock->count , -1 );
    8998    this->local_locks--;
     99
     100#if CONFIG_LOCKS_DEBUG
     101    list_unlink( &lock->list );
     102#endif
    90103
    91104    // enable IRQs
     
    123136    }
    124137
     138    this->local_locks++;
     139
     140#if CONFIG_LOCKS_DEBUG
    125141    lock->owner = this;
    126     this->local_locks++;
     142    list_add_first( &this->locks_root , &lock->list );
     143#endif
    127144
    128145    // enable IRQs
     
    140157        hal_disable_irq( &mode );
    141158 
     159#if CONFIG_LOCKS_DEBUG
     160    lock->owner = NULL;
     161    list_unlink( &lock->list );
     162#endif
     163
    142164    // release lock
    143165    lock->current++;
    144     lock->owner = NULL;
    145166    this->local_locks--;
    146167
  • trunk/kernel/libk/rwlock.h

    r14 r409  
    4040 * As this local lock is only accessed by the local threads, if the lock is taken,
    4141 * the new-comers use a busy waiting policy with a delay between retry.
     42 * TODO : Introduce the rwlocks in the list of locks taken by a given thread for debug.
    4243 ******************************************************************************************/
    4344
     
    4849/*******************************************************************************************
    4950 * This structure defines a local rwlock.
     51 * The "owner" and "list" fields are used for debug.
    5052 ******************************************************************************************/
    5153
     
    5557    uint32_t            current;          /*! ticket index of current owner               */
    5658    uint32_t            count;            /*! number of simultaneous readers threads      */
     59
     60#if CONFIG_LOCKS_DEBUG
    5761        struct thread_s   * owner;            /*! pointer on curent writer thread             */
     62    list_entry_t        list;             /*! member of list of locks taken by owner      */
     63#endif
     64
    5865}
    5966rwlock_t;
  • trunk/kernel/libk/spinlock.c

    r408 r409  
    3737{
    3838    lock->taken = 0;
     39
     40#if CONFIG_LOCKS_DEBUG
    3941    lock->owner = NULL;
    4042    list_entry_init( &lock->list );
     43#endif
     44
    4145}
    4246
     
    6670
    6771    this->local_locks++;
     72
     73#if CONFIG_LOCKS_DEBUG
    6874    lock->owner = this;
    6975    list_add_first( &this->locks_root , &lock->list );
     76#endif
    7077
    7178    // irq_state must be restored when lock is released
     
    7986    thread_t * this = CURRENT_THREAD;;
    8087
     88#if CONFIG_LOCKS_DEBUG
    8189    lock->owner = NULL;
     90    list_unlink( &lock->list );
     91#endif
     92
    8293    lock->taken = 0;
    8394    this->local_locks--;
    84     list_unlink( &lock->list );
    8595
    8696    // deschedule if pending request
     
    121131
    122132    this->local_locks++;
     133
     134#if CONFIG_LOCKS_DEBUG
    123135    lock->owner = this;
    124136    list_add_first( &this->locks_root , &lock->list );
     137#endif
    125138
    126139    // restore IRQs
     
    148161    {
    149162        this->local_locks++;
     163
     164#if CONFIG_LOCKS_DEBUG
    150165        lock->owner = this;
    151166        list_add_first( &this->locks_root , &lock->list );
     167#endif
     168
    152169        hal_restore_irq(mode);
    153170        return 0;
     
    160177    thread_t * this = CURRENT_THREAD;
    161178
     179#if CONFIG_LOCKS_DEBUG
    162180    lock->owner = NULL;
     181    list_unlink( &lock->list );
     182#endif
     183
    163184    lock->taken = 0;
    164185    this->local_locks--;
    165     list_unlink( &lock->list );
    166186
    167187    // deschedule if pending request
  • trunk/kernel/libk/spinlock.h

    r14 r409  
    5555/*******************************************************************************************
    5656 * This structure defines a local spinlock.
     57 * The "owner" and "list" are optional fields used for debug.
     58 * It registers the list of all spinlocks taken by a given thread.
    5759 ******************************************************************************************/
    5860
     
    6062{
    6163        uint32_t            taken;             /*! state : free if zero / taken if non zero  */
     64
     65#if CONFIG_LOCKS_DEBUG
    6266        struct thread_s   * owner;             /*! pointer on curent owner thread            */
    63     list_entry_t        list;              /*! list of all locks taken by owner          */
     67    list_entry_t        list;              /*! member of list of locks taken by owner    */
     68#endif
     69
    6470}
    6571spinlock_t;
     
    96102/*******************************************************************************************
    97103 * This blocking function locks a local spinlock.
    98  * If the lock is already taken, the calling thread deschedules and retries when
    99  * it is rescheduled, until success.
     104 * If the lock is already taken, the calling thread deschedules without blocking,
     105 * and retries when it is rescheduled, until success.
    100106 * It increments the calling thread local_locks count when the lock has been taken.
    101107 *******************************************************************************************
  • trunk/kernel/mm/ppm.h

    r407 r409  
    3434
    3535/*****************************************************************************************
    36  * This structure defines the Physical Memory Manager in a cluster.
    37  * In all clusters, the physical memory bank starts at local physical address 0.
    38  * The size of this local physical memory is defined by the <pages_nr> field in the
     36 * This structure defines the Physical Pages Manager in a cluster.
     37 * In each cluster, the physical memory bank starts at local physical address 0 and
     38 * contains an integer number of pages, is defined by the <pages_nr> field in the
    3939 * boot_info structure. It is split in three parts:
    4040 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
  • trunk/kernel/mm/vmm.c

    r408 r409  
    2828#include <hal_special.h>
    2929#include <hal_gpt.h>
     30#include <hal_vmm.h>
    3031#include <printk.h>
    3132#include <memcpy.h>
     
    8384             "STACK zone too small\n");
    8485
    85     // register kentry vseg in VMM
     86    // register kentry vseg in VSL
    8687    base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
    8788    size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
     
    100101    vmm->kent_vpn_base = base;
    101102
    102     // register args vseg in VMM
     103    // register args vseg in VSL
    103104    base = (CONFIG_VMM_KENTRY_BASE +
    104105            CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT;
     
    118119    vmm->args_vpn_base = base;
    119120
    120     // register the envs vseg in VMM
     121    // register the envs vseg in VSL
    121122    base = (CONFIG_VMM_KENTRY_BASE +
    122123            CONFIG_VMM_KENTRY_SIZE +
     
    137138    vmm->envs_vpn_base = base;
    138139
    139     // initialize generic page table
     140    // create GPT (empty)
    140141    error = hal_gpt_create( &vmm->gpt );
    141142
    142     assert( (error == 0) , __FUNCTION__ , "cannot initialize page table\n");
     143    assert( (error == 0) , __FUNCTION__ , "cannot create GPT\n");
     144
     145    // architecture specific GPT initialization
     146    // (For TSAR, identity map the kentry_vseg)
     147    error = hal_vmm_init( vmm );
     148
     149    assert( (error == 0) , __FUNCTION__ , "cannot initialize GPT\n");
    143150
    144151    // initialize STACK allocator
     
    154161
    155162    // initialize instrumentation counters
    156         vmm->pgfault_nr          = 0;
     163        vmm->pgfault_nr = 0;
    157164
    158165    hal_fence();
     
    534541    vmm_t  * vmm = &process->vmm;
    535542
     543// @@@
     544vmm_display( process , true );
     545// @@@
     546
    536547    // get extended pointer on VSL root and VSL lock
    537548    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     
    541552        remote_rwlock_wr_lock( lock_xp );
    542553
    543     // remove all vsegs registered in VSL
     554    // remove all user vsegs registered in VSL
    544555        while( !xlist_is_empty( root_xp ) )
    545556        {
     557        // get pointer on first vseg in VSL
    546558                vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
    547559        vseg = (vseg_t *)GET_PTR( vseg_xp );
     560
     561printk("\n@@@ %s : vseg %s\n", __FUNCTION__ , vseg_type_str( vseg->type ) );
     562
     563        // unmap and release all pages
     564        vmm_unmap_vseg( process , vseg );
     565
     566        // remove vseg from VSL
    548567                vseg_detach( vmm , vseg );
     568
     569        // release memory allocated to vseg descriptor
    549570        vseg_free( vseg );
    550571        }
     
    565586    }
    566587
    567     // release memory allocated to the local page table
     588    // release memory allocated to the GPT itself
    568589    hal_gpt_destroy( &vmm->gpt );
    569590
     
    928949    vpn_t       vpn_min;    // VPN of first PTE
    929950    vpn_t       vpn_max;    // VPN of last PTE (excluded)
    930 
    931     // get pointer on process page table
     951    ppn_t       ppn;        // current PTE ppn value
     952    uint32_t    attr;       // current PTE attributes
     953    kmem_req_t  req;        // request to release memory
     954    xptr_t      page_xp;    // extended pointer on page descriptor
     955    cxy_t       page_cxy;   // page descriptor cluster
     956    page_t    * page_ptr;   // page descriptor pointer
     957
     958vmm_dmsg("\n[DBG] %s : core[%x, %d] enter / process %x / vseg %s / base %x / cycle %d\n",
     959__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ,
     960vseg_type_str( vseg->type ), vseg->vpn_base, (uint32_t)hal_get_cycles() );
     961
     962    // get pointer on process GPT
    932963    gpt_t     * gpt = &process->vmm.gpt;
    933964
     
    937968        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    938969    {
    939         hal_gpt_reset_pte( gpt , vpn );
    940     }
    941 }
     970        // get GPT entry
     971        hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
     972
     973        if( attr & GPT_MAPPED )  // entry is mapped
     974        {
     975            // check small page
     976            assert( (attr & GPT_SMALL) , __FUNCTION__ ,
     977            "an user vseg must use small pages" );
     978
     979            // unmap GPT entry
     980            hal_gpt_reset_pte( gpt , vpn );
     981
     982            // release memory if not identity mapped
     983            if( (vseg->flags & VSEG_IDENT)  == 0 )
     984            {
     985                // get extended pointer on page descriptor
     986                page_xp  = ppm_ppn2page( ppn );
     987                page_cxy = GET_CXY( page_xp );
     988                page_ptr = (page_t *)GET_PTR( page_xp );
     989
     990                // release physical page to relevant cluster
     991                if( page_cxy == local_cxy )                   // local cluster
     992                {
     993                    req.type = KMEM_PAGE;
     994                    req.ptr  = page_ptr;
     995                    kmem_free( &req );
     996                }
     997                else                                          // remote cluster
     998                {
     999                    rpc_pmem_release_pages_client( page_cxy , page_ptr );
     1000                }
     1001            }
     1002        }
     1003    }
     1004}  // end vmm_unmap_vseg()
    9421005
    9431006//////////////////////////////////////////////////////////////////////////////////////////
  • trunk/kernel/mm/vmm.h

    r408 r409  
    131131 * - It initializes the STACK and MMAP allocators.
    132132 * - It registers the "kentry", "args", "envs" vsegs in the VSL.
     133 * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
     134 * - For TSAR it map all pages for the "kentry" vseg, that must be identity mapping.
     135 * Note:
    133136 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
    134137 * - The "stack" vsegs are dynamically created by the thread_user_create() function.
    135  * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscalls.
    136  * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
    137  * - For TSAR it map all pages for the "kentry" vseg, that must be identity mapping.
     138 * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscall.
    138139 * TODO : Any error in this function gives a kernel panic => improve error handling.
    139140 *********************************************************************************************
     
    206207
    207208/*********************************************************************************************
    208  * This function removes all vsegs registered in in the virtual memory manager of the
    209  * process identified by the <process> argument.
    210  * It releases the memory allocated to the local generic page table.
     209 * This function scan the list of vsegs registered in the VSL of the process
     210 * identified by the <process> argument, and for each vseg:
     211 * - it unmap from the GPT and releases all mapped pages in vseg.
     212 * - it removes the vseg from the process VSL.
     213 * - It releases the memory allocated to the vseg descriptor.
     214 * Finally, it releases the memory allocated to the GPT itself.
    211215 *********************************************************************************************
    212216 * @ process   : pointer on process descriptor.
     
    286290
    287291/*********************************************************************************************
    288  * This function unmaps all PTEs of a given vseg, in the generic page table associated
    289  * to a given process descriptor, and releases the corresponding physical memory.
    290  * It can be used for any type of vseg.
     292 * This function unmaps all mapped PTEs of a given vseg, from the generic page table
     293 * associated to a given process descriptor, and releases the physical memory allocated
     294 * to all mapped GPT entries.  It can be used for any type of vseg.
    291295 *********************************************************************************************
    292296 * @ process  : pointer on process descriptor.
  • trunk/kernel/mm/vseg.h

    r408 r409  
    3535struct vmm_s;
    3636
    37 /**********************************************************************************************
     37/*******************************************************************************************
    3838 * This enum defines the vseg types for an user process.
    39  *********************************************************************************************/
     39 ******************************************************************************************/
    4040
    4141typedef enum
    4242{
    43     VSEG_TYPE_CODE   = 0,          /*! executable user code   / private / localized          */
    44     VSEG_TYPE_DATA   = 1,          /*! initialized user data  / public  / distributed        */
    45     VSEG_TYPE_STACK  = 2,          /*! execution user stack   / private / localized          */
    46     VSEG_TYPE_ANON   = 3,          /*! anonymous mmap         / public  / localized          */
    47     VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized          */
    48     VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized          */
     43    VSEG_TYPE_CODE   = 0,          /*! executable user code   / private / localized       */
     44    VSEG_TYPE_DATA   = 1,          /*! initialized user data  / public  / distributed     */
     45    VSEG_TYPE_STACK  = 2,          /*! execution user stack   / private / localized       */
     46    VSEG_TYPE_ANON   = 3,          /*! anonymous mmap         / public  / localized       */
     47    VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized       */
     48    VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized       */
    4949
    5050    VSEG_TYPE_KDATA  = 10,
     
    5555
    5656
    57 /**********************************************************************************************
     57/*******************************************************************************************
    5858 * These masks define the vseg generic (hardware independent) flags.
    59  *********************************************************************************************/
     59 ******************************************************************************************/
    6060
    61 #define VSEG_USER     0x0001       /*! user accessible                                       */
    62 #define VSEG_WRITE    0x0002       /*! writeable                                             */
    63 #define VSEG_EXEC     0x0004       /*! executable                                            */
    64 #define VSEG_CACHE    0x0008       /*! cachable                                              */
    65 #define VSEG_PRIVATE  0x0010       /*! should not be accessed from another cluster           */
    66 #define VSEG_DISTRIB  0x0020       /*! physically distributed on all clusters                */
     61#define VSEG_USER     0x0001       /*! user accessible                                    */
     62#define VSEG_WRITE    0x0002       /*! writeable                                          */
     63#define VSEG_EXEC     0x0004       /*! executable                                         */
     64#define VSEG_CACHE    0x0008       /*! cachable                                           */
     65#define VSEG_PRIVATE  0x0010       /*! should not be accessed from another cluster        */
     66#define VSEG_DISTRIB  0x0020       /*! physically distributed on all clusters             */
     67#define VSEG_IDENT    0x0040       /*! identity mapping                                   */
    6768
    68 /**********************************************************************************************
     69/*******************************************************************************************
    6970 * This structure defines a virtual segment descriptor.
    7071 * - The VSL contains only local vsegs, but is implemented as an xlist, because it can be
    7172 *   accessed by thread running in a remote cluster.
    7273 * - The zombi list is used by the local MMAP allocator. It is implemented as a local list.
    73  *********************************************************************************************/
     74 ******************************************************************************************/
    7475
    7576typedef struct vseg_s
    7677{
    77         xlist_entry_t     xlist;        /*! all vsegs in same VSL (or same zombi list)           */
    78         list_entry_t      zlist;        /*! all vsegs in same zombi list                         */
    79         struct vmm_s    * vmm;          /*! pointer on associated VM manager                     */
    80     uint32_t          type;         /*! vseg type                                            */
    81         intptr_t          min;          /*! segment min virtual address                          */
    82         intptr_t          max;          /*! segment max virtual address (excluded)               */
    83         vpn_t             vpn_base;     /*! first page of vseg                                   */
    84         vpn_t             vpn_size;     /*! number of pages occupied                             */
    85         uint32_t          flags;        /*! vseg attributes                                      */
    86         xptr_t            mapper_xp;    /*! xptr on remote mapper (for types CODE/DATA/FILE)     */
    87         intptr_t          file_offset;  /*! vseg offset in file (for types CODE/DATA/FILE        */
    88     intptr_t          file_size;    /*! max segment size in mapper (for type CODE/DATA)      */
    89     cxy_t             cxy;          /*! physical mapping (for non distributed vseg)          */
     78        xlist_entry_t     xlist;        /*! all vsegs in same VSL (or same zombi list)        */
     79        list_entry_t      zlist;        /*! all vsegs in same zombi list                      */
     80        struct vmm_s    * vmm;          /*! pointer on associated VM manager                  */
     81    uint32_t          type;         /*! vseg type                                         */
     82        intptr_t          min;          /*! segment min virtual address                       */
     83        intptr_t          max;          /*! segment max virtual address (excluded)            */
     84        vpn_t             vpn_base;     /*! first page of vseg                                */
     85        vpn_t             vpn_size;     /*! number of pages occupied                          */
     86        uint32_t          flags;        /*! vseg attributes                                   */
     87        xptr_t            mapper_xp;    /*! xptr on remote mapper (for types CODE/DATA/FILE)  */
     88        intptr_t          file_offset;  /*! vseg offset in file (for types CODE/DATA/FILE     */
     89    intptr_t          file_size;    /*! max segment size in mapper (for type CODE/DATA)   */
     90    cxy_t             cxy;          /*! physical mapping (for non distributed vseg)       */
    9091}
    9192vseg_t;
    9293
    93 /**********************************************************************************************
     94/*******************************************************************************************
    9495 * This function returns a printable string for the vseg type.
    95  **********************************************************************************************
     96 *******************************************************************************************
    9697 * @ vseg_type  : type of vseg
    9798 * @ return pointer on string
    98  *********************************************************************************************/
     99 ******************************************************************************************/
    99100char * vseg_type_str( uint32_t vseg_type );
    100101
    101 /**********************************************************************************************
     102/*******************************************************************************************
    102103 * This function allocates physical memory for a new vseg descriptor from the local cluster
    103104 * physical memory allocator.
    104  **********************************************************************************************
     105 *******************************************************************************************
    105106 * @ return pointer on allocated vseg descriptor if success / return NULL if failure.
    106  *********************************************************************************************/
     107 ******************************************************************************************/
    107108vseg_t * vseg_alloc();
    108109
    109 /**********************************************************************************************
    110  * This function releases physical memory allocated for a vseg descriptor to the local cluster
    111  * physical memory allocator.
    112  **********************************************************************************************
     110/*******************************************************************************************
     111 * This function releases the physical memory allocated for a vseg descriptor
     112 * to the local cluster physical memory allocator.
     113 *******************************************************************************************
    113114 * @ vseg   : local pointer on released vseg descriptor.
    114  *********************************************************************************************/
     115 ******************************************************************************************/
    115116void vseg_free( vseg_t * vseg );
    116117
    117 /**********************************************************************************************
     118/*******************************************************************************************
    118119 * This function initializes a local vseg descriptor, from the arguments values.
    119120 * It does NOT register the vseg in the local VMM.
    120  **********************************************************************************************
     121 *******************************************************************************************
    121122 * @ vseg      : pointer on the vseg descriptor.
    122123 * @ base      : vseg base address.
     
    126127 * @ type      : vseg type.
    127128 * @ cxy       : target cluster for physical mapping.
    128  *********************************************************************************************/
     129 ******************************************************************************************/
    129130void vseg_init( vseg_t      * vseg,
    130131                    vseg_type_t   type,
     
    138139                cxy_t         cxy );
    139140
    140 /**********************************************************************************************
     141/*******************************************************************************************
    141142 * This function initializes a local vseg descriptor from values contained in a reference
    142143 * remote vseg descriptor. It does NOT register the vseg in the local VMM.
    143  **********************************************************************************************
     144 *******************************************************************************************
    144145 * @ vseg      : pointer on the vseg descriptor.
    145146 * @ ref_xp    : extended pointer on the reference vseg descriptor.
    146  *********************************************************************************************/
     147 ******************************************************************************************/
    147148void vseg_init_from_ref( vseg_t * vseg,
    148149                         xptr_t   ref_xp );
    149150
    150 /**********************************************************************************************
     151/*******************************************************************************************
    151152 * This function adds a vseg descriptor in the set of vsegs controlled by a given VMM,
    152153 * and updates the vmm field in the vseg descriptor.
    153154 * The lock protecting the vsegs list in VMM must be taken by the caller.
    154  **********************************************************************************************
     155 *******************************************************************************************
    155156 * @ vmm       : pointer on the VMM
    156157 * @ vseg      : pointer on the vseg descriptor
    157  *********************************************************************************************/
     158 ******************************************************************************************/
    158159void vseg_attach( struct vmm_s  * vmm,
    159160                  vseg_t        * vseg );
    160161
    161 /**********************************************************************************************
     162/*******************************************************************************************
    162163 * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
    163164 * and updates the vmm field in the vseg descriptor. No memory is released.
    164165 * The lock protecting the vsegs list in VMM must be taken by the caller.
    165  **********************************************************************************************
     166 *******************************************************************************************
    166167 * @ vmm       : pointer on the VMM
    167168 * @ vseg      : pointer on the vseg descriptor
    168  *********************************************************************************************/
     169 ******************************************************************************************/
    169170void vseg_detach( struct vmm_s  * vmm,
    170171                  vseg_t        * vseg );
  • trunk/kernel/syscalls/sys_fork.c

    r408 r409  
    11/*
    2  * sys_fork.c - Fork the current process.
     2 * sys_fork.c - Kernel function implementing the "fork" system call.
    33 *
    44 * Authors  Alain Greiner  (2016,2017)
  • trunk/kernel/syscalls/sys_kill.c

    r124 r409  
    11/*
    2  * sys_kill.c: Send a signal to a given process.
     2 * sys_kill.c - Send a signal to a given process.
    33 *
    44 * Author    Alain Greiner (2016,2017)
     
    2424#include <kernel_config.h>
    2525#include <hal_types.h>
     26#include <hal_irqmask.h>
    2627#include <errno.h>
    2728#include <thread.h>
     
    3637              uint32_t sig_id )
    3738{
     39    uint32_t    save_sr;       // required to enable IRQs
     40
     41#if CONFIG_SYSCALL_DEBUG
     42uint64_t    tm_start;
     43uint64_t    tm_end;
     44tm_start = hal_get_cycles();
     45#endif
     46
    3847    thread_t  * this    = CURRENT_THREAD;
    3948    process_t * process = this->process;
    40 
    41     // check signal index
    42         if( (sig_id == 0) || (sig_id >= SIG_NR) )
    43         {
    44         printk("\n[ERROR] in %s : illegal signal = %d for thread %x in process %x\n",
    45                __FUNCTION__ , sig_id , this->trdid , process->pid );
    46                 this->errno = EINVAL;
    47         return -1;
    48         }
    49 
    50     // get local pointer on local cluster manager
    51     cluster_t * cluster = LOCAL_CLUSTER;
    5249
    5350    // get owner process cluster and lpid
     
    5552    lpid_t  lpid       = LPID_FROM_PID( pid );
    5653
    57     // check PID
     54    // check pid
    5855    if( (lpid >= CONFIG_MAX_PROCESS_PER_CLUSTER) || cluster_is_undefined( owner_cxy ) )
    5956    {
    6057        printk("\n[ERROR] in %s : illegal target PID = %d for thread %x in process %x\n",
    61                __FUNCTION__ , pid , this->trdid , process->pid );
     58        __FUNCTION__ , pid , this->trdid , pid );
    6259                this->errno = EINVAL;
    6360        return -1;
    6461    }
    6562
    66     // get extended pointers on copies root and lock
    67     xptr_t root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[lpid] );
    68     xptr_t lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
    69 
    70     // take the lock protecting the copies
    71     remote_spinlock_lock( lock_xp );
    72 
    73     // TODO the loop below sequencialize the RPCs
    74     // they could be pipelined using a non-blocking RPC ...
    75  
    76     // loop on the process decriptor copies
    77     xptr_t  iter_xp;
    78     XLIST_FOREACH( root_xp , iter_xp )
     63    // check sig_id
     64    if( (sig_id != SIGSTOP) && (sig_id != SIGCONT) && (sig_id != SIGKILL) )
    7965    {
    80         xptr_t      process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
    81         cxy_t       process_cxy = GET_CXY( process_xp );
    82         process_t * process_ptr = (process_t *)GET_PTR( process_xp );
    83 
    84         if( process_cxy == local_cxy )   // process copy is local
    85         {
    86             signal_rise( process_ptr , sig_id );
    87         }
    88         else                           // process copy is remote
    89         {
    90             rpc_signal_rise_client( process_cxy , process_ptr , sig_id );
    91         }
     66        printk("\n[ERROR] in %s : illegal signal type for thread %x in process %x\n",
     67        __FUNCTION__ , sig_id , this->trdid , pid );
     68                this->errno = EINVAL;
     69        return -1;
    9270    }
    9371
    94     // release the lock
    95     remote_spinlock_unlock( lock_xp );
     72    // enable IRQs
     73    hal_enable_irq( &save_sr );
     74
     75    // execute process_make_kill() function in owner cluster
     76    if( local_cxy == owner_cxy )                                // owner is local
     77    {
     78        process_make_kill( process , sig_id );
     79    }
     80    else                                                        // owner is remote
     81    {
     82        rpc_process_make_kill_client( owner_cxy , process , sig_id );
     83    }
     84
     85    // restore IRQs
     86    hal_restore_irq( save_sr );
    9687
    9788    hal_fence();
    9889
     90#if CONFIG_SYSCALL_DEBUG
     91tm_end = hal_get_cycles();
     92syscall_dmsg("\n[DBG] %s exit : core[%x,%d] / thread %x in process %x / cycle %d\n"
     93"process %x killed / cost = %d\n",
     94__FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
     95tm_start , pid , (uint32_t)(tm_end - tm_start) );
     96#endif
     97 
    9998        return 0;
    10099
  • trunk/kernel/syscalls/sys_read.c

    r408 r409  
    2525#include <hal_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_irqmask.h>
    2728#include <hal_special.h>
    2829#include <errno.h>
     
    3536// TODO: concurrent user page(s) munmap need to be handled [AG]
    3637
    37 // instrumentation
     38// TODO : remove these debug variables
    3839extern uint32_t enter_sys_read;
    3940extern uint32_t enter_devfs_move;
     
    6263    uint32_t     nbytes;      // number of bytes actually read
    6364    reg_t        save_sr;     // required to enable IRQs during syscall
    64         uint32_t     tm_start;
    65         uint32_t     tm_end;
    66 
    67         tm_start = hal_get_cycles();
    68 
    69 #if CONFIG_READ_START
     65
     66#if CONFIG_SYSCALL_DEBUG
     67uint32_t     tm_start;
     68uint32_t     tm_end;
     69tm_start = hal_get_cycles();
     70#endif
     71
     72#if CONFIG_READ_DEBUG
    7073enter_sys_read = tm_start;
    7174#endif
     
    159162    hal_fence();
    160163
    161     tm_end = hal_get_cycles();
     164#if CONFIG_SYSCALL_DEBUG
     165tm_end = hal_get_cycles();
     166syscall_dmsg("\n[DBG] %s exit : core[%x,%d] / thread %x in process %x / cycle %d\n"
     167"nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
     168__FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
     169tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start );
     170#endif
    162171
    163172#if CONFIG_READ_DEBUG
    164173exit_sys_read = tm_end;
    165174
    166 printk("\n@@@@@@@@@@@@ timing ro read character %c\n"
     175printk("\n@@@@@@@@@@@@ timing to read character %c\n"
    167176" - enter_sys_read     = %d / delta %d\n"
    168177" - enter_devfs_move   = %d / delta %d\n"
     
    195204exit_sys_read      , exit_sys_read      - exit_devfs_move    );
    196205#endif
    197 
    198 syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
    199 "nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
    200 __FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
    201 tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start );
    202206 
    203207        return nbytes;
  • trunk/kernel/syscalls/sys_signal.c

    r408 r409  
    3838    this->errno = EINVAL;
    3939    return -1;
    40    
    41         if((sig_id == 0) || (sig_id >= SIG_NR) || (sig_id == SIGKILL) || (sig_id == SIGSTOP))
    42         {
    43         printk("\n[ERROR] in %s : illega signal index = %d\n", __FUNCTION__ , sig_id );
    44                 this->errno = EINVAL;
    45                 return -1;
    46         }
    47 
    48         // register handler in signal manager for the calling process
    49         this->process->sig_mgr.sigactions[sig_id] = handler;
    50 
    51         signal_dmsg("\n[DBG] %s : handler @%x has been registred for signal %d\n",
    52                     __FUNCTION__ , handler , sig_id );
    53 
    54         return 0;
    5540}
    5641
  • trunk/kernel/syscalls/sys_thread_exit.c

    r408 r409  
    11/*
    2  * sys_thread_exit.c - terminates the execution of current thread
     2 * sys_thread_exit.c - terminates the execution of calling thread
    33 *
    4  * Authors       Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *               Alain Greiner (2016,2017)
     4 * Authors   Alain Greiner (2016,2017)
    65 *
    76 * Copyright (c) UPMC Sorbonne Universites
     
    2423
    2524#include <hal_types.h>
    26 #include <hal_irqmask.h>
    2725#include <thread.h>
    2826#include <core.h>
     27#include <vmm.h>
    2928#include <scheduler.h>
    3029#include <printk.h>
    31 
    3230
    3331////////////////////////////////////////
    3432int sys_thread_exit( void * exit_value )
    3533{
    36         thread_t  * this = CURRENT_THREAD;
    37     core_t    * core = this->core;
    38     reg_t       irq_state;
     34    paddr_t      paddr;
     35    error_t          error;
    3936
    40     // register the exit_value pointer in thread descriptor
    41     this->exit_value = exit_value;
     37#if CONFIG_SYSCALL_DEBUG
     38uint32_t     tm_start;
     39uint32_t     tm_end;
     40tm_start = hal_get_cycles();
     41#endif
    4242
    43     // enter the join loop to wait the join if thread is joinable
    44     if( (this->flags & THREAD_FLAG_DETACHED) == 0 )
     43        thread_t  * this    = CURRENT_THREAD;
     44    process_t * process = this->process;
     45
     46    // check all locks released
     47        if( !thread_can_yield() )
     48        {
     49        printk("\n[ERROR] in %s : locks not released / thread %x in process %x\n",
     50        __FUNCTION__, this->trdid, process->pid );
     51        this->errno = EINVAL;
     52        return -1;
     53    }
     54
     55    // register the exit_value pointer in this thread descriptor
     56    this->join_value = exit_value;
     57
     58    if( (this->flags & THREAD_FLAG_DETACHED) == 0 )    // this thread is joinable
    4559    {
    46             while( 1 )
    47             {
    48             // take the lock protecting the flags
    49                 remote_spinlock_lock( XPTR( local_cxy, &this->flags_lock ) );
     60        // check exit_value in user space
     61        error = vmm_v2p_translate( false , exit_value , &paddr );
     62            if( error )
     63        {
     64            printk("\n[ERROR] in %s : illegal pointer = %x / thread %x in process %x\n",
     65            __FUNCTION__ , (intptr_t)exit_value, this->trdid , process->pid );
     66            this->errno = EINVAL;
     67            return -1;
     68        }
    5069
    51             // check the JOIN flag
    52             if( this->flags & THREAD_FLAG_JOIN )       // parent made a join
    53             {
    54                 // unblock the parent thread
    55                 thread_unblock( this->parent , THREAD_BLOCKED_JOIN );
     70        // take the lock protecting the join
     71        remote_spinlock_lock( XPTR( local_cxy, &this->join_lock ) );
    5672
    57                 // release the lock protecting the flags
    58                     remote_spinlock_unlock( XPTR( local_cxy, &this->flags_lock ) );
     73        if( this->flags & THREAD_FLAG_JOIN_DONE )       // parent thread arrived first
     74        {
     75            // unblock the parent thread
     76            thread_unblock( this->join_xp , THREAD_BLOCKED_EXIT );
    5977
    60                 // exit while
    61                 break;
    62             }
    63             else                                       // no join done by parent thread
    64             {
    65                 // set the EXIT flag
    66                 this->flags |= THREAD_FLAG_EXIT;
     78            // reset the JOIN_DONE flag in this thread
     79            this->flags &= ~THREAD_FLAG_JOIN_DONE;
    6780
    68                 // block this thread
    69                 thread_block( this , THREAD_BLOCKED_EXIT );
     81            // release the lock protecting the flags
     82                remote_spinlock_unlock( XPTR( local_cxy, &this->join_lock ) );
     83        }
     84        else                                           // this thread arrived first
     85        {
     86            // block this thread
     87            thread_block( this , THREAD_BLOCKED_JOIN );
    7088
    71                 // release the lock protecting the flags
    72                     remote_spinlock_unlock( XPTR( local_cxy, &this->flags_lock ) );
     89            // release the lock protecting the flags
     90                remote_spinlock_unlock( XPTR( local_cxy, &this->join_lock ) );
    7391
    74                 // deschedule
    75                 sched_yield("waiting parent join");
    76             }     
    77         }
    78         }
     92            // deschedule
     93            sched_yield( "WAITING JOIN" );
     94        }     
     95    }
    7996
    80         // Release FPU if required
    81         hal_disable_irq( &irq_state );
    82         if( core->fpu_owner == this )  core->fpu_owner = NULL;
    83         hal_restore_irq( irq_state );
     97#if CONFIG_SYSCALL_DEBUG
     98tm_end = hal_get_cycles();
     99syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
     100"thread %x killed / cost = %d\n",
     101__FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid , tm_start ,
     102this->trdid , (uint32_t)(tm_end - tm_start) );
     103#endif
    84104
    85         // suicide
    86     thread_kill( this );
    87         return 0;       
      105    // suicide using an RPC because a thread cannot kill itself
     106    rpc_thread_kill_client( local_cxy , this );
    88107
    89 }  // end sys_thread_exit()
     108    return 0;   // never executed but required by compiler
     109
     110}  // end sys_thread exit
  • trunk/kernel/syscalls/sys_thread_join.c

    r408 r409  
    4040    cxy_t         target_cxy;
    4141    ltid_t        target_ltid;
    42     uint32_t      flags;        // target thread flags
    43     intptr_t      value;        // value returned by target thread
    44     paddr_t       paddr;        // required for vmm_v2p_translate()
     42        uint32_t      target_blocked;   // target thread blocked bit-vector
      43    uint32_t      target_flags;     // target thread flags bit-vector
     44    paddr_t       paddr;            // required for vmm_v2p_translate()
    4545
    46         thread_t  * this    = CURRENT_THREAD;
    47     process_t * process = this->process;
     46        thread_t    * this    = CURRENT_THREAD;
     47    process_t   * process = this->process;
    4848
    4949    // get target thread ltid and cxy
     
    8989
    9090    // check target thread joinable
    91     flags = hal_remote_lw( XPTR( target_cxy , &target_ptr->flags ) );
    92     if( flags & THREAD_FLAG_DETACHED )
     91    target_flags = hal_remote_lw( XPTR( target_cxy , &target_ptr->flags ) );
     92    if( target_flags & THREAD_FLAG_DETACHED )
    9393    {
    9494        printk("\n[ERROR] in %s : target thread not joinable\n", __FUNCTION__ );
     
    100100    if( target_ptr->signature != THREAD_SIGNATURE )
    101101    {
    102         printk("\n[PANIC] in %s : kernel stack overflow\n", __FUNCTION__ );
    103         hal_core_sleep();
     102        panic("\n[PANIC] in %s : kernel stack overflow\n", __FUNCTION__ );
    104103    }
    105104
    106     // wait target thread exit
    107     while( 1 )
     105    // get the lock protecting the join in target thread
     106    remote_spinlock_lock( XPTR( target_cxy , &target_ptr->join_lock ) );
     107
     108    // get the blocked bit_vector from the target thread
     109    target_blocked = hal_remote_lw( XPTR( target_cxy , &target_ptr->blocked ) );
     110
     111    if( target_blocked & THREAD_BLOCKED_JOIN )    // target thread arrived first
    108112    {
    109         // take the target thread lock protecting flags     
    110         remote_spinlock_lock( XPTR( target_cxy , &target_ptr->flags_lock ) );
     113        // unblock the target thread
     114        thread_unblock( target_xp , THREAD_BLOCKED_JOIN );
    111115
    112         // get the remote thread flags
    113         flags = hal_remote_lw( XPTR( target_cxy , &target_ptr->flags ) );
     116        // release the lock protecting flags     
     117        remote_spinlock_unlock( XPTR( target_cxy , &target_ptr->join_lock ) );
    114118
    115         // check the EXIT flag
    116         if( flags & THREAD_FLAG_EXIT )   // target made an exit
    117         {
    118             // unblock the target thread
    119             thread_unblock( target_xp , THREAD_BLOCKED_EXIT );
     119        // get the exit value from target thread
     120        *exit_value = hal_remote_lpt( XPTR( target_cxy , &target_ptr->join_value ) );
     121    }
     122    else                                          // this thread arrived first
     123    {
     124        // register this thread extended pointer in target thread
     125        hal_remote_swd( XPTR( target_cxy , &target_ptr->join_xp ) ,
     126                              XPTR( local_cxy , this ) );
    120127
    121             // release the target thread lock protecting flags     
    122             remote_spinlock_unlock( XPTR( target_cxy , &target_ptr->flags_lock ) );
     128        // set the JOIN_DONE flag in target thread
     129        hal_remote_atomic_or( XPTR( target_cxy , &target_ptr->flags ) ,
     130                              THREAD_FLAG_JOIN_DONE );
    123131
    124             // exit while
    125             break;
    126         }
    127         else                             // no exit done by target thread
    128         {
    129             // set the JOIN flag in target thread
    130             hal_remote_atomic_or( XPTR( target_xp , &target_ptr->flags ) ,
    131                                   THREAD_BLOCKED_JOIN );
     132        // block this thread on BLOCKED_EXIT
     133        thread_block( this , THREAD_BLOCKED_EXIT );
    132134
    133             // block this thread
    134             thread_block( this , THREAD_BLOCKED_JOIN );
     135        // release the lock protecting flags     
     136        remote_spinlock_unlock( XPTR( target_cxy , &target_ptr->join_lock ) );
    135137
    136             // release the target thread lock protecting flags
    137                 remote_spinlock_unlock( XPTR( target_cxy , &target_ptr->flags_lock ) );
    138 
    139             // deschedule
    140             sched_yield("waiting child exit");
    141         }
     138        // deschedule
     139        sched_yield( "WAITING_EXIT" );
     140   
     141        // get the exit value from target thread when resume
     142        *exit_value = hal_remote_lpt( XPTR( target_cxy , &target_ptr->join_value ) );
    142143    }
    143144
    144     // return exit_value from target thread descriptor
    145     value = (intptr_t)hal_remote_lpt( XPTR( target_cxy , &target_ptr->exit_value ) );
    146     *exit_value = (void *)value;
    147145    return 0;
    148146
  • trunk/kernel/syscalls/sys_write.c

    r408 r409  
    2525#include <hal_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_irqmask.h>
    2728#include <hal_special.h>
    2829#include <errno.h>
     
    4445    uint32_t     nbytes;          // number of bytes actually written
    4546    reg_t        save_sr;         // required to enable IRQs during syscall
    46         uint32_t     tm_start;
    47         uint32_t     tm_end;
    4847
    49         tm_start = hal_get_cycles();
     48#if CONFIG_SYSCALL_DEBUG
     49uint32_t     tm_start;
     50uint32_t     tm_end;
     51tm_start = hal_get_cycles();
     52#endif
    5053
    5154        thread_t   * this = CURRENT_THREAD;
     
    136139    hal_fence();
    137140
    138     tm_end = hal_get_cycles();
    139 
     141#if CONFIG_SYSCALL_DEBUG
     142tm_end = hal_get_cycles();
    140143syscall_dmsg("\n[DBG] %s : core[%x,%d] / thread %x in process %x / cycle %d\n"
    141144"nbytes = %d / first byte = %c / file_id = %d / cost = %d\n",
    142145__FUNCTION__ , local_cxy , this->core->lid , this->trdid , this->process->pid ,
    143146tm_start , nbytes , *((char *)(intptr_t)paddr) , file_id , tm_end - tm_start );
     147#endif
    144148 
    145149        return nbytes;
  • trunk/kernel/syscalls/syscalls.h

    r408 r409  
    4040 * and makes the exit_value pointer available to any successful pthread_join() with the
    4141 * terminating thread.
    42  ******************************************************************************************
    43  * @ exit_vallue  : pointer to be returned to parent thread if thread is attached.
    44  * @ return 0 if success / return -1 if failure.
      42 * It actually sets the THREAD_SIG_EXIT signal, sets the THREAD_BLOCKED_GLOBAL bit in the
     43 * thread descriptor and deschedule.
     44 * The thread will be detached from its process, and the memory allocated to the thread
     45 * descriptor will be released later by the scheduler.
     46 ******************************************************************************************
      47 * @ exit_value   : pointer to be returned to joining thread if thread is attached.
     48 * @ return 0 if success / return -1 if all locks not released or illegal argument.
    4549 *****************************************************************************************/
    4650int sys_thread_exit( void * exit_value );
     
    8791 * [4] This function detach a joinable thread.
    8892 ******************************************************************************************
    89  * @ trdid   : thread identifier.i
     93 * @ trdid   : thread identifier.
    9094 * @ return 0 if success / return -1 if failure.
    9195 *****************************************************************************************/
     
    9397
    9498/******************************************************************************************
    95  * [5] This slot is not used.
    96  *****************************************************************************************/
     99 * [5] This function requests a target thread identified by its <trdid> argument
     100 * to be cancelled. Depending on killer thread and target thread location, it calls
      101 * the thread_kill() function or the rpc_thread_kill_client() function to do the work.
      102 * It actually sets the THREAD_SIG_KILL signal, sets the THREAD_BLOCKED_GLOBAL bit in the
     103 * target thread descriptor and return.
     104 * The thread will be detached from its process, and the memory allocated to the thread
     105 * descriptor will be released later by the scheduler.
     106 ******************************************************************************************
     107 * @ trdid   : thread identifier.
     108 * @ return 0 if success / return -1 if illegal argument.
     109 *****************************************************************************************/
     110int sys_thread_cancel( trdid_t  trdid );
    97111
    98112/******************************************************************************************
     
     158172 * @ status   : termination status (not used in present implementation).
    159173 *****************************************************************************************/
    160 void sys_exit( uint32_t status );
     174int sys_exit( uint32_t status );
    161175
    162176/******************************************************************************************
Note: See TracChangeset for help on using the changeset viewer.