Changeset 407 for trunk/kernel/kern


Timestamp: Nov 7, 2017, 3:08:12 PM (7 years ago)
Author: alain
Message: First implementation of fork/exec.

Location: trunk/kernel/kern
Files: 2 deleted, 19 edited

  • trunk/kernel/kern/chdev.c

    r317 r407  
    2525#include <hal_types.h>
    2626#include <hal_special.h>
     27#include <hal_irqmask.h>
    2728#include <printk.h>
    2829#include <boot_info.h>
    2930#include <xlist.h>
    3031#include <kmem.h>
     32#include <scheduler.h>
    3133#include <thread.h>
    3234#include <rpc.h>
     
    3739extern chdev_directory_t    chdev_dir;   // allocated in kernel_init.c
    3840
     41#if CONFIG_READ_DEBUG
     42extern uint32_t enter_chdev_cmd;
     43extern uint32_t exit_chdev_cmd;
     44extern uint32_t enter_chdev_server;
     45extern uint32_t exit_chdev_server;
     46#endif
    3947
    4048////////////////////////////////////////////
     
    105113}
    106114
    107 ////////////////////////////////////////////////
    108 void chdev_register_command( xptr_t     chdev_xp,
    109                              thread_t * thread )
    110 {
    111     thread_t * thread_ptr = CURRENT_THREAD;
     115//////////////////////////////////////////////////
     116void chdev_register_command( xptr_t     chdev_xp )
     117{
     118    thread_t * server_ptr;    // local pointer on server thread associated to chdev
     119    core_t   * core_ptr;      // local pointer on core running the server thread
     120    uint32_t   lid;           // core running the server thread local index
     121    xptr_t     lock_xp;       // extended pointer on lock protecting the chdev queue
     122    uint32_t   modified;      // non zero if the server thread state was modified
     123    uint32_t   save_sr;       // for critical section
     124
     125    thread_t * this = CURRENT_THREAD;
     126
     127chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) enter / cycle %d\n",
     128__FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
    112129
    113130    // get device descriptor cluster and local pointer
     
    116133
    117134    // build extended pointers on client thread xlist and device root
    118     xptr_t  xp_list = XPTR( local_cxy , &thread_ptr->wait_list );
    119     xptr_t  xp_root = XPTR( chdev_cxy , &chdev_ptr->wait_root );
    120 
    121     // get lock protecting queue
    122     remote_spinlock_lock( XPTR( chdev_cxy , &chdev_ptr->wait_lock ) );
     135    xptr_t  list_xp = XPTR( local_cxy , &this->wait_list );
     136    xptr_t  root_xp = XPTR( chdev_cxy , &chdev_ptr->wait_root );
     137
     138    // get local pointer on server thread
     139    server_ptr = (thread_t *)hal_remote_lpt( XPTR( chdev_cxy , &chdev_ptr->server) );
     140
     141chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) / server_cxy %x / server_ptr %x / server_type %\n",
     142__FUNCTION__, local_cxy, this->core->lid, server_cxy, server_ptr,
     143thread_type_str( hal_remote_lw( XPTR( server_cxy , &server_ptr->type) ) ) );
     144
     145    // build extended pointer on chdev lock protecting queue
     146    lock_xp = XPTR( chdev_cxy , &chdev_ptr->wait_lock );
     147
     148    // get local pointer on core running the server thread
     149    core_ptr = (core_t *)hal_remote_lpt( XPTR( chdev_cxy , &server_ptr->core ) );
     150
     151    // get core local index
     152    lid = hal_remote_lw( XPTR( chdev_cxy , &core_ptr->lid ) );
     153
     154    // enter critical section
     155    hal_disable_irq( &save_sr );
    123156
    124157    // register client thread in waiting queue
    125     xlist_add_last( xp_root , xp_list );
     158    remote_spinlock_lock( lock_xp );
     159    xlist_add_last( root_xp , list_xp );
     160    remote_spinlock_unlock( lock_xp );
    126161
    127162    // unblock server thread
    128     thread_unblock( XPTR( chdev_cxy , &chdev_ptr->server ) , THREAD_BLOCKED_DEV_QUEUE );
    129 
    130     // release lock
    131     remote_spinlock_unlock( XPTR( chdev_cxy , &chdev_ptr->wait_lock ) );
    132 
    133     // client thread goes to blocked state and deschedule
    134     thread_block( thread_ptr , THREAD_BLOCKED_IO );
    135     sched_yield( NULL );
     163    modified = thread_unblock( XPTR( chdev_cxy , server_ptr ), THREAD_BLOCKED_DEV_QUEUE );
     164
     165    // send IPI to core running the server thread
     166    if( modified ) dev_pic_send_ipi( chdev_cxy , lid );
     167   
     168    // block client thread
     169    assert( thread_can_yield( this ) , __FUNCTION__ , "illegal sched_yield\n" );
     170
     171chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) deschedules / cycle %d\n",
     172__FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
     173
     174    thread_block( CURRENT_THREAD , THREAD_BLOCKED_IO );
     175    sched_yield();
     176
     177chdev_dmsg("\n[DBG] %s : core[%x,%d] (thread %s) resumes / cycle %d\n",
     178__FUNCTION__, local_cxy, this->core->lid, thread_type_str(this->type) , hal_time_stamp() );
     179
     180    // exit critical section
     181    hal_restore_irq( save_sr );
    136182
    137183}  // end chdev_register_command()
     
    143189    cxy_t           client_cxy;   // cluster of client thread
    144190    thread_t      * client_ptr;   // local pointer on client thread
    145     thread_t      * server;       // local pointer on this thread
     191    thread_t      * server;       // local pointer on server thread
    146192    xptr_t          root_xp;      // extended pointer on device waiting queue root
      193    xptr_t          lock_xp;      // extended pointer on lock protecting chdev queue
    147194
    148195    server = CURRENT_THREAD;
    149196
     197chdev_dmsg("\n[DBG] %s : enter / server = %x / chdev = %x / cycle %d\n",
     198__FUNCTION__ , server , chdev , hal_time_stamp() );
     199
    150200    root_xp = XPTR( local_cxy , &chdev->wait_root );
    151 
    152     // take the lock protecting the chdev waiting queue, before entering the
    153         // infinite loop handling commands registered in this queue.
    154     // In the loop, the lock is released during the handling of one command.
    155 
    156     remote_spinlock_lock( XPTR( local_cxy , &chdev->wait_lock ) );
    157 
     201    lock_xp = XPTR( local_cxy , &chdev->wait_lock );
     202
     203        // This infinite loop is executed by the DEV thread
     204    // to handle commands registered in the chdev queue.
    158205    while( 1 )
    159206    {
     207        // get the lock protecting the waiting queue
     208        remote_spinlock_lock( lock_xp );
     209
    160210        // check waiting queue state
    161         if( xlist_is_empty( root_xp ) ) // block and deschedule if waiting queue empty
     211        if( xlist_is_empty( root_xp ) ) // waiting queue empty
    162212        {
    163213            // release lock
    164             remote_spinlock_unlock( XPTR( local_cxy , &chdev->wait_lock ) );
     214            remote_spinlock_unlock( lock_xp );
     215
     216chdev_dmsg("\n[DBG] %s : thread %x deschedule /cycle %d\n",
     217__FUNCTION__ , server , hal_time_stamp() );
    165218
    166219            // block and deschedule
    167220            thread_block( server , THREAD_BLOCKED_DEV_QUEUE );
    168             sched_yield( NULL );
     221            sched_yield();
     222
     223chdev_dmsg("\n[DBG] %s : thread %x resume /cycle %d\n",
     224__FUNCTION__ , server , hal_time_stamp() );
     225
    169226        }
    170         else
     227        else                            // waiting queue not empty
    171228        {
     229
     230#if CONFIG_READ_DEBUG
     231enter_chdev_server = hal_time_stamp();
     232#endif
    172233            // release lock
    173             remote_spinlock_unlock( XPTR( local_cxy , &chdev->wait_lock ) );
     234            remote_spinlock_unlock( lock_xp );
     235
     236            // get extended pointer on first client thread
     237            client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
     238
     239            // get client thread cluster, local pointer, and identifier
     240            client_cxy = GET_CXY( client_xp );
     241            client_ptr = (thread_t *)GET_PTR( client_xp );
     242
     243            // call driver command function to execute I/O operation
     244            chdev->cmd( client_xp );
     245       
     246            // remove the client thread from waiting queue
     247            remote_spinlock_lock( lock_xp );
     248            xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
     249            remote_spinlock_unlock( lock_xp );
     250
     251chdev_dmsg("\n[DBG] %s : thread %x complete operation for client %x / cycle %d\n",
     252__FUNCTION__ , server , client_ptr , hal_time_stamp() );
     253
     254#if CONFIG_READ_DEBUG
     255exit_chdev_server = hal_time_stamp();
     256#endif
     257
    174258        }
    175 
    176         // get extended pointer on first client thread
    177         client_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    178 
    179         // call driver command function to execute I/O operation
    180         chdev->cmd( client_xp );
    181        
    182         // get client thread cluster and local pointer
    183         client_cxy = GET_CXY( client_xp );
    184         client_ptr = (thread_t *)GET_PTR( client_xp );
    185 
    186         // take the lock, and remove the client thread from waiting queue
    187         remote_spinlock_lock( XPTR( local_cxy , &chdev->wait_lock ) );
    188         xlist_unlink( XPTR( client_cxy , &client_ptr->wait_list ) );
    189 
    190259    }  // end while
    191 
    192260}  // end chdev_sequencial_server()
    193261
     
    197265    cxy_t     iob_cxy  = GET_CXY( chdev_dir.iob );
    198266    chdev_t * iob_ptr  = (chdev_t *)GET_PTR( chdev_dir.iob );
    199     xptr_t    iob_base = hal_remote_lwd( XPTR( iob_cxy , &iob_ptr->base ) );
     267    uint32_t  iob_base = (uint32_t)hal_remote_lwd( XPTR( iob_cxy , &iob_ptr->base ) );
    200268
    201269    cxy_t     pic_cxy  = GET_CXY( chdev_dir.pic );
    202270    chdev_t * pic_ptr  = (chdev_t *)GET_PTR( chdev_dir.pic );
    203     xptr_t    pic_base = hal_remote_lwd( XPTR( pic_cxy , &pic_ptr->base ) );
    204 
    205     cxy_t     txt0_cxy  = GET_CXY( chdev_dir.txt[0] );
    206     chdev_t * txt0_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[0] );
    207     xptr_t    txt0_base = hal_remote_lwd( XPTR( txt0_cxy , &txt0_ptr->base ) );
    208 
    209     cxy_t     txt1_cxy  = GET_CXY( chdev_dir.txt[1] );
    210     chdev_t * txt1_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[1] );
    211     xptr_t    txt1_base = hal_remote_lwd( XPTR( txt1_cxy , &txt1_ptr->base ) );
    212 
    213     cxy_t     txt2_cxy  = GET_CXY( chdev_dir.txt[2] );
    214     chdev_t * txt2_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt[2] );
    215     xptr_t    txt2_base = hal_remote_lwd( XPTR( txt2_cxy , &txt2_ptr->base ) );
     271    uint32_t  pic_base = (uint32_t)hal_remote_lwd( XPTR( pic_cxy , &pic_ptr->base ) );
     272
     273    cxy_t     txt0_tx_cxy  = GET_CXY( chdev_dir.txt_tx[0] );
     274    chdev_t * txt0_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt_tx[0] );
     275    uint32_t  txt0_tx_base = (uint32_t)hal_remote_lwd( XPTR( txt0_tx_cxy , &txt0_tx_ptr->base ) );
     276
     277    cxy_t     txt0_rx_cxy  = GET_CXY( chdev_dir.txt_rx[0] );
     278    chdev_t * txt0_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt_rx[0] );
     279    uint32_t  txt0_rx_base = (uint32_t)hal_remote_lwd( XPTR( txt0_rx_cxy , &txt0_rx_ptr->base ) );
     280
     281    cxy_t     txt1_tx_cxy  = GET_CXY( chdev_dir.txt_tx[1] );
     282    chdev_t * txt1_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt_tx[1] );
     283    uint32_t  txt1_tx_base = (uint32_t)hal_remote_lwd( XPTR( txt1_tx_cxy , &txt1_tx_ptr->base ) );
     284
     285    cxy_t     txt1_rx_cxy  = GET_CXY( chdev_dir.txt_rx[1] );
     286    chdev_t * txt1_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt_rx[1] );
     287    uint32_t  txt1_rx_base = (uint32_t)hal_remote_lwd( XPTR( txt1_rx_cxy , &txt1_rx_ptr->base ) );
     288
     289    cxy_t     txt2_tx_cxy  = GET_CXY( chdev_dir.txt_tx[2] );
     290    chdev_t * txt2_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt_tx[2] );
     291    uint32_t  txt2_tx_base = (uint32_t)hal_remote_lwd( XPTR( txt2_tx_cxy , &txt2_tx_ptr->base ) );
     292
     293    cxy_t     txt2_rx_cxy  = GET_CXY( chdev_dir.txt_rx[2] );
     294    chdev_t * txt2_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.txt_rx[2] );
     295    uint32_t  txt2_rx_base = (uint32_t)hal_remote_lwd( XPTR( txt2_rx_cxy , &txt2_rx_ptr->base ) );
    216296
    217297    cxy_t     ioc_cxy  = GET_CXY( chdev_dir.ioc[0] );
    218298    chdev_t * ioc_ptr  = (chdev_t *)GET_PTR( chdev_dir.ioc[0] );
    219     xptr_t    ioc_base = hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) );
     299    uint32_t  ioc_base = (uint32_t)hal_remote_lwd( XPTR( ioc_cxy , &ioc_ptr->base ) );
    220300
    221301    cxy_t     fbf_cxy  = GET_CXY( chdev_dir.fbf[0] );
    222302    chdev_t * fbf_ptr  = (chdev_t *)GET_PTR( chdev_dir.fbf[0] );
    223     xptr_t    fbf_base = hal_remote_lwd( XPTR( fbf_cxy , &fbf_ptr->base ) );
    224 
    225     cxy_t     nic_rx_cxy  = GET_CXY( chdev_dir.nic_rx[0] );
    226     chdev_t * nic_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_rx[0] );
    227     xptr_t    nic_rx_base = hal_remote_lwd( XPTR( nic_rx_cxy , &nic_rx_ptr->base ) );
    228 
    229     cxy_t     nic_tx_cxy  = GET_CXY( chdev_dir.nic_tx[0] );
    230     chdev_t * nic_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_tx[0] );
    231     xptr_t    nic_tx_base = hal_remote_lwd( XPTR( nic_tx_cxy , &nic_tx_ptr->base ) );
     303    uint32_t  fbf_base = (uint32_t)hal_remote_lwd( XPTR( fbf_cxy , &fbf_ptr->base ) );
     304
     305    cxy_t     nic0_rx_cxy  = GET_CXY( chdev_dir.nic_rx[0] );
     306    chdev_t * nic0_rx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_rx[0] );
     307    uint32_t  nic0_rx_base = (uint32_t)hal_remote_lwd( XPTR( nic0_rx_cxy , &nic0_rx_ptr->base ) );
     308
     309    cxy_t     nic0_tx_cxy  = GET_CXY( chdev_dir.nic_tx[0] );
     310    chdev_t * nic0_tx_ptr  = (chdev_t *)GET_PTR( chdev_dir.nic_tx[0] );
     311    uint32_t  nic0_tx_base = (uint32_t)hal_remote_lwd( XPTR( nic0_tx_cxy , &nic0_tx_ptr->base ) );
    232312
    233313    printk("\n***** external chdev directory in cluster %x\n"
    234            "  - iob       = %l / base = %l\n"
    235            "  - pic       = %l / base = %l\n"
    236            "  - txt[0]    = %l / base = %l\n"
    237            "  - txt[1]    = %l / base = %l\n"
    238            "  - txt[2]    = %l / base = %l\n"
    239            "  - ioc[0]    = %l / base = %l\n"
    240            "  - fbf[0]    = %l / base = %l\n"
    241            "  - nic_rx[0] = %l / base = %l\n"
    242            "  - nic_tx[0] = %l / base = %l\n",
     314           "  - iob       : cxy = %X / ptr = %X / base = %X\n"
     315           "  - pic       : cxy = %X / ptr = %X / base = %X\n"
     316           "  - ioc       : cxy = %X / ptr = %X / base = %X\n"
     317           "  - fbf       : cxy = %X / ptr = %X / base = %X\n"
     318           "  - txt_rx[0] : cxy = %X / ptr = %X / base = %X\n"
     319           "  - txt_tx[0] : cxy = %X / ptr = %X / base = %X\n"
     320           "  - txt_rx[1] : cxy = %X / ptr = %X / base = %X\n"
     321           "  - txt_tx[1] : cxy = %X / ptr = %X / base = %X\n"
     322           "  - txt_rx[2] : cxy = %X / ptr = %X / base = %X\n"
     323           "  - txt_tx[2] : cxy = %X / ptr = %X / base = %X\n"
     324           "  - nic_rx[0] : cxy = %X / ptr = %X / base = %X\n"
     325           "  - nic_tx[0] : cxy = %X / ptr = %X / base = %X\n",
    243326           local_cxy,
    244            chdev_dir.iob, iob_base,
    245            chdev_dir.pic, pic_base,
    246            chdev_dir.txt[0], txt0_base,
    247            chdev_dir.txt[1], txt1_base,
    248            chdev_dir.txt[2], txt2_base,
    249            chdev_dir.ioc[0], ioc_base,
    250            chdev_dir.fbf[0], fbf_base,
    251            chdev_dir.nic_rx[0], nic_rx_base,
    252            chdev_dir.nic_tx[0], nic_tx_base );
     327           iob_cxy , iob_ptr , iob_base ,
     328           pic_cxy , pic_ptr , pic_base ,
     329           ioc_cxy , ioc_ptr , ioc_base ,
     330           fbf_cxy , fbf_ptr , fbf_base ,
     331           txt0_rx_cxy , txt0_rx_ptr , txt0_rx_base ,
     332           txt0_tx_cxy , txt0_tx_ptr , txt0_tx_base ,
     333           txt1_rx_cxy , txt1_rx_ptr , txt1_rx_base ,
     334           txt1_tx_cxy , txt1_tx_ptr , txt1_tx_base ,
     335           txt2_rx_cxy , txt2_rx_ptr , txt2_rx_base ,
     336           txt2_tx_cxy , txt2_tx_ptr , txt2_tx_base ,
     337           nic0_rx_cxy , nic0_rx_ptr , nic0_rx_base ,
     338           nic0_tx_cxy , nic0_tx_ptr , nic0_tx_base );
    253339
    254340}  // end chdev_dir_display()
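
    The new client/server protocol above changes the chdev_register_command() interface: the client
    is now always the calling thread, the server is woken with an IPI only when its state was actually
    modified, and the whole registration runs in a critical section. A minimal sketch of how a device
    API function would sit on top of this interface is given below; the dev_foo_read() wrapper and the
    foo_cmd field are hypothetical names used for illustration only, not the actual ALMOS-MKH device API.

    // hypothetical sketch: a blocking device access built on the new interface.
    // The client describes the operation in its own thread descriptor, then
    // registers itself in the remote chdev queue and deschedules; the server
    // thread performs the I/O and writes the completion status back.
    error_t dev_foo_read( xptr_t     chdev_xp,    // extended pointer on FOO chdev
                          char     * buffer,      // local buffer
                          uint32_t   count )      // number of bytes to read
    {
        thread_t * this = CURRENT_THREAD;

        // describe the command (foo_cmd is a hypothetical per-driver structure)
        this->foo_cmd.buffer = buffer;
        this->foo_cmd.count  = count;
        this->foo_cmd.error  = 0;

        // register in the chdev waiting queue, unblock the server thread,
        // and block on THREAD_BLOCKED_IO until the operation completes
        chdev_register_command( chdev_xp );

        // the server thread has updated the status before unblocking us
        return this->foo_cmd.error;
    }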
  • trunk/kernel/kern/chdev.h

    r346 r407  
    6666
    6767/******************************************************************************************
    68  * This define the generic prototypes for the two functions that must be defined
    69  * by all drivers implementing a generic device:
    70  * - "cmd"     : start an I/O operation.
      68 * This defines the generic prototypes for the three functions that must be defined
     69 * by the drivers implementing a generic device:
     70 * - "cmd"     : start a blocking I/O operation.
    7171 * - "isr"     : complete an I/O operation.
    72  * The "cmd" and "isr" are registered in the generic chdev descriptor at kernel init,
    73  * and are called to start and complete an I/O operation. 
     72 * - "aux"     : not for all drivers (implement special functions)
     73 * The "cmd", "isr", and "aux" driver functions are registered in the generic chdev
     74 * descriptor at kernel init, and are called to start and complete an I/O operation. 
    7475*****************************************************************************************/
    7576
     
    7778typedef void (dev_cmd_t) ( xptr_t thread ); 
    7879typedef void (dev_isr_t) ( struct chdev_s * dev ); 
     80typedef void (dev_aux_t) ( void * args ); 
    7981
    8082/******************************************************************************************
     
     121123        uint32_t             impl;        /*! peripheral implementation subtype              */
    122124    uint32_t             channel;     /*! channel index                                  */
    123     bool_t               is_rx;       /*! relevant for NIC peripheral channels only      */
     125    bool_t               is_rx;       /*! relevant for NIC and TXT peripherals           */
    124126        xptr_t               base;        /*! extended pointer on channel device segment     */
    125127    char                 name[16];    /*! name (required by DEVFS)                       */
    126128
    127     dev_cmd_t          * cmd;         /*! local pointer on driver command function       */
    128     dev_isr_t          * isr;         /*! local pointer on driver ISR function           */ 
     129    dev_cmd_t          * cmd;         /*! local pointer on driver CMD function           */
     130    dev_isr_t          * isr;         /*! local pointer on driver ISR function           */
     131    dev_aux_t          * aux;         /*! local pointer on driver AUX function           */
     132
    129133    struct thread_s    * server;      /*! local pointer on associated server thread      */
    130134
     
    165169    xptr_t   pic;                                // external / single channel / shared
    166170
    167     xptr_t   txt[CONFIG_MAX_TXT_CHANNELS];       // external / multi-channels / shared
    168171    xptr_t   ioc[CONFIG_MAX_IOC_CHANNELS];       // external / multi-channels / shared
    169172    xptr_t   fbf[CONFIG_MAX_FBF_CHANNELS];       // external / multi-channels / shared
     173    xptr_t   txt_rx[CONFIG_MAX_TXT_CHANNELS];    // external / multi-channels / shared
     174    xptr_t   txt_tx[CONFIG_MAX_TXT_CHANNELS];    // external / multi-channels / shared
    170175    xptr_t   nic_rx[CONFIG_MAX_NIC_CHANNELS];    // external / multi-channels / shared
    171176    xptr_t   nic_tx[CONFIG_MAX_NIC_CHANNELS];    // external / multi-channels / shared
     
    211216
    212217/******************************************************************************************
    213  * This function registers a local client thread in the waiting queue of a remote
     218 * This function registers the calling thread in the waiting queue of a remote
    214219 * chdev descriptor, activates (i.e. unblock) the server thread associated to chdev,
    215220 * and blocks itself on the THREAD_BLOCKED_IO condition.
    216221 ******************************************************************************************
    217222 * @ chdev_xp  : extended pointer on remote chdev descriptor.
    218  * @ thread    : local pointer on client thread.
    219  *****************************************************************************************/
    220 void chdev_register_command( xptr_t            chdev_xp,
    221                              struct thread_s * thread );
     223 *****************************************************************************************/
     224void chdev_register_command( xptr_t chdev_xp );
    222225
    223226/******************************************************************************************
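
    With the new "aux" entry point, a driver now exposes up to three functions through the generic
    chdev descriptor. The sketch below shows the registration step for a hypothetical "foo" driver;
    only the cmd / isr / aux fields and the dev_cmd_t / dev_isr_t / dev_aux_t typedefs come from the
    header above, everything else is illustrative.

    // entry points matching the dev_cmd_t / dev_isr_t / dev_aux_t typedefs
    void foo_driver_cmd( xptr_t client_xp );        // starts a blocking I/O operation
    void foo_driver_isr( struct chdev_s * chdev );  // completes it at interrupt time
    void foo_driver_aux( void * args );             // optional driver-specific service

    // hypothetical initialisation step registering the driver in a chdev
    void foo_driver_init( chdev_t * chdev )
    {
        chdev->cmd = &foo_driver_cmd;
        chdev->isr = &foo_driver_isr;
        chdev->aux = &foo_driver_aux;   // left NULL by drivers without an AUX service
    }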
  • trunk/kernel/kern/cluster.c

    r406 r407  
    2929#include <hal_special.h>
    3030#include <hal_ppm.h>
     31#include <remote_fifo.h>
    3132#include <printk.h>
    3233#include <errno.h>
     
    7778    // initialize cluster local parameters
    7879        cluster->cores_nr        = info->cores_nr;
    79     cluster->cores_in_kernel = 0;
    8080
    8181    // initialize the lock protecting the embedded kcm allocator
    8282        spinlock_init( &cluster->kcm_lock );
    8383
    84     cluster_dmsg("\n[DMSG] %s for cluster %x enters\n",
    85                  __FUNCTION__ , local_cxy );
     84cluster_dmsg("\n[DBG] %s for cluster %x enters\n",
     85__FUNCTION__ , local_cxy );
    8686
    8787    // initialises DQDT
     
    102102    }
    103103
    104     cluster_dmsg("\n[DMSG] %s : PPM initialized in cluster %x at cycle %d\n",
    105                  __FUNCTION__ , local_cxy , hal_get_cycles() );
     104cluster_dmsg("\n[DBG] %s : PPM initialized in cluster %x at cycle %d\n",
     105__FUNCTION__ , local_cxy , hal_get_cycles() );
    106106
    107107    // initialises embedded KHM
    108108        khm_init( &cluster->khm );
    109109
    110     cluster_dmsg("\n[DMSG] %s : KHM initialized in cluster %x at cycle %d\n",
     110    cluster_dmsg("\n[DBG] %s : KHM initialized in cluster %x at cycle %d\n",
    111111                 __FUNCTION__ , local_cxy , hal_get_cycles() );
    112112
     
    114114        kcm_init( &cluster->kcm , KMEM_KCM );
    115115
    116     cluster_dmsg("\n[DMSG] %s : KCM initialized in cluster %x at cycle %d\n",
     116    cluster_dmsg("\n[DBG] %s : KCM initialized in cluster %x at cycle %d\n",
    117117                 __FUNCTION__ , local_cxy , hal_get_cycles() );
    118118
     
    125125        }
    126126
    127     cluster_dmsg("\n[DMSG] %s : cores initialized in cluster %x at cycle %d\n",
    128                  __FUNCTION__ , local_cxy , hal_get_cycles() );
     127cluster_dmsg("\n[DBG] %s : cores initialized in cluster %x at cycle %d\n",
     128__FUNCTION__ , local_cxy , hal_get_cycles() );
    129129
    130130    // initialises RPC fifo
    131         rpc_fifo_init( &cluster->rpc_fifo );
     131        local_fifo_init( &cluster->rpc_fifo );
    132132    cluster->rpc_threads = 0;
    133133
    134     cluster_dmsg("\n[DMSG] %s : RPC fifo inialized in cluster %x at cycle %d\n",
    135                  __FUNCTION__ , local_cxy , hal_get_cycles() );
      134cluster_dmsg("\n[DBG] %s : RPC fifo initialized in cluster %x at cycle %d\n",
     135__FUNCTION__ , local_cxy , hal_get_cycles() );
    136136
    137137    // initialise pref_tbl[] in process manager
     
    157157    }
    158158
    159     cluster_dmsg("\n[DMSG] %s Process Manager initialized in cluster %x at cycle %d\n",
    160                  __FUNCTION__ , local_cxy , hal_get_cycles() );
     159cluster_dmsg("\n[DBG] %s Process Manager initialized in cluster %x at cycle %d\n",
     160__FUNCTION__ , local_cxy , hal_get_cycles() );
    161161
    162162    hal_fence();
     
    184184//  Cores related functions
    185185////////////////////////////////////////////////////////////////////////////////////
    186 
    187 ////////////////////////////////
    188 void cluster_core_kernel_enter()
    189 {
    190     cluster_t * cluster = LOCAL_CLUSTER;
    191         hal_atomic_add( &cluster->cores_in_kernel , 1 );
    192 }
    193 
    194 ///////////////////////////////
    195 void cluster_core_kernel_exit()
    196 {
    197     cluster_t * cluster = LOCAL_CLUSTER;
    198         hal_atomic_add( &cluster->cores_in_kernel , -1 );
    199 }
    200186
    201187/////////////////////////////////
     
    353339void cluster_process_local_link( process_t * process )
    354340{
     341    uint32_t irq_state;
    355342    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    356343
    357344    // get lock protecting the process manager local list
    358     remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );
     345    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , & irq_state );
    359346
    360347    xlist_add_first( XPTR( local_cxy , &pm->local_root ),
     
    363350
    364351    // release lock protecting the process manager local list
    365     remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
     352    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
    366353}
    367354
     
    369356void cluster_process_local_unlink( process_t * process )
    370357{
     358    uint32_t irq_state;
    371359    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    372360
    373361    // get lock protecting the process manager local list
    374     remote_spinlock_lock( XPTR( local_cxy , &pm->local_lock ) );
     362    remote_spinlock_lock_busy( XPTR( local_cxy , &pm->local_lock ) , &irq_state );
    375363
    376364    xlist_unlink( XPTR( local_cxy , &process->local_list ) );
     
    378366
    379367    // release lock protecting the process manager local list
    380     remote_spinlock_unlock( XPTR( local_cxy , &pm->local_lock ) );
     368    remote_spinlock_unlock_busy( XPTR( local_cxy , &pm->local_lock ) , irq_state );
    381369}
    382370
     
    384372void cluster_process_copies_link( process_t * process )
    385373{
     374    uint32_t irq_state;
    386375    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    387376
     
    401390
    402391    // get lock protecting copies_list[lpid]
    403     remote_spinlock_lock( copies_lock );
     392    remote_spinlock_lock_busy( copies_lock , &irq_state );
    404393
    405394    xlist_add_first( copies_root , copies_entry );
     
    407396
    408397    // release lock protecting copies_list[lpid]
    409     remote_spinlock_unlock( copies_lock );
     398    remote_spinlock_unlock_busy( copies_lock , irq_state );
    410399}
    411400
     
    413402void cluster_process_copies_unlink( process_t * process )
    414403{
     404    uint32_t irq_state;
    415405    pmgr_t * pm = &LOCAL_CLUSTER->pmgr;
    416406
     
    427417
    428418    // get lock protecting copies_list[lpid]
    429     remote_spinlock_lock( copies_lock );
     419    remote_spinlock_lock_busy( copies_lock , &irq_state );
    430420
    431421    xlist_unlink( copies_entry );
     
    433423
    434424    // release lock protecting copies_list[lpid]
    435     remote_spinlock_unlock( copies_lock );
     425    remote_spinlock_unlock_busy( copies_lock , irq_state );
    436426}
    437427
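
    The process-manager lists are now protected by the _busy variants of the remote spinlock, which
    take an extra irq_state argument: the previous status register value is saved when the lock is
    acquired and restored when it is released, so the short critical sections above presumably run
    with IRQs masked. A minimal sketch of the usage pattern, using only the signatures visible in
    this diff:

    // sketch of a short critical section using the busy-waiting lock variants
    static void pmgr_local_insert( pmgr_t * pm , process_t * process )
    {
        uint32_t irq_state;
        xptr_t   lock_xp = XPTR( local_cxy , &pm->local_lock );

        // acquire the lock and save the current IRQ state
        remote_spinlock_lock_busy( lock_xp , &irq_state );

        xlist_add_first( XPTR( local_cxy , &pm->local_root ),
                         XPTR( local_cxy , &process->local_list ) );

        // release the lock and restore the saved IRQ state
        remote_spinlock_unlock_busy( lock_xp , irq_state );
    }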
  • trunk/kernel/kern/cluster.h

    r279 r407  
    108108
    109109    // local parameters
    110         uint32_t          cores_nr;        /*! number of cores in cluster                     */
    111     uint32_t          cores_in_kernel; /*! number of cores currently in kernel mode       */
    112 
     110        uint32_t          cores_nr;        /*! actual number of cores in cluster              */
    113111    uint32_t          ram_size;        /*! physical memory size                           */
    114112    uint32_t          ram_base;        /*! physical memory base (local address)           */
     
    125123
    126124    // RPC
    127         rpc_fifo_t        rpc_fifo;        /*! RPC fifo                                       */
    128     uint32_t          rpc_threads;     /*! current number of RPC threads                  */
     125        remote_fifo_t     rpc_fifo;        /*! RPC fifo (one per cluster)                     */
     126    uint32_t          rpc_threads;     /*! current number of RPC threads in cluster       */
    129127
    130128    // DQDT
     
    173171
    174172/******************************************************************************************
    175  * This function checks the validity of a cluster identifier. TODO useful ??? [AG]
     173 * This function checks the validity of a cluster identifier.
    176174 ******************************************************************************************
    177175 * @ cxy    : cluster identifier to be checked.
     
    179177 *****************************************************************************************/
    180178bool_t cluster_is_undefined( cxy_t cxy );
    181 
    182 /******************************************************************************************
    183  * This function register sysfs information in cluster TODO ???  [AG]
    184  *****************************************************************************************/
    185 void cluster_sysfs_register();
    186179
    187180
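
    cluster_is_undefined() is kept as the single validity check on cluster identifiers. A small
    hypothetical caller illustrating its intended use before a remote access (the foo_remote_access()
    name is illustrative, not a kernel symbol):

    // hypothetical caller rejecting an out-of-range cluster identifier
    error_t foo_remote_access( cxy_t target_cxy )
    {
        if( cluster_is_undefined( target_cxy ) )
        {
            printk("\n[ERROR] %s : illegal cluster %x\n", __FUNCTION__ , target_cxy );
            return -1;
        }

        /* ... safe to build XPTR( target_cxy , ... ) and access remote data ... */

        return 0;
    }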
  • trunk/kernel/kern/core.c

    r406 r407  
    107107        ticks = core->ticks_nr++;
    108108
    109         // handle pending alarms TODO ??? [AG]
    110         // alarm_clock( &core->alarm_mgr , ticks );
     109    // handle signals for all threads executing on this core
     110    sched_handle_signals( core );
    111111
    112112        // handle scheduler
    113         if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield( NULL );
     113        if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield();
    114114
    115115        // update DQDT
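
    The clock handler now does two distinct things: it scans for pending signals on every tick, and
    it forces a scheduling point only once per quantum. A reduced sketch of that structure (the
    complete handler above also updates the DQDT):

    // reduced sketch of the per-tick / per-quantum split in the clock handler
    void core_clock_sketch( core_t * core )
    {
        uint32_t ticks = core->ticks_nr++;

        // handle signals for all threads executing on this core (every tick)
        sched_handle_signals( core );

        // invoke the scheduler only at quantum boundaries
        if( (ticks % CONFIG_SCHED_TICKS_PER_QUANTUM) == 0 ) sched_yield();
    }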
  • trunk/kernel/kern/do_syscall.c

    r406 r407  
    3131
    3232/////////////////////////////////////////////////////////////////////////////////////////////
      33// This function should never be called...
    3334/////////////////////////////////////////////////////////////////////////////////////////////
    34 static inline int sys_undefined()
     35static int sys_undefined()
    3536{
    3637    panic("undefined system call");
     
    4041/////////////////////////////////////////////////////////////////////////////////////////////
    4142// This array of pointers define the kernel functions implementing the syscalls.
    42 // It must be kept consistent with the enum in syscalls.h
     43// It must be kept consistent with the enum in "shared_syscalls.h" file.
    4344/////////////////////////////////////////////////////////////////////////////////////////////
    4445
     
    4849{
    4950    sys_thread_exit,        // 0
    50     sys_mmap,               // 1
     51    sys_thread_yield,       // 1
    5152    sys_thread_create,      // 2
    5253    sys_thread_join,        // 3
    5354    sys_thread_detach,      // 4
    54     sys_thread_yield,       // 5
     55    sys_undefined,          // 5
    5556    sys_sem,                // 6
    5657    sys_condvar,            // 7
    5758    sys_barrier,            // 8
    5859    sys_mutex,              // 9
    59     sys_thread_sleep,       // 10
    60     sys_thread_wakeup,      // 11
     60
     61    sys_undefined,          // 10
     62    sys_munmap,             // 11
    6163    sys_open,               // 12
    62     sys_creat,              // 13
     64    sys_mmap,               // 13
    6365    sys_read,               // 14
    6466    sys_write,              // 15
     
    6769    sys_unlink,             // 18
    6870    sys_pipe,               // 19
     71
    6972    sys_chdir,              // 20
    7073    sys_mkdir,              // 21
     
    7477    sys_closedir,           // 25
    7578    sys_getcwd,             // 26
    76     sys_clock,              // 27
     79    sys_undefined,          // 27 
    7780    sys_alarm,              // 28
    7881    sys_rmdir,              // 29
     82
    7983    sys_utls,               // 30
    8084    sys_chmod,              // 31
     
    8791    sys_stat,               // 38
    8892    sys_trace,              // 39
     93
     94    sys_get_config,         // 40
     95    sys_get_core,           // 41
     96    sys_get_cycle,          // 42
     97    sys_get_sched,          // 43
     98    sys_panic,              // 44
     99    sys_thread_sleep,       // 45
     100    sys_thread_wakeup,      // 46
    89101};
    90102
     
    102114        thread_user_time_update( this );
    103115
    104     // enable IRQs
     116    // enable interrupts
    105117        hal_enable_irq( NULL );
    106118 
     
    116128        }
    117129
    118         syscall_dmsg("\n[DMSG] %s : pid = %x / trdid = %x / service #%d\n"
    119                  "         arg0 = %x / arg1 = %x / arg2 = %x / arg3 = %x\n",
    120                          __FUNCTION__ , this->process->pid , this->trdid , service_num ,
    121                          arg0 , arg1 , arg2 , arg3 );
     130#if( CONFIG_SYSCALL_DEBUG & 0x1)
     131printk("\n[DBG] %s : pid = %x / trdid = %x / service #%d\n"
     132"      arg0 = %x / arg1 = %x / arg2 = %x / arg3 = %x\n",
     133__FUNCTION__ , this->process->pid , this->trdid , service_num , arg0 , arg1 , arg2 , arg3 );
     134#endif
    122135
    123136    // reset errno
     
    127140        error = syscall_tbl[service_num] ( arg0 , arg1 , arg2 , arg3 );
    128141
    129     // handle pending signals for the calling thread
    130     thread_signals_handle( this );
    131 
    132         // disable IRQs
     142    // disable interrupt
    133143        hal_disable_irq( NULL );
    134144
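
    The dispatch itself is table-driven: the service number indexes syscall_tbl[] directly, which is
    why the table order must stay consistent with the enum in shared_syscalls.h and why unused slots
    point to sys_undefined. A self-contained sketch of the technique is given below; all names are
    illustrative and do not belong to the kernel.

    // toy table-driven dispatcher: the service number indexes a function-pointer
    // table, out-of-range numbers are rejected, unused slots map to sys_undef.
    typedef int (sys_func_t)( unsigned a0, unsigned a1, unsigned a2, unsigned a3 );

    static int sys_undef( unsigned a0, unsigned a1, unsigned a2, unsigned a3 ) { return -1; }
    static int sys_add  ( unsigned a0, unsigned a1, unsigned a2, unsigned a3 ) { return (int)(a0 + a1); }

    static sys_func_t * tbl[] =       // order must match the service-number enum
    {
        sys_add,                      // 0
        sys_undef,                    // 1 : not implemented
    };

    static int dispatch( unsigned num, unsigned a0, unsigned a1, unsigned a2, unsigned a3 )
    {
        if( num >= sizeof(tbl)/sizeof(tbl[0]) ) return -1;   // unknown service number
        return tbl[num]( a0 , a1 , a2 , a3 );
    }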
  • trunk/kernel/kern/do_syscall.h

    r16 r407  
    11/*
    2  * do_syscall.h - kernel service numbers asked by userland
     2 * do_syscall.h - generic syscall handler.
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
     
    3232/********************************************************************************************
    3333 * This function calls the kernel function defined by the <service_num> argument.
      34 * The possible values for service_num are defined in the syscalls/syscalls.h file.
    3435 ********************************************************************************************
    3536 * @ this        : pointer on calling thread descriptor
  • trunk/kernel/kern/kernel_init.c

    r406 r407  
    3232#include <barrier.h>
    3333#include <remote_barrier.h>
     34#include <remote_fifo.h>
    3435#include <core.h>
    3536#include <list.h>
     
    8586cluster_t            cluster_manager                         CONFIG_CACHE_LINE_ALIGNED;
    8687
    87 // This variable defines the TXT0 kernel terminal
     88// This variable defines the TXT0 kernel terminal (TX only)
    8889__attribute__((section(".kdata")))
    8990chdev_t              txt0_chdev                              CONFIG_CACHE_LINE_ALIGNED;
     
    121122vfs_ctx_t            fs_context[FS_TYPES_NR]                 CONFIG_CACHE_LINE_ALIGNED;
    122123
     124// These variables are used by the sched_yield function to save SR value
     125__attribute__((section(".kdata")))
     126uint32_t             switch_save_sr[CONFIG_MAX_LOCAL_CORES]  CONFIG_CACHE_LINE_ALIGNED;
     127
     128#if CONFIG_READ_DEBUG
     129uint32_t   enter_sys_read;
     130uint32_t   exit_sys_read;
     131
     132uint32_t   enter_devfs_move;
     133uint32_t   exit_devfs_move;
     134
     135uint32_t   enter_txt_read;
     136uint32_t   exit_txt_read;
     137
     138uint32_t   enter_chdev_cmd;
     139uint32_t   exit_chdev_cmd;
     140
     141uint32_t   enter_chdev_server;
     142uint32_t   exit_chdev_server;
     143
     144uint32_t   enter_tty_cmd;
     145uint32_t   exit_tty_cmd;
     146
     147uint32_t   enter_tty_isr;
     148uint32_t   exit_tty_isr;
     149#endif
    123150
    124151///////////////////////////////////////////////////////////////////////////////////////////
     
    137164           "    /_/        \\_\\ |______| |_|    |_|   \\_____/  |______/        |_|    |_|  |_|  \\_\\ |_|   |_|  \n"
    138165           "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n"
    139            "\n\n\t\t\t Version 0.0 / %d cluster(s) / %d core(s) per cluster\n\n", nclusters , ncores );
     166           "\n\n\t\t Version 0.0 / %d cluster(s) / %d core(s) per cluster / cycle %d\n\n",
     167           nclusters , ncores , hal_time_stamp() );
    140168}
    141169
     
    201229                {
    202230                    cxy_t  cxy = (x<<info->y_width) + y;
    203                     hal_remote_swd( XPTR( cxy , &chdev_dir.txt[0] ) ,
     231                    hal_remote_swd( XPTR( cxy , &chdev_dir.txt_tx[0] ) ,
    204232                                    XPTR( local_cxy , &txt0_chdev ) );
    205233                }
     
    273301            }
    274302
    275 #if( CONFIG_KINIT_DEBUG > 1 )
    276 printk("\n[DMSG] %s : created MMC in cluster %x / chdev = %x\n",
    277 __FUNCTION__ , channel , local_cxy , chdev_ptr );
     303#if( CONFIG_KINIT_DEBUG & 0x1 )
     304if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
     305printk("\n[DBG] %s : created MMC in cluster %x / chdev = %x\n",
     306__FUNCTION__ , local_cxy , chdev_ptr );
    278307#endif
    279308        }
     
    301330                chdev_dir.dma[channel] = XPTR( local_cxy , chdev_ptr );
    302331
    303 #if( CONFIG_KINIT_DEBUG > 1 )
    304 printk("\n[DMSG] %s : created DMA[%d] in cluster %x / chdev = %x\n",
     332#if( CONFIG_KINIT_DEBUG & 0x1 )
     333if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
     334printk("\n[DBG] %s : created DMA[%d] in cluster %x / chdev = %x\n",
    305335__FUNCTION__ , channel , local_cxy , chdev_ptr );
    306336#endif
     
    355385        impl     = IMPL_FROM_TYPE( dev_tbl[i].type );
    356386
    357         // There is one chdev per direction for NIC
    358         if (func == DEV_FUNC_NIC) directions = 2;
    359         else                      directions = 1;
     387        // There is one chdev per direction for NIC and for TXT
     388        if((func == DEV_FUNC_NIC) || (func == DEV_FUNC_TXT)) directions = 2;
     389        else                                                 directions = 1;
    360390
    361391        // The TXT0 chdev has already been created
     
    363393        else                      first_channel = 0;
    364394
    365         // do nothing for RO, that does not require a device descriptor.
     395        // do nothing for ROM, that does not require a device descriptor.
    366396        if( func == DEV_FUNC_ROM ) continue;
    367397
     
    394424
    395425                // allocate and initialize a local chdev
    396                 // if local cluster matches target cluster
     426                // when local cluster matches target cluster
    397427                if( target_cxy == local_cxy )
    398428                {
     
    420450                    if(func==DEV_FUNC_IOB             ) entry  = &chdev_dir.iob;
    421451                    if(func==DEV_FUNC_IOC             ) entry  = &chdev_dir.ioc[channel];
    422                     if(func==DEV_FUNC_TXT             ) entry  = &chdev_dir.txt[channel];
    423452                    if(func==DEV_FUNC_FBF             ) entry  = &chdev_dir.fbf[channel];
     453                    if((func==DEV_FUNC_TXT) && (rx==0)) entry  = &chdev_dir.txt_tx[channel];
     454                    if((func==DEV_FUNC_TXT) && (rx==1)) entry  = &chdev_dir.txt_rx[channel];
    424455                    if((func==DEV_FUNC_NIC) && (rx==0)) entry  = &chdev_dir.nic_tx[channel];
    425456                    if((func==DEV_FUNC_NIC) && (rx==1)) entry  = &chdev_dir.nic_rx[channel];
     
    435466                    }
    436467
    437 #if( CONFIG_KINIT_DEBUG > 1 )
    438 printk("\n[DMSG] %s : create chdev %s[%d] in cluster %x / chdev = %x\n",
    439 __FUNCTION__ , chdev_func_str( func ), channel , local_cxy , chdev );
     468#if( CONFIG_KINIT_DEBUG & 0x1 )
     469if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
     470printk("\n[DBG] %s : create chdev %s / channel = %d / rx = %d / cluster %x / chdev = %x\n",
     471__FUNCTION__ , chdev_func_str( func ), channel , rx , local_cxy , chdev );
    440472#endif
    441473                }  // end if match
     
    451483///////////////////////////////////////////////////////////////////////////////////////////
    452484// This function is called by CP0 in cluster 0 to allocate memory and initialize the PIC
    453 // device, namely the informations attached to the external IOPIC controller.
      485// device, namely the information attached to the external IOPIC controller, that
     486// must be replicated in all clusters (struct iopic_input).
    454487// This initialisation must be done before other devices initialisation because the IRQ
    455 // routing infrastructure is required for internal and external devices initialisation.
     488// routing infrastructure is required for both internal and external devices init.
    456489///////////////////////////////////////////////////////////////////////////////////////////
    457490// @ info    : pointer on the local boot-info structure.
     
    490523    assert( found , __FUNCTION__ , "PIC device not found\n" );
    491524
    492     // allocate and initialize the PIC chdev in local cluster
    493     chdev = chdev_create( func,
     525    // allocate and initialize the PIC chdev in cluster 0
     526    chdev = chdev_create( DEV_FUNC_PIC,
    494527                          impl,
    495528                          0,      // channel
     
    502535    dev_pic_init( chdev );
    503536
    504     // register extended pointer on PIC chdev in "chdev_dir" array in all clusters
     537    // register, in all clusters, the extended pointer
     538    // on PIC chdev in "chdev_dir" array
    505539    xptr_t * entry = &chdev_dir.pic;   
    506540               
     
    515549    }
    516550
    517     // initialize the "iopic_input" structure
     551    // initialize, in all clusters, the "iopic_input" structure
    518552    // defining how external IRQs are connected to IOPIC
    519     uint32_t   id;
    520     uint8_t    valid;
    521     uint32_t   type;
    522     uint8_t    channel;
    523     uint8_t    is_rx;
     553
     554    // register default value for unused inputs
     555    for( x = 0 ; x < info->x_size ; x++ )
     556    {
     557        for( y = 0 ; y < info->y_size ; y++ )
     558        {
     559            cxy_t  cxy = (x<<info->y_width) + y;
     560            hal_remote_memset( XPTR( cxy , &iopic_input ) , 0xFF , sizeof(iopic_input_t) );
     561        }
     562    }
     563
     564    // register input IRQ index for valid inputs
     565    uint32_t   id;         // input IRQ index
     566    uint8_t    valid;      // input IRQ is connected
     567    uint32_t   type;       // source device type
     568    uint8_t    channel;    // source device channel
     569    uint8_t    is_rx;      // source device direction
      570    uint32_t * ptr;        // local pointer on one field in iopic_input structure
    524571
    525572    for( id = 0 ; id < CONFIG_MAX_EXTERNAL_IRQS ; id++ )
     
    529576        channel = dev_tbl[i].irq[id].channel;
    530577        is_rx   = dev_tbl[i].irq[id].is_rx;
    531 
    532         if( valid )  // only valid inputs are registered
    533         {
    534             uint32_t * index;  // local pointer on one entry
    535             uint16_t func = FUNC_FROM_TYPE( type );
    536 
    537             if     ( func == DEV_FUNC_TXT )
    538             index = &iopic_input.txt[channel];
    539             else if( func == DEV_FUNC_IOC )
    540             index = &iopic_input.ioc[channel];
    541             else if( (func == DEV_FUNC_NIC) && (is_rx == 0) )
    542             index = &iopic_input.nic_tx[channel];
    543             else if( (func == DEV_FUNC_NIC) && (is_rx != 0) )
    544             index = &iopic_input.nic_rx[channel];
    545             else if( func == DEV_FUNC_IOB )
    546             index = &iopic_input.iob;
    547             else
    548             assert( false , __FUNCTION__ , "illegal source device for IOPIC input" );
    549 
    550             // set entry in local structure
    551             *index = id;
     578        func    = FUNC_FROM_TYPE( type );
     579
     580        // get pointer on relevant field in iopic_input
     581        if( valid )
     582        {
     583            if     ( func == DEV_FUNC_IOC )                 ptr = &iopic_input.ioc[channel];
     584            else if((func == DEV_FUNC_TXT) && (is_rx == 0)) ptr = &iopic_input.txt_tx[channel];
     585            else if((func == DEV_FUNC_TXT) && (is_rx != 0)) ptr = &iopic_input.txt_rx[channel];
     586            else if((func == DEV_FUNC_NIC) && (is_rx == 0)) ptr = &iopic_input.nic_tx[channel];
     587            else if((func == DEV_FUNC_NIC) && (is_rx != 0)) ptr = &iopic_input.nic_rx[channel];
     588            else if( func == DEV_FUNC_IOB )                 ptr = &iopic_input.iob;
     589            else     panic( "illegal source device for IOPIC input" );
     590
     591            // set one entry in all "iopic_input" structures
     592            for( x = 0 ; x < info->x_size ; x++ )
     593            {
     594                for( y = 0 ; y < info->y_size ; y++ )
     595                {
     596                    cxy_t  cxy = (x<<info->y_width) + y;
     597                    hal_remote_swd( XPTR( cxy , ptr ) , id );
     598                }
     599            }
    552600        }
    553601    }
    554602
    555 #if( CONFIG_KINIT_DEBUG > 1 )
    556 printk("\n[DMSG] %s created PIC chdev in cluster %x at cycle %d\n",
    557 __FUNCTION__ , local_cxy , (uint32_t)hal_time_stamp() );
     603#if( CONFIG_KINIT_DEBUG & 0x1 )
     604if( hal_time_stamp() > CONFIG_KINIT_DEBUG )
     605{
     606    printk("\n[DBG] %s created PIC chdev in cluster %x at cycle %d\n",
     607    __FUNCTION__ , local_cxy , (uint32_t)hal_time_stamp() );
     608    dev_pic_inputs_display();
     609}
    558610#endif
    559611   
     
    715767    hal_set_current_thread( thread );
    716768
    717     // each core initializes the idle thread "locks_root" and "xlocks_root" fields
      769    // each core registers the core descriptor pointer in its idle thread descriptor
     770    thread->core = &LOCAL_CLUSTER->core_tbl[core_lid];
     771
      772    // each core initializes "locks_root" and "xlocks_root" in the idle thread descriptor
    718773    list_root_init( &thread->locks_root );
    719774    xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
     
    728783
    729784    if( (core_lid ==  0) && (local_cxy == 0) )
    730     printk("\n[KINIT] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
     785    kinit_dmsg("\n[DBG] %s : exit barrier 0 : TXT0 initialized / cycle %d\n",
    731786    __FUNCTION__, hal_time_stamp() );
    732787
    733788    /////////////////////////////////////////////////////////////////////////////
    734     // STEP 1 : all cores check its core identifier.
     789    // STEP 1 : all cores check core identifier.
    735790    //          CP0 initializes the local cluster manager.
    736791    //          This includes the memory allocators.
     
    762817
    763818    if( (core_lid ==  0) && (local_cxy == 0) )
    764     printk("\n[KINIT] %s : exit barrier 1 : clusters initialised / cycle %d\n",
     819    kinit_dmsg("\n[DBG] %s : exit barrier 1 : clusters initialised / cycle %d\n",
    765820    __FUNCTION__, hal_time_stamp() );
    766821
    767822    /////////////////////////////////////////////////////////////////////////////////
    768     // STEP 2 : all CP0s initialize the process_zero descriptor.
     823    // STEP 2 : CP0 initializes the process_zero descriptor.
    769824    //          CP0 in cluster 0 initializes the IOPIC device.
    770825    /////////////////////////////////////////////////////////////////////////////////
     
    787842
    788843    if( (core_lid ==  0) && (local_cxy == 0) )
    789     printk("\n[KINIT] %s : exit barrier 2 : PIC initialised / cycle %d\n",
     844    kinit_dmsg("\n[DBG] %s : exit barrier 2 : PIC initialised / cycle %d\n",
    790845    __FUNCTION__, hal_time_stamp() );
    791846
    792847    ////////////////////////////////////////////////////////////////////////////////
    793     // STEP 3 : all CP0s initialize the distibuted LAPIC descriptor.
    794     //          all CP0s initialize the internal chdev descriptors
    795     //          all CP0s initialize the local external chdev descriptors
      848    // STEP 3 : CP0 initializes the distributed LAPIC descriptor.
     849    //          CP0 initializes the internal chdev descriptors
      850    //          CP0 initializes the local external chdev descriptors
    796851    ////////////////////////////////////////////////////////////////////////////////
    797852
     
    818873
    819874    if( (core_lid ==  0) && (local_cxy == 0) )
    820     printk("\n[KINIT] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
     875    kinit_dmsg("\n[DBG] %s : exit barrier 3 : all chdev initialised / cycle %d\n",
    821876               __FUNCTION__, hal_time_stamp());
    822877
    823878    /////////////////////////////////////////////////////////////////////////////////
     824879    // STEP 4 : All cores enable IPI (Inter Processor Interrupt),
     825     //          All cores initialise specific core registers
     826880    //          All cores initialize the IDLE thread.
    827881    //          Only CP0 in cluster 0 creates the VFS root inode.
     
    837891    hal_enable_irq( &status );
    838892
    839     // All cores initialize specific core registers
    840     hal_core_init( info );
    841 
    842893    // all cores initialize the idle thread descriptor
    843894    error = thread_kernel_init( thread,
     
    857908
    858909#if CONFIG_KINIT_DEBUG
    859 sched_display();
     910sched_display( core_lid );
    860911#endif
    861912
     
    928979
    929980    if( (core_lid ==  0) && (local_cxy == 0) )
    930     printk("\n[KINIT] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
     981    kinit_dmsg("\n[DBG] %s : exit barrier 4 : VFS_root = %l in cluster 0 / cycle %d\n",
    931982               __FUNCTION__, vfs_root_inode_xp , hal_time_stamp());
    932983
     
    9871038
    9881039    if( (core_lid ==  0) && (local_cxy == 0) )
    989     printk("\n[KINIT] %s : exit barrier 5 : VFS_root = %l in cluster IO / cycle %d\n",
     1040    kinit_dmsg("\n[DBG] %s : exit barrier 5 : VFS_root = %l in cluster IO / cycle %d\n",
    9901041    __FUNCTION__, vfs_root_inode_xp , hal_time_stamp() );
    9911042
     
    10201071
    10211072    if( (core_lid ==  0) && (local_cxy == 0) )
    1022     printk("\n[KINIT] %s : exit barrier 6 : dev_root = %l in cluster IO / cycle %d\n",
     1073    kinit_dmsg("\n[DBG] %s : exit barrier 6 : dev_root = %l in cluster IO / cycle %d\n",
    10231074    __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
    10241075
     
    10571108
    10581109    if( (core_lid ==  0) && (local_cxy == 0) )
    1059     printk("\n[KINIT] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
     1110    kinit_dmsg("\n[DBG] %s : exit barrier 7 : dev_root = %l in cluster 0 / cycle %d\n",
    10601111    __FUNCTION__, devfs_dev_inode_xp , hal_time_stamp() );
    10611112
     
    10751126    /////////////////////////////////////////////////////////////////////////////////
    10761127
     1128#if CONFIG_KINIT_DEBUG
     1129sched_display( core_lid );
     1130#endif
     1131
    10771132    if( (core_lid ==  0) && (local_cxy == 0) )
    1078     printk("\n[KINIT] %s : exit barrier 8 : process init created / cycle %d\n",
     1133    kinit_dmsg("\n[DBG] %s : exit barrier 8 : process init created / cycle %d\n",
    10791134    __FUNCTION__ , hal_time_stamp() );
    10801135
     
    11181173                   sizeof( core_t            ),
    11191174                   sizeof( scheduler_t       ),
    1120                    sizeof( rpc_fifo_t        ),
     1175                   sizeof( remote_fifo_t     ),
    11211176                   sizeof( page_t            ),
    11221177                   sizeof( mapper_t          ),
     
    11391194    dev_pic_enable_timer( CONFIG_SCHED_TICK_MS_PERIOD );
    11401195
    1141     // each core jump to idle thread
      1196    // each core jumps to thread_idle_func
    11421197    thread_idle_func();
    11431198}
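
    Several steps above rely on the same replication idiom: a variable placed in the .kdata section
    exists at the same local address in every cluster, so one core can update all the copies with
    remote writes, building each target cluster identifier from its (x,y) coordinates. The helper
    below is a hypothetical repackaging of that loop, using only functions and types visible in
    this diff:

    // hypothetical helper replicating one extended pointer (e.g. a chdev_dir
    // entry) into the corresponding field of every cluster's local copy
    static void replicate_xptr( boot_info_t * info,
                                xptr_t      * entry,      // local address of the field
                                xptr_t        value )     // value to replicate
    {
        uint32_t x;
        uint32_t y;

        for( x = 0 ; x < info->x_size ; x++ )
        {
            for( y = 0 ; y < info->y_size ; y++ )
            {
                // cluster identifier built from the (x,y) coordinates
                cxy_t cxy = (x << info->y_width) + y;

                // 64-bit remote write into the target cluster copy
                hal_remote_swd( XPTR( cxy , entry ) , value );
            }
        }
    }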
  • trunk/kernel/kern/printk.c

    r406 r407  
    190190        goto xprintf_text;
    191191    }
    192 } // end xprintf()
    193 
    194 ///////////////////////////////////////////////////////////////////////////////////
    195 // This static function is called by kernel_printf() to display a string on the
    196 // TXT channel defined by the <channel> argument.
    197 // The access mode is defined by the <busy> argument:
    198 // - if <busy> is true, it uses the dev_txt_sync_write() function, that takes the
    199 //   TXT lock, and call directly the relevant TXT driver, without descheduling.
    200 // - if <busy is false, it uses the dev_txt_write() function, that register the
    201 //   write buffer in the relevant TXT chdev queue, and uses a descheduling policy.
    202 ///////////////////////////////////////////////////////////////////////////////////
    203 // @ channel  : TXT channel.
    204 // @ busy     : TXT device acces mode (busy waiting if non zero).
    205 // @ buf      : buffer containing the characters.
    206 // @ nc       : number of characters.
    207 // return 0 if success / return -1 if TTY0 busy after 10000 retries.
    208 ///////////////////////////////////////////////////////////////////////////////////
    209 static error_t txt_write( uint32_t  channel,
    210                           uint32_t  busy,
    211                           char    * buffer,
    212                           uint32_t  count )
    213 {
    214     if( busy ) return dev_txt_sync_write( channel , buffer , count );
    215     else       return dev_txt_write( channel , buffer , count );
    216 
     192} // end snprintf()
    217193
    218194//////////////////////////////////////////////////////////////////////////////////////
    219 // This static function is called by printk(), assert() and nolock_printk() to build
    220 // a formated string.
     195// This static function is called by printk(), assert() and nolock_printk()
     196// to display a formated string on TXT0, using a busy waiting policy.
    221197//////////////////////////////////////////////////////////////////////////////////////
    222 // @ channel   : channel index.
    223 // @ busy      : TXT device access mode (busy waiting if non zero).
    224198// @ format    : printf like format.
    225 // @ args      : format arguments.
     199// @ args      : va_list of arguments.
    226200//////////////////////////////////////////////////////////////////////////////////////
    227 static void kernel_printf( uint32_t   channel,
    228                            uint32_t   busy,
    229                            char     * format,
     201static void kernel_printf( char     * format,
    230202                           va_list  * args )
    231203{
     
    239211        if (i)
    240212        {
    241             txt_write( channel, busy, format, i );
     213            dev_txt_sync_write( format, i );
    242214            format += i;
    243215        }
     
    276248                {
    277249                    val = -val;
    278                     txt_write( channel, busy, "-" , 1 );
     250                    dev_txt_sync_write( "-" , 1 );
    279251                }
    280252                for(i = 0; i < 10; i++)
     
    302274            {
    303275                uint32_t val = va_arg( *args , uint32_t );
    304                 txt_write( channel, busy, "0x" , 2 );
     276                dev_txt_sync_write( "0x" , 2 );
    305277                for(i = 0; i < 8; i++)
    306278                {
     
    315287            {
    316288                uint32_t val = va_arg( *args , uint32_t );
    317                 txt_write( channel, busy, "0x" , 2 );
     289                dev_txt_sync_write( "0x" , 2 );
    318290                for(i = 0; i < 8; i++)
    319291                {
     
    328300            {
    329301                unsigned long long val = va_arg( *args , unsigned long long );
    330                 txt_write( channel, busy, "0x" , 2 );
     302                dev_txt_sync_write( "0x" , 2 );
    331303                for(i = 0; i < 16; i++)
    332304                {
     
    341313            {
    342314                unsigned long long val = va_arg( *args , unsigned long long );
    343                 txt_write( channel, busy, "0x" , 2 );
     315                dev_txt_sync_write( "0x" , 2 );
    344316                for(i = 0; i < 16; i++)
    345317                {
     
    363335            default:
    364336            {
    365                 txt_write( channel , busy,
    366                            "\n[PANIC] in kernel_printf() : illegal format\n", 45 );
    367             }
    368         }
    369 
    370         if( pbuf != NULL ) txt_write( channel, busy, pbuf, len );
     337                dev_txt_sync_write( "\n[PANIC] in kernel_printf() : illegal format\n", 45 );
     338            }
     339        }
     340
     341        if( pbuf != NULL ) dev_txt_sync_write( pbuf, len );
    371342       
    372343        goto printf_text;
     
    382353
    383354    // get pointers on TXT0 chdev
    384     xptr_t    txt0_xp  = chdev_dir.txt[0];
     355    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    385356    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    386357    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     
    394365    // call kernel_printf on TXT0, in busy waiting mode
    395366    va_start( args , format );
    396     kernel_printf( 0 , 1 , format , &args );
     367    kernel_printf( format , &args );
    397368    va_end( args );
    398369
     
    408379    // call kernel_printf on TXT0, in busy waiting mode
    409380    va_start( args , format );
    410     kernel_printf( 0 , 1 , format , &args );
     381    kernel_printf( format , &args );
    411382    va_end( args );
    412383}
     
    419390
    420391    // get pointers on TXT0 chdev
    421     xptr_t    txt0_xp  = chdev_dir.txt[0];
     392    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    422393    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    423394    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     
    431402    // call kernel_printf on TXT0, in busy waiting mode
    432403    va_start( args , format );
    433     kernel_printf( 0 , 1 , format , &args );
     404    kernel_printf( format , &args );
    434405    va_end( args );
    435406
     
    456427    {
    457428        // get pointers on TXT0 chdev
    458         xptr_t    txt0_xp  = chdev_dir.txt[0];
     429        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    459430        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    460431        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     
    471442        // call kernel_printf on TXT0, in busy waiting to print format
    472443        va_start( args , format );
    473         kernel_printf( 0 , 1 , format , &args );
     444        kernel_printf( format , &args );
    474445        va_end( args );
    475446
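
The refactoring above drops the <channel> and <busy> arguments: every printk() variant now funnels into the same busy-waiting dev_txt_sync_write() path on TXT0 and passes the address of its va_list to kernel_printf(). The following standalone sketch reproduces only that calling convention; demo_printf / demo_vprintf are hypothetical names and the standard vprintf() stands in for the TXT0 driver.

    #include <stdarg.h>
    #include <stdio.h>

    /* formatting routine : receives a pointer to the caller's va_list,
     * mirroring the kernel_printf( format , &args ) convention above   */
    static void demo_vprintf( const char * format , va_list * args )
    {
        vprintf( format , *args );   /* stand-in for the busy-waiting TXT0 write */
    }

    /* public wrapper : owns the va_list, like printk() / nolock_printk() / assert() */
    void demo_printf( const char * format , ... )
    {
        va_list args;
        va_start( args , format );
        demo_vprintf( format , &args );
        va_end( args );
    }

    int main( void )
    {
        demo_printf( "value = %x\n" , 0x407u );
        return 0;
    }
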
  • trunk/kernel/kern/printk.h

    r406 r407  
    102102             char       * format , ... );
    103103
    104 #define panic(fmt, ...)     _panic("[PANIC] %s(): " fmt "\n", __func__, ##__VA_ARGS__)
     104#define panic(fmt, ...)     _panic("\n[PANIC] %s(): " fmt "\n", __func__, ##__VA_ARGS__)
    105105
    106106///////////////////////////////////////////////////////////////////////////////////
     
    108108///////////////////////////////////////////////////////////////////////////////////
    109109
     110#if CONFIG_CHDEV_DEBUG
     111#define chdev_dmsg(...)   if(hal_time_stamp() > CONFIG_CHDEV_DEBUG) printk(__VA_ARGS__)
     112#else
     113#define chdev_dmsg(...)
     114#endif
     115
    110116#if CONFIG_CLUSTER_DEBUG
    111117#define cluster_dmsg(...)   if(hal_time_stamp() > CONFIG_CLUSTER_DEBUG) printk(__VA_ARGS__)
     
    186192#endif
    187193
     194#if CONFIG_GRPC_DEBUG
     195#define grpc_dmsg(...)   if(hal_time_stamp() > CONFIG_GRPC_DEBUG) printk(__VA_ARGS__)
     196#else
     197#define grpc_dmsg(...)
     198#endif
     199
    188200#if CONFIG_IDLE_DEBUG
    189201#define idle_dmsg(...)   if(hal_time_stamp() > CONFIG_IDLE_DEBUG) printk(__VA_ARGS__)
     
    234246#endif
    235247
     248#if CONFIG_MMAP_DEBUG
     249#define mmap_dmsg(...)   if(hal_time_stamp() > CONFIG_MMAP_DEBUG) printk(__VA_ARGS__)
     250#else
     251#define mmap_dmsg(...)
     252#endif
     253
    236254#if CONFIG_MMC_DEBUG
    237255#define mmc_dmsg(...)   if(hal_time_stamp() > CONFIG_MMC_DEBUG) printk(__VA_ARGS__)
     
    264282#endif
    265283
     284#if CONFIG_READ_DEBUG
     285#define read_dmsg(...)   if(hal_time_stamp() > CONFIG_READ_DEBUG) printk(__VA_ARGS__)
     286#else
     287#define read_dmsg(...)
     288#endif
     289
    266290#if CONFIG_RPC_DEBUG
    267291#define rpc_dmsg(...)   if(hal_time_stamp() > CONFIG_RPC_DEBUG) printk(__VA_ARGS__)
     
    310334#else
    311335#define vmm_dmsg(...)
     336#endif
     337
     338#if CONFIG_WRITE_DEBUG
     339#define write_dmsg(...)   if(hal_time_stamp() > CONFIG_WRITE_DEBUG) printk(__VA_ARGS__)
     340#else
     341#define write_dmsg(...)
    312342#endif
    313343
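
All the <xxx>_dmsg() macros added above share one pattern: the CONFIG_<XXX>_DEBUG value both compiles the macro in and acts as a start date, since nothing is printed before that cycle. A minimal standalone sketch of the pattern, with hypothetical names (DEMO_DEBUG, demo_time, demo_dmsg) and printf() standing in for printk():

    #include <stdio.h>

    #define DEMO_DEBUG  1000          /* 0 would compile the macro away entirely */

    /* fake cycle counter, standing in for hal_time_stamp() */
    static unsigned int demo_time( void ) { return 1500; }

    #if DEMO_DEBUG
    #define demo_dmsg(...)   if( demo_time() > DEMO_DEBUG ) printf(__VA_ARGS__)
    #else
    #define demo_dmsg(...)
    #endif

    int main( void )
    {
        demo_dmsg( "\n[DBG] %s : cycle %u\n" , __func__ , demo_time() );
        return 0;
    }

Setting DEMO_DEBUG to 0 removes the call entirely, which mirrors how the CONFIG_*_DEBUG switches shown above behave when set to zero.
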
  • trunk/kernel/kern/process.c

    r406 r407  
    3939#include <thread.h>
    4040#include <list.h>
     41#include <string.h>
    4142#include <scheduler.h>
    4243#include <remote_spinlock.h>
     
    9091    pid_t       parent_pid;
    9192
    92     process_dmsg("\n[DMSG] %s : core[%x,%d] enters for process %x\n",
    93     __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     93        error_t     error1;
     94        error_t     error2;
     95        error_t     error3;
     96    xptr_t      stdin_xp;
     97    xptr_t      stdout_xp;
     98    xptr_t      stderr_xp;
     99    uint32_t    stdin_id;
     100    uint32_t    stdout_id;
     101    uint32_t    stderr_id;
     102
     103process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
     104__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
    94105
    95106    // get parent process cluster, local pointer, and pid
    96     // for all processes other than process_zero
    97     if( process == &process_zero )
     107    // for all processes other than kernel process
     108    if( process == &process_zero )                   // kernel process
    98109    {
    99110        assert( (pid == 0) , __FUNCTION__ , "process_zero must have PID = 0\n");
     
    101112        parent_cxy = 0;
    102113        parent_ptr = NULL;
    103         parent_pid = 0;      // process_zero is its own parent...
    104     }
    105     else
    106     {
    107         assert( (parent_xp != XPTR_NULL) , __FUNCTION__ , "parent_xp cannot be NULL\n");
    108 
     114        parent_pid = 0;     
     115    }
     116    else                                             // user process
     117    {
    109118        parent_cxy = GET_CXY( parent_xp );
    110119        parent_ptr = (process_t *)GET_PTR( parent_xp );
     
    112121    }
    113122
    114     // initialize PID and PPID
    115         process->pid   = pid;
    116     process->ppid  = parent_pid;
    117 
    118     // initialize reference process vmm (not for kernel process)
    119     if( pid ) vmm_init( process );
    120 
    121     // reset reference process file descriptors array
    122         process_fd_init( process );
    123 
    124     // reset reference process files structures and cwd_lock
    125         process->vfs_root_xp     = XPTR_NULL;
    126         process->vfs_bin_xp      = XPTR_NULL;
    127         process->vfs_cwd_xp      = XPTR_NULL;
    128     remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
    129 
    130     // reset children list root
    131     xlist_root_init( XPTR( local_cxy , &process->children_root ) );
    132         process->children_nr     = 0;
    133 
    134     // reset semaphore / mutex / barrier / condvar list roots
    135     xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
    136     xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
    137     xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
    138     xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
    139     remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );
    140 
    141     // register new process in the parent children list (not for kernel process)
     123    // initialize PID, PPID, and REF
     124        process->pid    = pid;
     125    process->ppid   = parent_pid;
     126    process->ref_xp = XPTR( local_cxy , process );
     127
     128    // initialize vmm, fd array and others structures for user processes.
     129    // These structures are not used by the kernel process.
    142130    if( pid )
    143131    {
     132        // initialize vmm (not for kernel)
     133        vmm_init( process );
     134
     135process_dmsg("\n[DBG] %s : core[%x,%d] / vmm initialised for process %x\n",
     136__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     137
     138        // initialize fd_array (not for kernel)
     139        process_fd_init( process );
     140
     141        // create stdin / stdout / stderr pseudo-files (not for kernel)
     142        if( parent_pid == 0 )                                              // process_init
     143        {
     144            error1 = vfs_open( process,
     145                               CONFIG_INIT_STDIN,
     146                               O_RDONLY,
     147                               0,                // FIXME chmod
     148                               &stdin_xp,
     149                               &stdin_id );
     150
     151            error2 = vfs_open( process,
     152                               CONFIG_INIT_STDOUT,
     153                               O_WRONLY,
     154                               0,                // FIXME chmod
     155                               &stdout_xp,
     156                               &stdout_id );
     157
     158            error3 = vfs_open( process,
     159                               CONFIG_INIT_STDERR,
     160                               O_WRONLY,
     161                               0,                // FIXME chmod
     162                               &stderr_xp,
     163                               &stderr_id );
     164        }
     165        else                                                               // user process
     166        {
     167            error1 = vfs_open( process,
     168                               CONFIG_USER_STDIN,
     169                               O_RDONLY,
     170                               0,                // FIXME chmod
     171                               &stdin_xp,
     172                               &stdin_id );
     173
     174            error2 = vfs_open( process,
     175                               CONFIG_USER_STDOUT,
     176                               O_WRONLY,
     177                               0,                // FIXME chmod
     178                               &stdout_xp,
     179                               &stdout_id );
     180
     181            error3 = vfs_open( process,
     182                               CONFIG_USER_STDERR,
     183                               O_WRONLY,
     184                               0,                // FIXME chmod
     185                               &stderr_xp,
     186                               &stderr_id );
     187        }
     188
     189        assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ ,
     190        "cannot open stdin/stdout/stderr pseudo files\n");
     191
     192        assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ ,
     193        "bad indexes : stdin %d / stdout %d / stderr %d \n", stdin_id , stdout_id , stderr_id );
     194
     195process_dmsg("\n[DBG] %s : core[%x,%d] / fd array initialised for process %x\n",
     196__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
     197
     198
     199        // reset reference process files structures and cwd_lock (not for kernel)
     200            process->vfs_root_xp     = XPTR_NULL;
     201            process->vfs_bin_xp      = XPTR_NULL;
     202            process->vfs_cwd_xp      = XPTR_NULL;
     203        remote_rwlock_init( XPTR( local_cxy , &process->cwd_lock ) );
     204
     205        // reset children list root (not for kernel)
     206        xlist_root_init( XPTR( local_cxy , &process->children_root ) );
     207            process->children_nr     = 0;
     208
      209        // reset semaphore / mutex / barrier / condvar list roots (not for kernel)
     210        xlist_root_init( XPTR( local_cxy , &process->sem_root ) );
     211        xlist_root_init( XPTR( local_cxy , &process->mutex_root ) );
     212        xlist_root_init( XPTR( local_cxy , &process->barrier_root ) );
     213        xlist_root_init( XPTR( local_cxy , &process->condvar_root ) );
     214        remote_spinlock_init( XPTR( local_cxy , &process->sync_lock ) );
     215
      216        // register new process in the parent children list (not for kernel)
    144217        xptr_t entry = XPTR( local_cxy  , &process->brothers_list );
    145218        xptr_t root  = XPTR( parent_cxy , &parent_ptr->children_root );
     
    156229    spinlock_init( &process->th_lock );
    157230
    158     // set ref_xp field
    159     process->ref_xp = XPTR( local_cxy , process );
    160 
    161231    // register new process descriptor in local cluster manager local_list
    162232    cluster_process_local_link( process );
     
    169239        hal_fence();
    170240
    171     process_dmsg("\n[DMSG] %s : exit for process %x in cluster %x\n",
    172                  __FUNCTION__ , pid );
     241process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
     242__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , pid );
    173243
    174244}  // process_reference init()
     
    181251    cxy_t       ref_cxy = GET_CXY( reference_process_xp );
    182252    process_t * ref_ptr = (process_t *)GET_PTR( reference_process_xp );
     253
     254    // set the pid, ppid, ref_xp fields in local process
     255    local_process->pid    = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) );
     256    local_process->ppid   = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->ppid ) );
     257    local_process->ref_xp = reference_process_xp;
     258
     259process_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x in cluster %x\n",
     260__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );
    183261
    184262    // reset local process vmm
     
    192270    local_process->vfs_bin_xp  = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->vfs_bin_xp ) );
    193271    local_process->vfs_cwd_xp  = XPTR_NULL;
    194 
    195     // set the pid, ppid, ref_xp fields
    196     local_process->pid    = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->pid ) );
    197     local_process->ppid   = hal_remote_lw( XPTR( ref_cxy , &ref_ptr->ppid ) );
    198     local_process->ref_xp = reference_process_xp;
    199 
    200     process_dmsg("\n[DMSG] %s : enter for process %x in cluster %x\n",
    201                  __FUNCTION__ , local_process->pid );
    202272
    203273    // reset children list root (not used in a process descriptor copy)
     
    233303        hal_fence();
    234304
    235     process_dmsg("\n[DMSG] %s : exit for process %x in cluster %x\n",
    236                  __FUNCTION__ , local_process->pid );
     305process_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x in cluster %x\n",
     306__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , local_process->pid );
    237307
    238308    return 0;
     
    288358    vmm_destroy( process );
    289359
    290         process_dmsg("\n[DMSG] %s for pid %d / page_faults = %d\n",
     360        process_dmsg("\n[DBG] %s for pid %d / page_faults = %d\n",
    291361                 __FUNCTION__ , process->pid, process->vmm.pgfault_nr );
    292 }
     362
     363}  // end process_destroy()
    293364
    294365////////////////////////////////////////
     
    298369    uint32_t       ltid;      // index in process th_tbl
    299370    uint32_t       count;     // thread counter
     371
     372printk("\n@@@ %s enter\n", __FUNCTION__ );
    300373
    301374    // get lock protecting th_tbl[]
     
    317390    }
    318391
    319     volatile uint32_t ko;
     392printk("\n@@@ %s : %d signal(s) sent\n", __FUNCTION__, count );
    320393
    321394    // second loop on threads to wait acknowledge from scheduler,
     
    329402        if( thread != NULL )
    330403        {
    331             // wait scheduler acknowledge
    332             do { ko = (thread->signals & THREAD_SIG_KILL); } while( ko );
    333 
    334             // unlink thread from brothers list if required
    335             if( (thread->flags & THREAD_FLAG_DETACHED) == 0 )
    336             xlist_unlink( XPTR( local_cxy , &thread->brothers_list ) );
     404
     405printk("\n@@@ %s start polling at cycle %d\n", __FUNCTION__ , hal_time_stamp() );
     406
     407            // poll the THREAD_SIG_KILL bit until reset
     408            while( thread->signals & THREAD_SIG_KILL ) asm volatile( "nop" );
     409
     410printk("\n@@@ %s exit polling\n", __FUNCTION__ );
     411
     412            // detach target thread from parent if attached
     413            if( (thread->flags & THREAD_FLAG_DETACHED) != 0 )
     414            thread_child_parent_unlink( thread->parent , XPTR( local_cxy , thread ) );
    337415
    338416            // unlink thread from process
     
    346424    }
    347425
     426printk("\n@@@ %s : %d ack(s) received\n", __FUNCTION__, count );
     427
    348428    // release lock protecting th_tbl[]
    349429    spinlock_unlock( &process->th_lock );
     
    351431    // release memory allocated for process descriptor
    352432    process_destroy( process );
    353 }
     433
     434printk("\n[@@@] %s : core[%x,%d] exit\n",
     435__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid );
     436
     437}  // end process_kill()
    354438
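
process_kill() above uses a two-phase protocol: a first loop raises THREAD_SIG_KILL on every thread of the process, and a second loop busy-polls each thread->signals word until the scheduler acknowledges by clearing the bit. A standalone sketch of that request/acknowledge handshake, assuming pthreads and C11 atomics in place of the remote accesses and the asm volatile("nop") spin; all names are illustrative.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define SIG_KILL  0x1u                   /* stands in for THREAD_SIG_KILL */

    static atomic_uint signals;              /* stands in for thread->signals */

    /* acknowledging side : plays the role of the target core's scheduler */
    static void * sched_ack( void * arg )
    {
        (void)arg;
        while( (atomic_load( &signals ) & SIG_KILL) == 0 ) ;   /* wait for the request */
        printf("target thread stops and acknowledges\n");
        atomic_fetch_and( &signals , ~SIG_KILL );               /* clear the bit        */
        return NULL;
    }

    int main( void )
    {
        pthread_t t;
        pthread_create( &t , NULL , sched_ack , NULL );

        /* first phase : send the kill signal */
        atomic_fetch_or( &signals , SIG_KILL );

        /* second phase : poll the SIG_KILL bit until reset, as process_kill() does */
        while( atomic_load( &signals ) & SIG_KILL ) ;

        pthread_join( t , NULL );
        printf("acknowledge received\n");
        return 0;
    }
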
    355439///////////////////////////////////////////////
     
    440524
    441525/////////////////////////////////////////////////
    442 error_t process_fd_register(  xptr_t     file_xp,
    443                               uint32_t * file_id )
     526error_t process_fd_register( process_t * process,
     527                             xptr_t      file_xp,
     528                             uint32_t  * fdid )
    444529{
    445530    bool_t    found;
     
    447532    xptr_t    xp;
    448533
    449     // get extended pointer on reference process
    450     xptr_t ref_xp = CURRENT_THREAD->process->ref_xp;
    451 
    452534    // get reference process cluster and local pointer
     535    xptr_t ref_xp = process->ref_xp;
    453536    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    454537    cxy_t       ref_cxy = GET_CXY( ref_xp );
     
    467550            hal_remote_swd( XPTR( ref_cxy , &ref_ptr->fd_array.array[id] ) , file_xp );
    468551                hal_remote_atomic_add( XPTR( ref_cxy , &ref_ptr->fd_array.current ) , 1 );
    469                         *file_id = id;
     552                        *fdid = id;
    470553            break;
    471554        }
     
    481564////////////////////////////////////////////////
    482565xptr_t process_fd_get_xptr( process_t * process,
    483                             uint32_t    file_id )
     566                            uint32_t    fdid )
    484567{
    485568    xptr_t  file_xp;
    486569
    487570    // access local copy of process descriptor
    488     file_xp = process->fd_array.array[file_id];
     571    file_xp = process->fd_array.array[fdid];
    489572
    490573    if( file_xp == XPTR_NULL )
     
    496579
    497580        // access reference process descriptor
    498         file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[file_id] ) );
     581        file_xp = hal_remote_lwd( XPTR( ref_cxy , &ref_ptr->fd_array.array[fdid] ) );
    499582
    500583        // update local fd_array if found
    501584        if( file_xp != XPTR_NULL )
    502585        {
    503             process->fd_array.array[file_id] = file_xp;
     586            process->fd_array.array[fdid] = file_xp;
    504587        }
    505588    }
    506589
    507590    return file_xp;
    508 }
     591
     592}  // end process_fd_get_xptr()
    509593
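
process_fd_get_xptr() above behaves as a read-through cache: the local fd_array[] copy is consulted first, and on a miss the entry is fetched from the reference cluster and stored locally for the next lookup. A standalone sketch of that policy, with two plain arrays (reference_fd, local_fd) replacing the remote accesses; lookup_fd() is a hypothetical name.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FD_MAX  32

    static uint64_t reference_fd[FD_MAX];    /* reference cluster copy (source of truth) */
    static uint64_t local_fd[FD_MAX];        /* local cluster copy (cache, 0 = empty)    */

    /* try the local copy first, fall back to the reference copy and cache the result */
    static uint64_t lookup_fd( uint32_t fdid )
    {
        uint64_t file_xp = local_fd[fdid];

        if( file_xp == 0 )                                /* local miss                 */
        {
            file_xp = reference_fd[fdid];                 /* remote read in the kernel  */
            if( file_xp != 0 ) local_fd[fdid] = file_xp;  /* update the local cache     */
        }
        return file_xp;
    }

    int main( void )
    {
        reference_fd[3] = 0xBEEF;
        printf( "first  lookup : %" PRIx64 "\n" , lookup_fd( 3 ) );   /* miss, then cached */
        printf( "second lookup : %" PRIx64 "\n" , lookup_fd( 3 ) );   /* served locally    */
        return 0;
    }
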
    510594///////////////////////////////////////////
     
    543627    // release lock on source process fd_array
    544628        remote_spinlock_unlock( XPTR( src_cxy , &src_ptr->lock ) );
    545 }
     629
     630}  // end process_fd_remote_copy()
    546631
    547632////////////////////////////////////////////////////////////////////////////////////
     
    561646    assert( (thread != NULL) , __FUNCTION__ , "thread argument is NULL" );
    562647
    563     // search a free slot in th_tbl[]
     648    // search a free slot in th_tbl[]
     649    // 0 is not a valid ltid value
    564650    found = false;
    565     for( ltid = 0 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
     651    for( ltid = 1 ; ltid < CONFIG_THREAD_MAX_PER_CLUSTER ; ltid++ )
    566652    {
    567653        if( process->th_tbl[ltid] == NULL )
     
    606692{
    607693    char           * path;                            // pathname to .elf file
     694    bool_t           keep_pid;                        // new process keep parent PID if true
    608695    process_t      * process;                         // local pointer on new process
    609696    pid_t            pid;                             // new process pid
    610697    xptr_t           parent_xp;                       // extended pointer on parent process
    611     cxy_t            parent_cxy;
    612     process_t      * parent_ptr;
    613     uint32_t         parent_pid;
     698    cxy_t            parent_cxy;                      // parent process local cluster
     699    process_t      * parent_ptr;                      // local pointer on parent process
     700    uint32_t         parent_pid;                      // parent process identifier
    614701    thread_t       * thread;                          // pointer on new thread
    615702    pthread_attr_t   attr;                            // main thread attributes
     
    618705        error_t          error;
    619706
    620         // get parent and .elf pathname from exec_info
     707        // get .elf pathname, parent_xp, and keep_pid flag from exec_info
    621708        path      = exec_info->path;
    622709    parent_xp = exec_info->parent_xp;
     710    keep_pid  = exec_info->keep_pid;
     711
     712process_dmsg("\n[DBG] %s : core[%x,%d] enters for path = %s\n",
     713__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
    623714
    624715    // get parent process cluster and local pointer
     
    627718    parent_pid = hal_remote_lw( XPTR( parent_cxy , &parent_ptr->pid ) );
    628719
    629     exec_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] enters for path = %s\n",
    630     __FUNCTION__, CURRENT_THREAD->trdid, local_cxy, CURRENT_THREAD->core->lid , path );
    631 
    632     // create new process descriptor
    633     process = process_alloc();
    634 
    635     if( process == NULL )
    636     {
    637         printk("\n[ERROR] in %s : no memory / cluster = %x / ppid = %x / path = %s\n",
    638                __FUNCTION__ , local_cxy , parent_pid , path );
    639         return ENOMEM;
    640     }
    641 
    642     // get a pid from the local cluster
    643     error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid );
    644 
    645     if( error )
    646     {
    647         printk("\n[ERROR] in %s : cannot get PID / cluster = %x / ppid = %x / path = %s\n",
    648                __FUNCTION__ , local_cxy , parent_pid , path );
    649         process_free( process );
    650                 return ENOMEM;
    651     }
     720    // allocates memory for process descriptor
     721        process = process_alloc();
     722        if( process == NULL ) return -1;
     723
     724    // get PID
     725    if( keep_pid )    // keep parent PID
     726    {
     727        pid = parent_pid;
     728    }
     729    else              // get new PID from local cluster
     730    {
     731        error = cluster_pid_alloc( XPTR( local_cxy , process ) , &pid );
     732        if( error ) return -1;
     733    }
     734
     735process_dmsg("\n[DBG] %s : core[%x,%d] created process %x for path = %s\n",
     736__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path );
    652737
    653738    // initialize the process descriptor as the reference
    654739    process_reference_init( process , pid , parent_xp );
    655740
    656     exec_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] created process %x / path = %s\n",
    657     __FUNCTION__, CURRENT_THREAD->trdid, local_cxy, CURRENT_THREAD->core->lid, pid, path );
     741process_dmsg("\n[DBG] %s : core[%x,%d] initialized process %x / path = %s\n",
     742__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path );
    658743
    659744    // initialize vfs_root and vfs_cwd from parent process
     
    670755                            XPTR( parent_cxy , &parent_ptr->fd_array) );
    671756
    672     exec_dmsg("\n[DMSG] %s : fd_array copied from process %x to process %x\n",
    673     __FUNCTION__, parent_pid , pid );
    674 
    675         // initialize signal manager TODO ??? [AG]
    676         // signal_manager_init( process );
     757process_dmsg("\n[DBG] %s :  core[%x,%d] copied fd_array for process %x\n",
     758__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid );
    677759
    678760    // register "code" and "data" vsegs as well as the process entry-point in VMM,
     
    682764        if( error )
    683765        {
    684                 printk("\n[ERROR] in %s : failed to access elf file for process %x / path = %s\n",
     766                printk("\n[ERROR] in %s : failed to access .elf file for process %x / path = %s\n",
    685767                       __FUNCTION__, pid , path );
    686768        process_destroy( process );
     
    688770        }
    689771
    690     exec_dmsg("\n[DMSG] %s : code and data vsegs registered for process %x / path = %s\n",
    691     __FUNCTION__ , pid , path );
     772process_dmsg("\n[DBG] %s : core[%x,%d] registered code/data vsegs for process %x / path = %s\n",
     773__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, pid, path );
    692774
    693775    // select a core in cluster
     
    709791        {
    710792                printk("\n[ERROR] in %s : cannot create thread for process %x / path = %s\n",
    711                        __FUNCTION__, pid );
     793                       __FUNCTION__, pid , path );
    712794        process_destroy( process );
    713795        return error;
    714796        }
    715797
    716         exec_dmsg("\n[DMSG] %s : thread created for process %x on core %d in cluster %x\n",
    717                __FUNCTION__ , pid , core->lid , local_cxy );
    718 
    719 #if CONFIG_EXEC_DEBUG
    720 if( hal_time_stamp() > CONFIG_EXEC_DEBUG )
    721 {
    722     grdxt_print( &process->vmm.grdxt , GRDXT_TYPE_VSEG , process->pid );
    723     hal_gpt_print( &process->vmm.gpt , process->pid );
    724 }
    725 #endif
     798process_dmsg("\n[DBG] %s : core[%x,%d] created thread %x for process %x / path = %s\n",
     799__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, thread->trdid, pid, path  );
    726800
    727801    // update children list in parent process
     
    733807        thread_unblock( XPTR( local_cxy , thread ) , THREAD_BLOCKED_GLOBAL );
    734808
    735     exec_dmsg("\n[DMSG] %s : exit for process %x\n",
    736                 __FUNCTION__, process->pid );
     809process_dmsg("\n[DBG] %s : core[%x,%d] exit for path = %s\n",
     810__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, path );
    737811
    738812        return 0;
    739813
    740 }  // end proces_make_exec()
     814}  // end process_make_exec()
    741815
    742816//////////////////////////
     
    744818{
    745819    exec_info_t   exec_info;     // structure to be passed to process_make_exec()
    746 
    747         error_t   error1;
    748         error_t   error2;
    749         error_t   error3;
    750     xptr_t    stdin_xp;
    751     xptr_t    stdout_xp;
    752     xptr_t    stderr_xp;
    753     uint32_t  stdin_id;
    754     uint32_t  stdout_id;
    755     uint32_t  stderr_id;
    756 
    757         process_dmsg("\n[DMSG] %s : enters in cluster %x\n", __FUNCTION__ , local_cxy );
    758 
    759     // open stdin / stdout / stderr pseudo-files
    760         error1 = vfs_open( XPTR_NULL, CONFIG_DEV_STDIN , O_RDONLY, 0, &stdin_xp , &stdin_id  );
    761         error2 = vfs_open( XPTR_NULL, CONFIG_DEV_STDOUT, O_WRONLY, 0, &stdout_xp, &stdout_id );
    762         error3 = vfs_open( XPTR_NULL, CONFIG_DEV_STDERR, O_WRONLY, 0, &stderr_xp, &stderr_id );
    763 
    764         assert( ((error1 == 0) && (error2 == 0) && (error3 == 0)) , __FUNCTION__ ,
    765             "cannot open stdin/stdout/stderr pseudo files\n");
    766 
    767     assert( ((stdin_id == 0) && (stdout_id == 1) && (stderr_id == 2)) , __FUNCTION__ ,
    768             "bad indexes for stdin/stdout/stderr\n");
     820    xptr_t        parent_xp;     // extended pointer on parent process.
     821    error_t       error;
     822
     823process_dmsg("\n[DBG] %s : enters in cluster %x\n",
     824__FUNCTION__ , local_cxy );
     825
     826    // parent process is local kernel process
     827    parent_xp = XPTR( local_cxy , &process_zero );
    769828
    770829    // initialize the exec_info structure
    771     exec_info.parent_xp    = XPTR( local_cxy , &process_zero );
     830    exec_info.keep_pid     = false;
     831    exec_info.parent_xp    = parent_xp;
    772832    strcpy( exec_info.path , CONFIG_PROCESS_INIT_PATH );
    773833    exec_info.args_nr      = 0;
    774834    exec_info.envs_nr      = 0;
    775835
    776     // create process_init and thread_init
    777         error1 = process_make_exec( &exec_info );
    778 
    779         assert( (error1 == 0) , __FUNCTION__ , "cannot create process_init\n");
    780 
    781         process_dmsg("\n[DMSG] %s : exit in cluster %x\n", __FUNCTION__ , local_cxy );
     836    // initialize process_init and create thread_init
     837        error = process_make_exec( &exec_info );
     838
     839        if( error ) panic("cannot initialize process_init in cluster %x", local_cxy );
     840
     841process_dmsg("\n[DBG] %s : exit in cluster %x\n",
     842__FUNCTION__ , local_cxy );
    782843               
    783844    hal_fence();
  • trunk/kernel/kern/process.h

    r204 r407  
    143143{
    144144    xptr_t             parent_xp;      /*! extended pointer on parent process descriptor    */
     145    bool_t             keep_pid;       /*! keep parent PID if true / new PID if false       */
    145146
    146147    char               path[CONFIG_VFS_MAX_PATH_LENGTH];   /*!  .elf file path              */
     
    175176/*********************************************************************************************
    176177 * This function allocates memory and initializes the "process_init" descriptor and the
    177  * associated "thread_init" descriptor. It should be called once at the end of the kernel
    178  * initialisation procedure, by the kernel "process_zero".
     178 * associated "thread_init" descriptor in the local cluster. It is called once at the end
     179 * of the kernel initialisation procedure, by the local kernel process.
    179180 * The "process_init" is the first user process, and all other user processes will be forked
    180181 * from this process. The code executed by "process_init" is stored in a .elf file, whose
    181  * pathname is defined by the CONFIG_PROCESS_INIT_PATH argument. It uses fork/exec syscalls
    182  * to create the "shell" user process, and various other user daemon processes.
    183  * Practically, it builds the exec_info structure, registers the stdin / stdout / stderr
    184  * pseudo-file descriptors and the vfs_root and vfs_cwd in parent process_zero, and calls
    185  * the generic process_make_exec() function, that makes the real job.
     182 * pathname is defined by the CONFIG_PROCESS_INIT_PATH argument.
     183 * Practically, it builds the exec_info structure, and calls the process_make_exec()
      184 * function, which does the real work.
    186185 ********************************************************************************************/
    187186void process_init_create();
     
    253252 * and the associated main thread, from information found in the <exec_info> structure
    254253 * (defined in the process.h file), that must be built by the caller.
     254 * - If the <keep_pid> field is true, the new process inherits its PID from the parent PID.
     255 * - If the <keep_pid> field is false, a new PID is allocated from the local cluster manager.
    255256 * The new process inherits from the parent process (i) the open file descriptors, (ii) the
    256257 * vfs_root and the vfs_cwd inodes.
     
    268269
    269270
    270 /********************   Signal Management Operations   **************************************/
    271 
    272 /*********************************************************************************************
    273  * This function TODO [AG]
    274  ********************************************************************************************/
    275 void process_signal_handler( process_t * process );
    276 
    277 
    278271/********************   File Management Operations   ****************************************/
    279272
     
    287280/*********************************************************************************************
    288281 * This function uses as many remote accesses as required, to reset an entry in fd_array[],
    289  * in all clusters containing a copy. The entry is identified by the <file_id> argument.
     282 * in all clusters containing a copy. The entry is identified by the <fdid> argument.
    290283 * This function must be executed by a thread running reference cluster, that contains
    291284 * the complete list of process descriptors copies.
    292285 *********************************************************************************************
    293286 * @ process  : pointer on the local process descriptor.
    294  * @ file_id  : file descriptor index in the fd_array.
     287 * @ fdid     : file descriptor index in the fd_array.
    295288 ********************************************************************************************/
    296289void process_fd_remove( process_t * process,
    297                         uint32_t    file_id );
     290                        uint32_t    fdid );
    298291
    299292/*********************************************************************************************
     
    306299 *********************************************************************************************
    307300 * @ process  : pointer on the local process descriptor.
    308  * @ file_id  : file descriptor index in the fd_array.
     301 * @ fdid     : file descriptor index in the fd_array.
    309302 * @ return extended pointer on file descriptor if success / return XPTR_NULL if not found.
    310303 ********************************************************************************************/
    311304xptr_t process_fd_get_xptr( process_t * process,
    312                             uint32_t    file_id );
     305                            uint32_t    fdid );
    313306
    314307/*********************************************************************************************
     
    328321 *********************************************************************************************
    329322 * @ file_xp  : extended pointer on the file descriptor to be registered.
    330  * @ file_id  : [out] buffer for fd_array slot index.
     323 * @ fdid     : [out] buffer for fd_array slot index.
    331324 * @ return 0 if success / return EMFILE if array full.
    332325 ********************************************************************************************/
    333 error_t process_fd_register( xptr_t      file_xp,
    334                              uint32_t  * file_id );
     326error_t process_fd_register( process_t * process,
     327                             xptr_t      file_xp,
     328                             uint32_t  * fdid );
    335329
    336330/*********************************************************************************************
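
A hedged usage sketch of the new process_fd_register() signature documented above: the caller now designates the target process explicitly instead of relying on CURRENT_THREAD->process, and gets the allocated slot back in fdid. The surrounding error handling is illustrative only.

    uint32_t fdid;
    error_t  error = process_fd_register( process , file_xp , &fdid );

    if( error )   // fd_array full (EMFILE)
    {
        printk("\n[ERROR] in %s : fd_array full for process %x\n",
               __FUNCTION__ , process->pid );
    }
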
  • trunk/kernel/kern/rpc.c

    r406 r407  
    7676    &rpc_mapper_move_buffer_server,     // 24
    7777    &rpc_mapper_get_page_server,        // 25
    78     &rpc_undefined,                     // 26
    79     &rpc_undefined,                     // 27
     78    &rpc_vmm_create_vseg_server,        // 26
     79    &rpc_sched_display_server,          // 27
    8080    &rpc_undefined,                     // 28
    8181    &rpc_undefined,                     // 29
     
    9797                                page_t  ** page )      // out
    9898{
    99     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     99    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    100100    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    101101    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    115115
    116116    // get output arguments from RPC descriptor
    117     *page    = (page_t *)(intptr_t)rpc.args[1];
    118 
    119     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     117    *page = (page_t *)(intptr_t)rpc.args[1];
     118
     119    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    120120    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    121121    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    125125void rpc_pmem_get_pages_server( xptr_t xp )
    126126{
    127     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     127    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    128128    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    129129    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    142142    hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    143143
    144     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     144    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    145145    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    146146    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    157157                                   pid_t     * pid )     // out
    158158{
    159     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     159    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    160160    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    161161    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    178178    *error  = (error_t)rpc.args[2];     
    179179
    180     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     180    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    181181    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    182182    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    190190    pid_t       pid;       // output : process identifier
    191191
    192     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     192    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    193193    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    194194    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    209209    hal_remote_sw( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)pid );
    210210
    211     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     211    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    212212    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    213213    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    224224                              error_t     * error )   // out
    225225{
    226     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     226    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    227227    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    228228    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    244244    *error  = (error_t)rpc.args[1];     
    245245
    246     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     246    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    247247    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    248248    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    256256    error_t       error;     // local error error status
    257257
    258     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     258    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    259259    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    260260    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    278278    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    279279
    280     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     280    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    281281    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    282282    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    291291void rpc_process_kill_client( process_t * process )
    292292{
    293     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     293    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    294294    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    295295    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    325325    }
    326326
    327     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     327    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    328328    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    329329    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    336336    process_t * process; 
    337337
    338     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     338    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    339339    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    340340    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    360360    }
    361361
    362     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     362    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    363363    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    364364    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    379379                                    error_t        * error )      // out
    380380{
    381     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     381    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    382382    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    383383    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    387387    // initialise RPC descriptor header
    388388    rpc_desc_t  rpc;
    389     rpc.index    = RPC_THREAD_USER_CREATE;
    390     rpc.response = 1;
     389    rpc.index     = RPC_THREAD_USER_CREATE;
     390    rpc.response  = 1;
    391391
    392392    // set input arguments in RPC descriptor
     
    403403    *error     = (error_t)rpc.args[5];
    404404
    405     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     405    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    406406    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    407407    CURRENT_THREAD->core->lid , hal_time_stamp() );
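
All the rpc_*_client() functions in this file follow the same synchronous skeleton: fill a local rpc_desc_t, post it to the server cluster with rpc_send_sync(), then read the results back from the descriptor. A hedged fragment of that skeleton for RPC_THREAD_USER_CREATE; input marshalling is elided, cxy / thread_xp / error are assumed to be the caller's target cluster and output buffers, and output indices 4 and 5 match the server-side fix in the next hunk.

    rpc_desc_t  rpc;

    // initialise RPC descriptor header
    rpc.index     = RPC_THREAD_USER_CREATE;
    rpc.response  = 1;

    // set input arguments in RPC descriptor (elided in this sketch)

    // post the request to cluster cxy and wait for the server response
    rpc_send_sync( cxy , &rpc );

    // get output arguments from RPC descriptor
    *thread_xp = (xptr_t)rpc.args[4];
    *error     = (error_t)rpc.args[5];
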
     
    421421    error_t          error;
    422422
    423     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     423    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    424424    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    425425    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    442442                       sizeof(pthread_attr_t) );
    443443   
    444     assert( (attr_copy.cxy == local_cxy) , __FUNCTION__ , "bad target cluster\n" );
    445 
    446444    // call kernel function
    447445    error = thread_user_create( pid,
     
    453451    // set output arguments
    454452    thread_xp = XPTR( local_cxy , thread_ptr );
    455     hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    456     hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
    457 
    458     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     453    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)thread_xp );
     454    hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
     455
     456    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    459457    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    460458    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    473471                                      error_t * error )      // out
    474472{
    475     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     473    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    476474    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    477475    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    496494    *error     = (error_t)rpc.args[4];
    497495
    498     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     496    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    499497    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    500498    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    509507    error_t          error;   
    510508
    511     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     509    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    512510    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    513511    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    533531    hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)thread_xp );
    534532
    535     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     533    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    536534    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    537535    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    547545                             uint32_t    sig_id )    // in
    548546{
    549     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     547    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    550548    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    551549    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    565563    rpc_send_sync( cxy , &rpc );
    566564
    567     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     565    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    568566    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    569567    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    576574    uint32_t     sig_id;   // signal index
    577575
    578     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     576    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    579577    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    580578    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    591589    signal_rise( process , sig_id );
    592590
    593     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     591    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    594592    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    595593    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    613611                                  error_t      * error )     // out
    614612{
    615     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     613    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    616614    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    617615    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    641639    *error    = (error_t)rpc.args[9];
    642640
    643     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     641    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    644642    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    645643    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    660658    error_t          error;
    661659
    662     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     660    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    663661    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    664662    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    693691    hal_remote_swd( XPTR( client_cxy , &desc->args[9] ) , (uint64_t)error );
    694692
    695     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     693    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    696694    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    697695    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    706704                                   struct vfs_inode_s * inode )
    707705{
    708     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     706    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    709707    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    710708    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    723721    rpc_send_sync( cxy , &rpc );
    724722
    725     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     723    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    726724    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    727725    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    733731    vfs_inode_t * inode;
    734732
    735     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     733    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    736734    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    737735    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    747745    vfs_inode_destroy( inode );
    748746
    749     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     747    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    750748    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    751749    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    764762                                   error_t              * error )       // out
    765763{
    766     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     764    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    767765    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    768766    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    787785    *error     = (error_t)rpc.args[4];
    788786
    789     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     787    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    790788    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    791789    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    803801    char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
    804802
    805     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     803    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    806804    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    807805    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    829827    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    830828
    831     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     829    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    832830    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    833831    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    843841                                    vfs_dentry_t * dentry )
    844842{
    845     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     843    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    846844    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    847845    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    860858    rpc_send_sync( cxy , &rpc );
    861859
    862     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     860    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    863861    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    864862    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    870868    vfs_dentry_t * dentry;
    871869
    872     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     870    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    873871    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    874872    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    884882    vfs_dentry_destroy( dentry );
    885883
    886     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     884    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    887885    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    888886    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    901899                                 error_t              * error )      // out
    902900{
    903     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     901    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    904902    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    905903    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    923921    *error   = (error_t)rpc.args[3];
    924922
    925     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     923    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    926924    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    927925    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    936934    error_t       error;
    937935
    938     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     936    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    939937    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    940938    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    957955    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    958956
    959     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     957    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    960958    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    961959    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    970968                                  vfs_file_t * file )
    971969{
    972     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     970    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    973971    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    974972    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    987985    rpc_send_sync( cxy , &rpc );
    988986
    989     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     987    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    990988    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    991989    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    997995    vfs_file_t * file;
    998996
    999     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     997    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    1000998    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    1001999    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    10111009    vfs_file_destroy( file );
    10121010
    1013     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1011    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    10141012    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    10151013    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    10271025                                error_t     * error )          // out
    10281026{
    1029     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1027    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    10301028    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    10311029    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    10491047    *error   = (error_t)rpc.args[3];
    10501048
    1051     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1049    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    10521050    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    10531051    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    10641062    char          name_copy[CONFIG_VFS_MAX_NAME_LENGTH];
    10651063
    1066     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1064    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    10671065    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    10681066    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    10871085    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    10881086
    1089     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1087    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    10901088    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    10911089    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11011099                                     error_t     * error )     // out
    11021100{
    1103     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1101    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    11041102    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    11051103    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11211119    *error   = (error_t)rpc.args[1];
    11221120
    1123     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1121    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    11241122    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    11251123    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11321130    vfs_inode_t * inode;
    11331131
    1134     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1132    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    11351133    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    11361134    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11491147    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
    11501148
    1151     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1149    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    11521150    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    11531151    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11621160                                   mapper_t * mapper,    // in
    11631161                                   uint32_t   first,     // in
    1164                                    uint32_t   page,      // in
     1162                                   uint32_t   index,     // in
    11651163                                   uint32_t * cluster,   // out
    11661164                                   error_t  * error )    // out
    11671165{
    1168     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1166    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    11691167    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    11701168    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11801178    rpc.args[0] = (uint64_t)(intptr_t)mapper;
    11811179    rpc.args[1] = (uint64_t)first;
    1182     rpc.args[2] = (uint64_t)page;
     1180    rpc.args[2] = (uint64_t)index;
    11831181
    11841182    // register RPC request in remote RPC fifo
     
    11891187    *error   = (error_t)rpc.args[4];
    11901188
    1191     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1189    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    11921190    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    11931191    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    11991197    mapper_t    * mapper;
    12001198    uint32_t      first;
    1201     uint32_t      page;
     1199    uint32_t      index;
    12021200    uint32_t      cluster;
    12031201    error_t       error;
    12041202
    1205     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1203    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    12061204    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    12071205    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    12141212    mapper = (mapper_t *)(intptr_t)hal_remote_lpt( XPTR( client_cxy , &desc->args[0] ) );
    12151213    first  = (uint32_t)            hal_remote_lw ( XPTR( client_cxy , &desc->args[1] ) );
    1216     page   = (uint32_t)            hal_remote_lw ( XPTR( client_cxy , &desc->args[2] ) );
     1214    index  = (uint32_t)            hal_remote_lw ( XPTR( client_cxy , &desc->args[2] ) );
    12171215
    12181216    // call the kernel function
    1219     error = fatfs_get_cluster( mapper , first , page , &cluster );
     1217    error = fatfs_get_cluster( mapper , first , index , &cluster );
    12201218
    12211219    // set output argument
     
    12231221    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    12241222
    1225     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1223    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    12261224    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    12271225    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    12391237                              error_t   * error )      // out
    12401238{
    1241     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1239    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    12421240    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    12431241    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    12611259    *error   = (error_t)rpc.args[3];
    12621260
    1263     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1261    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    12641262    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    12651263    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    12751273    error_t       error;
    12761274
    1277     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1275    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    12781276    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    12791277    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    12951293    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    12961294
    1297     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1295    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    12981296    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    12991297    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    13091307                             process_t * process,  // in
    13101308                             vpn_t       vpn,      // in
     1309                             bool_t      cow,      // in
    13111310                             uint32_t  * attr,     // out
    13121311                             ppn_t     * ppn,      // out
    13131312                             error_t   * error )   // out
    13141313{
    1315     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1314    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    13161315    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    13171316    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    13271326    rpc.args[0] = (uint64_t)(intptr_t)process;
    13281327    rpc.args[1] = (uint64_t)vpn;
     1328    rpc.args[2] = (uint64_t)cow;
    13291329
    13301330    // register RPC request in remote RPC fifo (blocking function)
     
    13321332
    13331333    // get output argument from rpc descriptor
    1334     *attr  = (uint32_t)rpc.args[2];
    1335     *ppn   = (ppn_t)rpc.args[3];
    1336     *error = (error_t)rpc.args[4];
    1337 
    1338     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1334    *attr  = (uint32_t)rpc.args[3];
     1335    *ppn   = (ppn_t)rpc.args[4];
     1336    *error = (error_t)rpc.args[5];
     1337
     1338    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    13391339    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    13401340    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    13461346    process_t   * process;
    13471347    vpn_t         vpn;
     1348    bool_t        cow;
    13481349    uint32_t      attr;
    13491350    ppn_t         ppn;
    13501351    error_t       error;
    13511352
    1352     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1353    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    13531354    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    13541355    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    13611362    process = (process_t *)(intptr_t)hal_remote_lwd( XPTR( client_cxy , &desc->args[0] ) );
    13621363    vpn     = (vpn_t)                hal_remote_lwd( XPTR( client_cxy , &desc->args[1] ) );
     1364    cow     = (bool_t)               hal_remote_lwd( XPTR( client_cxy , &desc->args[2] ) );
    13631365   
    13641366    // call local kernel function
    1365     error = vmm_get_pte( process , vpn , &attr , &ppn );
     1367    error = vmm_get_pte( process , vpn , cow , &attr , &ppn );
    13661368
    13671369    // set output argument "attr" & "ppn" to client RPC descriptor
    1368     hal_remote_swd( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)attr );
    1369     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)ppn );
    1370     hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)error );
    1371 
    1372     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1370    hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)attr );
     1371    hal_remote_swd( XPTR( client_cxy , &desc->args[4] ) , (uint64_t)ppn );
     1372    hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
     1373
     1374    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    13731375    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    13741376    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    13841386                           xptr_t *   buf_xp )     // out
    13851387{
    1386     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1388    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    13871389    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    13881390    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14041406    *buf_xp = (xptr_t)rpc.args[1];
    14051407
    1406     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1408    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    14071409    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14081410    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14121414void rpc_kcm_alloc_server( xptr_t xp )
    14131415{
    1414     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1416    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    14151417    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14161418    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14331435    hal_remote_swd( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)buf_xp );
    14341436
    1435     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1437    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    14361438    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14371439    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14471449                          uint32_t   kmem_type )   // in
    14481450{
    1449     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1451    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    14501452    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14511453    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14651467    rpc_send_sync( cxy , &rpc );
    14661468
    1467     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1469    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    14681470    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14691471    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14731475void rpc_kcm_free_server( xptr_t xp )
    14741476{
    1475     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1477    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    14761478    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14771479    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    14911493    kmem_free( &req );
    14921494
    1493     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1495    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    14941496    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    14951497    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    15101512                                    error_t  * error )        // out
    15111513{
    1512     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1514    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    15131515    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    15141516    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    15351537    *error     = (error_t)rpc.args[6];
    15361538
    1537     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1539    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    15381540    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    15391541    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    15521554    error_t    error;
    15531555
    1554     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1556    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    15551557    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    15561558    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    15921594    hal_remote_swd( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );
    15931595
    1594     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1596    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    15951597    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    15961598    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    16071609                                 page_t         ** page )      // out
    16081610{
    1609     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1611    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    16101612    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    16111613    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    16281630    *page = (page_t *)(intptr_t)rpc.args[2];
    16291631
    1630     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1632    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    16311633    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    16321634    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    16361638void rpc_mapper_get_page_server( xptr_t xp )
    16371639{
    1638     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1640    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    16391641    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    16401642    CURRENT_THREAD->core->lid , hal_time_stamp() );
     
    16541656    hal_remote_swd( XPTR( cxy , &desc->args[1] ) , (uint64_t)(intptr_t)page );
    16551657
    1656     rpc_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
    1657     __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
    1658     CURRENT_THREAD->core->lid , hal_time_stamp() );
    1659 }
    1660 
     1658    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1659    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1660    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1661}
     1662
     1663/////////////////////////////////////////////////////////////////////////////////////////
     1664// [26]          Marshaling functions attached to RPC_VMM_CREATE_VSEG
     1665/////////////////////////////////////////////////////////////////////////////////////////
     1666
     1667////////////////////////////////////////////////////////
     1668void rpc_vmm_create_vseg_client( cxy_t              cxy,
     1669                                 struct process_s * process,
     1670                                 vseg_type_t        type,
     1671                                 intptr_t           base,
     1672                                 uint32_t           size,
     1673                                 uint32_t           file_offset,
     1674                                 uint32_t           file_size,
     1675                                 xptr_t             mapper_xp,
     1676                                 cxy_t              vseg_cxy,
     1677                                 struct vseg_s   ** vseg )
     1678{
     1679    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1680    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1681    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1682
     1683    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     1684
     1685    // initialise RPC descriptor header
     1686    rpc_desc_t  rpc;
     1687    rpc.index    = RPC_VMM_CREATE_VSEG;
     1688    rpc.response = 1;
     1689
     1690    // set input arguments in RPC descriptor
     1691    rpc.args[0] = (uint64_t)(intptr_t)process;
     1692    rpc.args[1] = (uint64_t)type;
     1693    rpc.args[2] = (uint64_t)base;
     1694    rpc.args[3] = (uint64_t)size;
     1695    rpc.args[4] = (uint64_t)file_offset;
     1696    rpc.args[5] = (uint64_t)file_size;
     1697    rpc.args[6] = (uint64_t)mapper_xp;
     1698    rpc.args[7] = (uint64_t)vseg_cxy;
     1699
     1700    // register RPC request in remote RPC fifo (blocking function)
     1701    rpc_send_sync( cxy , &rpc );
     1702
     1703    // get output values from RPC descriptor
     1704    *vseg = (vseg_t *)(intptr_t)rpc.args[8];
     1705
     1706    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1707    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1708    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1709}
     1710
     1711////////////////////////////////////////////
     1712void rpc_vmm_create_vseg_server( xptr_t xp )
     1713{
     1714    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1715    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1716    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1717
     1718    // get client cluster identifier and pointer on RPC descriptor
     1719    cxy_t        cxy  = (cxy_t)GET_CXY( xp );
     1720    rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     1721
     1722    // get input arguments from client RPC descriptor
     1723    process_t * process     = (process_t *)(intptr_t)hal_remote_lwd( XPTR(cxy , &desc->args[0]));
     1724    vseg_type_t type        = (vseg_type_t)(uint32_t)hal_remote_lwd( XPTR(cxy , &desc->args[1]));
     1725    intptr_t    base        = (intptr_t)             hal_remote_lwd( XPTR(cxy , &desc->args[2]));
     1726    uint32_t    size        = (uint32_t)             hal_remote_lwd( XPTR(cxy , &desc->args[3]));
     1727    uint32_t    file_offset = (uint32_t)             hal_remote_lwd( XPTR(cxy , &desc->args[4]));
     1728    uint32_t    file_size   = (uint32_t)             hal_remote_lwd( XPTR(cxy , &desc->args[5]));
     1729    xptr_t      mapper_xp   = (xptr_t)               hal_remote_lwd( XPTR(cxy , &desc->args[6]));
     1730    cxy_t       vseg_cxy    = (cxy_t)(uint32_t)      hal_remote_lwd( XPTR(cxy , &desc->args[7]));
     1731   
     1732    // call local kernel function
     1733    vseg_t * vseg = vmm_create_vseg( process,
     1734                                     type,
     1735                                     base,
     1736                                     size,
     1737                                     file_offset,
     1738                                     file_size,
     1739                                     mapper_xp,
     1740                                     vseg_cxy );
     1741
     1742    // set output arguments into client RPC descriptor
     1743    hal_remote_swd( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg );
     1744
     1745    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1746    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1747    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1748}
     1749
     1750/////////////////////////////////////////////////////////////////////////////////////////
     1751// [27]          Marshaling functions attached to RPC_SCHED_DISPLAY
     1752/////////////////////////////////////////////////////////////////////////////////////////
     1753
     1754////////////////////////////////////////////////////////
     1755void rpc_sched_display_client( cxy_t              cxy,
     1756                               lid_t              lid)
     1757{
     1758    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1759    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1760    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1761
     1762    assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     1763
     1764    // initialise RPC descriptor header
     1765    rpc_desc_t  rpc;
     1766    rpc.index    = RPC_SCHED_DISPLAY;
     1767    rpc.response = 1;
     1768
     1769    // set input arguments in RPC descriptor
     1770    rpc.args[0] = (uint64_t)lid;
     1771
     1772    // register RPC request in remote RPC fifo (blocking function)
     1773    rpc_send_sync( cxy , &rpc );
     1774
     1775    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1776    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1777    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1778}
     1779
     1780//////////////////////////////////////////
     1781void rpc_sched_display_server( xptr_t xp )
     1782{
     1783    rpc_dmsg("\n[DBG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
     1784    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1785    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1786
     1787    // get client cluster identifier and pointer on RPC descriptor
     1788    cxy_t        cxy  = (cxy_t)GET_CXY( xp );
     1789    rpc_desc_t * desc = (rpc_desc_t *)GET_PTR( xp );
     1790
     1791    // get input arguments from client RPC descriptor
     1792    lid_t lid = (lid_t)hal_remote_lw( XPTR(cxy , &desc->args[0]));
     1793   
     1794    // call local kernel function
     1795    sched_display( lid );
     1796
     1797    rpc_dmsg("\n[DBG] %s : exit / thread %x on core[%x,%d] / cycle %d\n",
     1798    __FUNCTION__ , CURRENT_THREAD->trdid , local_cxy,
     1799    CURRENT_THREAD->core->lid , hal_time_stamp() );
     1800}
    16611801
    16621802/***************************************************************************************/
     
    16681808                    rpc_desc_t * rpc )
    16691809{
    1670     uint32_t   cores;
    16711810    error_t    error;
    1672     bool_t     first;
    1673     reg_t      sr_save;
    1674 
    1675     rpc_dmsg("\n[DMSG] %s : enter / client_cxy = %x / server_cxy = %x / cycle %d\n",
    1676     __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() );
    1677 
    1678     // allocate and initialise an extended pointer on the RPC descriptor
     1811
     1812    thread_t * this = CURRENT_THREAD;
     1813    core_t   * core = this->core;
     1814
     1815    // register client thread pointer and core lid in RPC descriptor
     1816    rpc->thread    = this;
     1817    rpc->lid       = core->lid;
     1818
     1819    // build an extended pointer on the RPC descriptor
    16791820        xptr_t   desc_xp = XPTR( local_cxy , rpc );
    16801821
    16811822    // get local pointer on rpc_fifo in remote cluster, with the
    1682     // assumption that rpc_fifo pddresses are identical in all clusters
    1683     rpc_fifo_t * rf = &LOCAL_CLUSTER->rpc_fifo;
     1823    // assumption that local pointers are identical in all clusters
     1824    remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    16841825
    16851826        // try to post an item in remote fifo
     
    16871828    do
    16881829    {
    1689         error = remote_fifo_put_item( XPTR( server_cxy , &rf->fifo ),
    1690                                       (uint64_t )desc_xp,
    1691                                       &first );
     1830        error = remote_fifo_put_item( XPTR( server_cxy , rpc_fifo ),
     1831                                      (uint64_t )desc_xp );
    16921832            if ( error )
    16931833        {
     
    16951835            __FUNCTION__ , local_cxy , server_cxy );
    16961836
    1697             if( thread_can_yield() ) sched_yield( NULL );
     1837            if( thread_can_yield() ) sched_yield();
    16981838        }
    16991839    }
    17001840    while( error );
    17011841 
    1702     rpc_dmsg("\n[DMSG] %s : RPC %l registered / server_cxy = %x / cycle %d\n",
    1703     __FUNCTION__ , desc_xp , server_cxy , hal_time_stamp() );
     1842    hal_fence();
    17041843       
    1705     // send IPI to remote CP0, if this is the first RPC in remote FIFO,
    1706     // and there is no CPU is in kernel mode in server cluster.
    1707         if( first )
    1708         {
    1709         // get number of cores in kernel mode in server cluster
    1710         cores = hal_remote_lw( XPTR( server_cxy , &LOCAL_CLUSTER->cores_in_kernel ) );
    1711 
    1712                 if( cores == 0 ) // no core in kernel mode in server
    1713                 {
    1714                     dev_pic_send_ipi( server_cxy , 0 );
    1715 
    1716                     rpc_dmsg("\n[DMSG] %s : IPI sent / client_cxy = %x / server_cxy = %x\n",
    1717             __FUNCTION__, local_cxy , server_cxy );
    1718         }
    1719         }
    1720 
    1721         // enable IRQs to allow incoming RPC and avoid deadlock
    1722         hal_enable_irq( &sr_save );
    1723 
    1724     // the server thread poll the response slot until RPC completed
    1725     // TODO this could be replaced by a descheduling policy... [AG]
    1726     while( rpc->response ) asm volatile( "nop" );
    1727 
    1728     // restore IRQs
    1729         hal_restore_irq( sr_save );
    1730 
    1731     rpc_dmsg("\n[DMSG] %s : completed / client_cxy = %x / server_cxy = %x / cycle %d\n",
    1732     __FUNCTION__ , local_cxy , server_cxy , hal_time_stamp() );
    1733 
      1844    // send IPI to the core in the server cluster that has the same lid as the client core
     1845        dev_pic_send_ipi( server_cxy , core->lid );
     1846
      1847    // wait for RPC completion:
     1848    // - busy waiting policy during kernel_init, or if threads cannot yield
     1849    // - block and deschedule in all other cases
     1850
     1851    if( (this->type == THREAD_IDLE) || (thread_can_yield() == false) ) // busy waiting
     1852    {
     1853
     1854grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s busy waiting after registering RPC\n"
     1855"        rpc = %d / server = %x / cycle %d\n",
     1856__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
     1857rpc->index , server_cxy , hal_time_stamp() );
     1858
     1859        while( rpc->response ) hal_fixed_delay( 100 );
     1860   
     1861grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s exit after RPC completion\n",
     1862__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );
     1863
     1864    }
     1865    else                                                              // block & deschedule
     1866    {
     1867
     1868grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s deschedule after registering RPC\n"
     1869"        rpc = %d / server = %x / cycle %d\n",
     1870__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) ,
     1871rpc->index , server_cxy , hal_time_stamp() );
     1872
     1873        thread_block( this , THREAD_BLOCKED_RPC );
     1874        sched_yield();
     1875
     1876grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %s resumes after RPC completion\n",
     1877__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , thread_type_str(this->type) );
     1878
     1879    }
     1880
      1881    // check that all responses have been received
     1882    assert( (rpc->response == 0) , __FUNCTION__, "illegal RPC response\n" );
     1883
     1884    // acknowledge the IPI sent by the server
     1885    dev_pic_ack_ipi();
     1886   
    17341887}  // end rpc_send_sync()
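With this change the blocking policy (busy waiting versus block and deschedule) is handled entirely inside rpc_send_sync(), so a client-side marshaling function only fills the descriptor header and input arguments. A minimal sketch of that calling pattern, following the marshaling functions of this changeset; the function name example_rpc_client is hypothetical, and it reuses the RPC_SCHED_DISPLAY index and a single lid argument purely for illustration:

    // minimal sketch (not part of this changeset), assuming the declarations of rpc.h
    void example_rpc_client( cxy_t cxy,     // server cluster identifier
                             lid_t lid )    // argument forwarded to the server
    {
        // the target cluster must be remote
        assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");

        // initialise the RPC descriptor header: one expected response /
        // the thread and lid fields are registered by rpc_send_sync() itself
        rpc_desc_t  rpc;
        rpc.index    = RPC_SCHED_DISPLAY;   // illustrative RPC index
        rpc.response = 1;

        // set input arguments in the RPC descriptor
        rpc.args[0] = (uint64_t)lid;

        // post the request in the remote RPC fifo, send an IPI,
        // and wait for completion (busy wait or block & deschedule)
        rpc_send_sync( cxy , &rpc );
    }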
    17351888
     
    17401893/***************************************************************************************/
    17411894
    1742 ///////////////////////////////////////////
    1743 void rpc_fifo_init( rpc_fifo_t * rpc_fifo )
    1744 {
    1745         rpc_fifo->count       = 0;
    1746         rpc_fifo->owner       = 0;
    1747         local_fifo_init( &rpc_fifo->fifo );
    1748 }
    1749 
    1750 /////////////////////////////////////////////
    1751 void rpc_execute_all( rpc_fifo_t * rpc_fifo )
    1752 {
    1753         xptr_t         xp;             // extended pointer on RPC descriptor
    1754         uint32_t       count;          // handled RPC request counter
    1755         thread_t     * this;           // pointer on this RPC thread
    1756     core_t       * core;           // pointer on core running this thread
    1757     rpc_desc_t   * desc;           // pointer on RPC descriptor
    1758     uint32_t       index;          // RPC index
    1759     cxy_t          client_cxy;     // client cluster identifier
    1760         error_t        error;
    1761      
    1762         this  = CURRENT_THREAD;
    1763     core  = this->core;   
    1764         count = 0;
    1765 
    1766     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    1767     __FUNCTION__, this->trdid, local_cxy, core->lid , hal_time_stamp() );
     1895////////////////
     1896void rpc_check()
     1897{
     1898    error_t         error;
     1899    thread_t      * thread; 
     1900    uint32_t        sr_save;
     1901
     1902    bool_t          found    = false;
     1903        thread_t      * this     = CURRENT_THREAD;
     1904    core_t        * core     = this->core;
     1905    scheduler_t   * sched    = &core->scheduler;
     1906        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     1907
     1908grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s / cycle %d\n",
     1909__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
     1910
      1911    // interrupted thread is not preemptable during RPC check
     1912        hal_disable_irq( &sr_save );
     1913
     1914    // check RPC FIFO not empty and no RPC thread handling it 
     1915        if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
     1916    {
     1917        // search one non blocked RPC thread   
     1918        list_entry_t * iter;
     1919        LIST_FOREACH( &sched->k_root , iter )
     1920        {
     1921            thread = LIST_ELEMENT( iter , thread_t , sched_list );
     1922            if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )
     1923            {
     1924                found = true;
     1925                break;
     1926            }
     1927        }
     1928
     1929        // create new RPC thread if not found   
     1930        if( found == false )                   
     1931        {
     1932            error = thread_kernel_create( &thread,
     1933                                          THREAD_RPC,
     1934                                                      &rpc_thread_func,
     1935                                          NULL,
     1936                                                      this->core->lid );
     1937                if( error )
     1938            {
     1939                printk("\n[WARNING] in %s : no memory for new RPC thread in cluster %x\n",
     1940                __FUNCTION__ , local_cxy );
     1941            }
     1942            else
     1943            {
     1944                // unblock created RPC thread
     1945                thread->blocked = 0;
     1946
      1947                // update the cluster RPC threads counter
     1948                    hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
     1949
     1950grpc_dmsg("\n[DBG] %s : core [%x,%d] creates a new RPC thread %x / cycle %d\n",
     1951__FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
     1952
     1953            }
     1954        }
     1955    }
     1956
     1957grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s deschedules / cycle %d\n",
     1958__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
     1959
      1960    // the interrupted thread always deschedules
     1961        sched_yield();
     1962
     1963grpc_dmsg("\n[DBG] %s : core[%x,%d] / interrupted thread %s resume / cycle %d\n",
     1964__FUNCTION__, local_cxy, core->lid, thread_type_str(this->type), hal_time_stamp() );
     1965
      1966    // the interrupted thread restores IRQs after resuming
     1967        hal_restore_irq( sr_save );
     1968
     1969} // end rpc_check()
     1970
     1971
     1972//////////////////////
     1973void rpc_thread_func()
     1974{
     1975    uint32_t     count;       // handled RPC requests counter
     1976    error_t      empty;       // local RPC fifo state
     1977    xptr_t       desc_xp;     // extended pointer on RPC request
     1978    cxy_t        desc_cxy;    // RPC request cluster (client)
     1979    rpc_desc_t * desc_ptr;    // RPC request local pointer
     1980    uint32_t     index;       // RPC request index
      1982    uint32_t     responses;   // number of responses still expected by the client
     1982    thread_t   * thread_ptr;  // local pointer on client thread
     1983    lid_t        core_lid;    // local index of client core
    17681984 
    1769     // handle up to CONFIG_RPC_PENDING_MAX requests before exit
    1770         do
    1771     {
    1772             error = local_fifo_get_item( &rpc_fifo->fifo, (uint64_t *)&xp );
    1773 
    1774                 if ( error == 0 )  // One RPC request successfully extracted from RPC_FIFO
     1985    // makes RPC thread not preemptable
     1986        hal_disable_irq( NULL );
     1987 
     1988        thread_t      * this     = CURRENT_THREAD;
     1989        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     1990
      1991    // two nested loops:
     1992    // - external loop : "infinite" RPC thread
     1993    // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
     1994 
     1995        while(1)  // external loop
     1996        {
     1997        // try to take RPC_FIFO ownership
     1998        if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    17751999        {
    1776             // get client cluster identifier and pointer on RPC descriptor
    1777             client_cxy = (cxy_t)GET_CXY( xp );
    1778             desc       = (rpc_desc_t *)GET_PTR( xp );
    1779 
    1780             // get rpc index from RPC descriptor
    1781                 index = hal_remote_lw( XPTR( client_cxy , &desc->index ) );
    1782 
    1783             rpc_dmsg("\n[DMSG] %s : thread %x on core [%x,%d] / rpc = %d\n",
    1784                      __FUNCTION__ , this->trdid , core->lid , local_cxy , index );
    1785 
    1786             // call the relevant server function
    1787             rpc_server[index]( xp );
    1788 
    1789             // increment handled RPC counter
    1790                 count++;
    1791 
    1792             // notify RPC completion as required
    1793             hal_remote_atomic_add( XPTR( client_cxy , &desc->response ) , -1 );
    1794                 }
     2000            // initializes RPC requests counter
     2001            count = 0;
     2002
     2003            // acknowledge local IPI
     2004            dev_pic_ack_ipi();
     2005
     2006                    // exit internal loop in three cases:
     2007            // - RPC fifo is empty
      2008            // - ownership has been lost (because of descheduling)
     2009            // - max number of RPCs is reached
     2010                while( 1 )  // internal loop
     2011            {
     2012                    empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
     2013
     2014                    if ( empty == 0 ) // one RPC request found
     2015                {
     2016                    // get client cluster and pointer on RPC descriptor
     2017                    desc_cxy = (cxy_t)GET_CXY( desc_xp );
     2018                    desc_ptr = (rpc_desc_t *)GET_PTR( desc_xp );
     2019
     2020                    // get rpc index from RPC descriptor
     2021                        index = hal_remote_lw( XPTR( desc_cxy , &desc_ptr->index ) );
     2022
     2023grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / starts rpc %d / cycle %d\n",
     2024__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );
     2025
     2026                    // call the relevant server function
     2027                    rpc_server[index]( desc_xp );
     2028
     2029grpc_dmsg("\n[DBG] %s : core[%x,%d] / RPC thread %x / completes rpc %d / cycle %d\n",
     2030__FUNCTION__ , local_cxy , this->core->lid , this->trdid , index , hal_time_stamp() );
     2031
     2032                    // increment handled RPC counter
     2033                        count++;
     2034
     2035                    // decrement response counter in RPC descriptor
     2036                    responses = hal_remote_atomic_add(XPTR( desc_cxy, &desc_ptr->response ), -1);
     2037
     2038                    // unblock client thread  and send IPI to client core if last response
     2039                    if( responses == 1 )
     2040                    {
     2041                        // get pointer on client thread and unblock it
     2042                        thread_ptr = (thread_t *)hal_remote_lpt(XPTR(desc_cxy,&desc_ptr->thread));
     2043                        thread_unblock( XPTR(desc_cxy,thread_ptr) , THREAD_BLOCKED_RPC );
     2044
     2045                        hal_fence();
     2046
     2047                        // get client core lid and send IPI
     2048                        core_lid = hal_remote_lw(XPTR(desc_cxy, &desc_ptr->lid));
     2049                            dev_pic_send_ipi( desc_cxy , core_lid );
     2050                    }
     2051                        }
    17952052       
    1796                 // exit loop in three cases:
    1797         // - fifo is empty
    1798         // - look has been released (because descheduling)
    1799         // - max number of RPCs has been reached
    1800                 if( error ||
    1801             (rpc_fifo->owner != this->trdid) ||
    1802             (count > CONFIG_RPC_PENDING_MAX) ) break;
    1803         }
    1804     while( 1 );
    1805 
    1806     // update RPC_FIFO global counter
    1807         rpc_fifo->count += count;
    1808 
    1809 }  // end rpc_execute_all()
      2053                // check exit condition
     2054                        if( local_fifo_is_empty( rpc_fifo )  ||
     2055                    (rpc_fifo->owner != this->trdid) ||
     2056                    (count >= CONFIG_RPC_PENDING_MAX) ) break;
     2057                } // end internal loop
     2058
     2059            // release rpc_fifo ownership if not lost
     2060            if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
     2061        }
     2062
      2063        // suicide if too many RPC threads in cluster
     2064        if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
     2065            {
     2066
     2067grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) suicide at cycle %d\n",
     2068__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     2069
     2070            // update RPC threads counter
     2071                hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
     2072
     2073            // suicide
     2074                thread_exit();
     2075            }
     2076
     2077grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) deschedules / cycle %d\n",
     2078__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     2079
     2080        // deschedule without blocking
     2081        sched_yield();
     2082
     2083grpc_dmsg("\n[DBG] %s : core[%x,%d] (RPC thread %x) resumes / cycle %d\n",
     2084__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     2085
     2086        } // end external loop
     2087
     2088} // end rpc_thread_func()
     2089
     2090
     2091
     2092
     2093
     2094
     2095
     2096
     2097
     2098/* deprecated [AG] 29/09/2017
    18102099
    18112100////////////////////////////////////////////////////
    1812 error_t rpc_activate_thread( rpc_fifo_t * rpc_fifo )
     2101error_t rpc_activate_thread( remote_fifo_t * rpc_fifo )
    18132102{
    18142103        core_t      * core;
     
    18272116
    18282117    assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,
    1829           "calling thread is not RPC_FIFO owner\n" );
     2118    "calling thread is not RPC_FIFO owner\n" );
    18302119
    18312120    // makes the calling thread not preemptable
     
    18332122        hal_disable_irq( &sr_save );
    18342123
    1835     // search a free RPC thread (must be in THREAD_BLOCKED_IDLE state)   
     2124grpc_dmsg("\n[DBG] %s : core[%x,%d] enter at cycle %d\n",
     2125__FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );
     2126
     2127    // search one non blocked RPC thread   
    18362128    list_entry_t * iter;
    18372129    LIST_FOREACH( &sched->k_root , iter )
    18382130    {
    18392131        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    1840         if( (thread->type == THREAD_RPC) && (thread->blocked == THREAD_BLOCKED_IDLE ) )
     2132        if( (thread->type == THREAD_RPC) && (thread->blocked == 0 ) )
    18412133        {
    18422134            found = true;
     
    18452137    }
    18462138
    1847     if( found )                    // activate this idle RPC thread     
     2139    if( found == false )                    // create new RPC thread     
    18482140    {
    1849         // unblock it
    1850         thread->blocked = 0;
    1851 
    1852         rpc_dmsg("\n[DMSG] %s : activate RPC thread %x on core [%x,%d] / cycle %d\n",
    1853                           __FUNCTION__ , thread , core->gid , local_cxy , hal_time_stamp() );
    1854     }
    1855     else                           // create a new RPC thread
    1856     {
    1857         // create new thread
    18582141        error = thread_kernel_create( &thread,
    18592142                                      THREAD_RPC,
     
    18692152        }
    18702153
    1871         // unblock new thread
     2154        // unblock thread
    18722155        thread->blocked = 0;
    18732156
     
    18752158            hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    18762159
    1877         rpc_dmsg("\n[DMSG] %s : create RPC thread %x on core [%x,%d] / cycle %d\n",
    1878                           __FUNCTION__ , thread->trdid, local_cxy, core->lid, hal_time_stamp() );
     2160grpc_dmsg("\n[DBG] %s : core [%x,%d] creates RPC thread %x at cycle %d\n",
     2161__FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
     2162
    18792163    }
    1880 
    1881     // update owner in rpc_fifo
      2164    else                           // activate the found RPC thread
     2165    {
     2166
     2167grpc_dmsg("\n[DBG] %s : core[%x,%d] activates RPC thread %x at cycle %d\n",
     2168__FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
     2169
     2170    }
     2171
     2172    // update rpc_fifo owner
    18822173    rpc_fifo->owner = thread->trdid;
    18832174
    1884     // current thread switch to RPC thread 
    1885         sched_yield( thread );
     2175    // current thread deschedule           
     2176        sched_yield();
    18862177
    18872178    // restore IRQs for the calling thread
     
    18932184}  // end rpc_activate_thread()
    18942185
    1895 //////////////////
    1896 bool_t rpc_check()
     2186////////////////
     2187void rpc_check()
    18972188{
    18982189        thread_t   * this     = CURRENT_THREAD;
    1899         rpc_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     2190        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    19002191    error_t      error;
    19012192
    1902     rpc_dmsg("\n[DMSG] %s : enter / thread %x / cluster %x / cycle %d\n",
    1903              __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() );
     2193grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / enter at cycle %d\n",
     2194__FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );
    19042195
    19052196    // calling thread does nothing if light lock already taken or FIFO empty 
    19062197        if( (rpc_fifo->owner != 0) || (local_fifo_is_empty( &rpc_fifo->fifo )) )
    19072198    {
    1908         rpc_dmsg("\n[DMSG] %s : exit do nothing / thread %x / cluster %x / cycle %d\n",
    1909                  __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() );
    1910 
    1911         return false;
     2199
     2200grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / exit do nothing at cycle %d\n",
     2201__FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );
     2202
     2203        return;
    19122204    }
    19132205
     
    19222214
    19232215            printk("\n[ERROR] in %s : no memory to create a RPC thread for core %d"
    1924                    " in cluster %x => do nothing\n",
    1925                    __FUNCTION__ , CURRENT_CORE->lid , local_cxy );
     2216            " in cluster %x => do nothing\n",
     2217            __FUNCTION__ , CURRENT_CORE->lid , local_cxy );
    19262218        }
    19272219
    1928         rpc_dmsg("\n[DMSG] %s : exit after RPC thread activation / "
    1929                  "thread %x / cluster %x / cycle %d\n",
    1930                  __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() );
    1931 
    1932         return true;
     2220        return;
    19332221    }
    19342222    else  // light lock taken by another thread
    19352223    {
    1936         rpc_dmsg("\n[DMSG] %s : exit do nothing / thread %x / cluster %x / cycle %d\n",
    1937                  __FUNCTION__ , this->trdid , local_cxy , hal_time_stamp() );
    1938 
    1939         return false;
     2224
     2225grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / exit do nothing at cycle %d\n",
     2226__FUNCTION__ , local_cxy , this->core->lid , this->trdid , hal_time_stamp() );
     2227
     2228        return;
    19402229    }
    19412230} // end rpc_check()
     
    19492238 
    19502239        thread_t   * this     = CURRENT_THREAD;
    1951         rpc_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    1952 
    1953     rpc_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d] / cycle %d\n",
    1954              __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
    1955 
    1956     // this infinite loop is not preemptable
    1957     // the RPC thread deschedule only when the RPC_FIFO is empty
     2240        remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
     2241
    19582242        while(1)
    19592243        {
    19602244        // check fifo ownership (ownership should be given by rpc_activate()
    1961         if( this->trdid != rpc_fifo->owner )
    1962         {
    1963             panic("thread %x on core[%x,%d] not owner of RPC_FIFO",
    1964                   this->trdid, local_cxy, this->core->lid );
    1965         }
     2245        assert( (this->trdid == rpc_fifo->owner) , __FUNCTION__ ,
     2246        "thread %x on core[%x,%d] not owner of RPC_FIFO / owner = %x\n",
     2247        this->trdid, local_cxy, this->core->lid , rpc_fifo->owner );
    19662248 
    19672249        // executes pending RPC(s)
    19682250        rpc_execute_all( rpc_fifo );
    19692251
    1970         // release rpc_fifo ownership (can be lost during RPC execution)
     2252        // release rpc_fifo ownership if required
     2253        // (this ownership can be lost during RPC execution)
    19712254        if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
    19722255
    1973 
    1974         //  block and deschedule or sucide
    1975                 if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
      2256        //  deschedule or suicide
     2257                if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )  // suicide
    19762258                {
    1977             rpc_dmsg("\n[DMSG] %s : RPC thread %x on core[%x,%d] suicide / cycle %d\n",
    1978                     __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
     2259
     2260grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / suicide at cycle %d\n",
     2261__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
    19792262
    19802263            // update core descriptor counter
     
    19842267                        thread_exit();
    19852268                }
    1986         else
     2269        else                                                       // deschedule
    19872270        {
    1988             rpc_dmsg("\n[DMSG] %s : RPC thread %x on core[%x,%d] blocks / cycle %d\n",
    1989                         __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
    1990 
    1991                     thread_block( this , THREAD_BLOCKED_IDLE );
    1992             sched_yield( NULL );
    1993 
    1994                     rpc_dmsg("\n[DMSG] %s : RPC thread %x wake up on core[%x,%d] / cycle %d\n",
    1995                 __FUNCTION__, this->trdid, local_cxy, this->core->lid, hal_time_stamp() );
     2271
     2272grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / deschedule at cycle %d\n",
     2273__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     2274
     2275            sched_yield();
     2276
     2277grpc_dmsg("\n[DBG] %s : core[%x,%d] / thread %x / wake up at cycle %d\n",
     2278__FUNCTION__, local_cxy, this->core->lid, this->trdid, hal_time_stamp() );
     2279
    19962280        }
    19972281        } // end while
    19982282} // end rpc_thread_func()
    19992283
     2284*/
     2285
     2286
  • trunk/kernel/kern/rpc.h

    r401 r407  
    3030#include <bits.h>
    3131#include <spinlock.h>
     32#include <vseg.h>
    3233#include <remote_fifo.h>
    3334
     
    8283    RPC_MAPPER_MOVE_BUFFER     = 24,
    8384    RPC_MAPPER_GET_PAGE        = 25,
     85    RPC_VMM_CREATE_VSEG        = 26,
     86    RPC_SCHED_DISPLAY          = 27,
    8487
    8588    RPC_MAX_INDEX              = 30,
     
    100103typedef struct rpc_desc_s
    101104{
    102         rpc_index_t       index;       // index of requested RPC service
    103         volatile uint32_t response;    // response valid when 0
    104     uint64_t          args[10];    // input/output arguments buffer
     105        rpc_index_t         index;       /*! index of requested RPC service           */
     106        volatile uint32_t   response;    /*! response valid when 0                    */
     107    struct thread_s   * thread;      /*! local pointer on client thread           */
     108    uint32_t            lid;         /*! index of core running the calling thread */
     109    uint64_t            args[10];    /*! input/output arguments buffer            */
    105110}
    106111rpc_desc_t;
    107 
    108 /***********************************************************************************
    109  * This structure defines the RPC fifo, containing a remote_fifo, the owner RPC
    110  * thread TRDID (used as a light lock), and the intrumentation counter.
    111  *
    112  * Implementation note: the TRDID is a good owner identifier, because all
    113  * RPC threads in a given cluster belong to the same process_zero kernel process,
    114  * and RPC threads cannot have local index LTID = 0.
    115  **********************************************************************************/
    116 
    117 typedef struct rpc_fifo_s
    118 {
    119         trdid_t           owner;       // owner thread / 0 if no owner
    120         uint64_t          count;       // total number of received RPCs (instrumentation)
    121         remote_fifo_t     fifo;        // embedded remote fifo
    122 }
    123 rpc_fifo_t;
    124 
    125112
    126113/**********************************************************************************/
     
    149136
    150137/***********************************************************************************
    151  * This function initialises the local RPC fifo and the lock protecting readers.
    152  * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.
    153  * Each slot contains an extended pointer on the RPC descriptor.
    154  ***********************************************************************************
    155  * @ rf     : pointer on the local RPC fifo.
    156  **********************************************************************************/
    157 void rpc_fifo_init( rpc_fifo_t * rf );
    158 
    159 /***********************************************************************************
    160138 * This function is the entry point for RPC handling on the server side.
    161  * It is executed by a core receiving an IPI.
    162  * It checks the RPC fifo, try to take the light-lock and activates (or creates)
    163  * an RPC thread in case of success.
    164  ***********************************************************************************
    165  * @ returns true if success / false otherwise.
    166  **********************************************************************************/
    167 bool_t rpc_check();
     139 * It is executed by a core receiving an IPI, and each time the core enters
     140 * or exits the kernel.
     141 * It does nothing and returns if the RPC_FIFO is empty.
     142 * Otherwise the calling thread checks that at least one non-blocked RPC thread
     143 * exists, creates a new RPC thread if required, and deschedules to let it run.
     144 **********************************************************************************/
     145void rpc_check();
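A hedged sketch of the behaviour described above, not the actual implementation: the function returns at once when the local RPC fifo is empty, and otherwise deschedules so that an RPC thread can serve the requests (the RPC-thread existence check and creation are only summarized in a comment):

    // sketch of the described behaviour for the local cluster RPC fifo
    static void rpc_check_sketch( void )
    {
        // nothing to do if the local RPC fifo is empty
        if( local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) ) return;

        // the real function verifies that at least one non-blocked RPC thread
        // exists, and creates a new one if required (omitted in this sketch)

        // deschedule to let the RPC thread execute the pending requests
        sched_yield();
    }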
    168146
    169147/***********************************************************************************
    170148 * This function contains the loop to execute all pending RPCs on the server side.
    171  * It should be called with irq disabled and after light lock acquisition.
     149 * It is called by the rpc_thread_func() function with irq disabled, and after
     150 * RPC_FIFO ownership acquisition.
    172151 ***********************************************************************************
    173152 * @ rpc_fifo  : pointer on the local RPC fifo
    174153 **********************************************************************************/
    175 void rpc_execute_all( rpc_fifo_t * rpc_fifo );
    176 
    177 /**********************************************************************************
    178  * This function is called by any thread running on any core in any cluster,
    179  * that detected a non-empty RPC_FIFO and got the RPC_FIFO ownership.
    180  * It activates one RPC thread, and immediately switches to the RPC thread.
    181  * It gets the first free RPC thread from the core free-list, or creates a new one
    182  * when the core free-list is empty.
    183  ***********************************************************************************
    184  * @ rpc_fifo : pointer on the non-empty RPC fifo.
    185  * @ return 0 if success / return ENOMEM if error.
    186  **********************************************************************************/
    187 error_t rpc_activate_thread( rpc_fifo_t * rpc_fifo );
    188 
    189 /***********************************************************************************
    190  * This function contains the infinite loop executed by each RPC thread.
     154void rpc_execute_all( remote_fifo_t * rpc_fifo );
     155
     156/***********************************************************************************
     157 * This function contains the infinite loop executed by an RPC thread.
    191158 **********************************************************************************/
    192159void rpc_thread_func();
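For readability, a simplified sketch of the server-side loop built from the two functions declared above; RPC_FIFO ownership management, IRQ masking and thread-count bookkeeping are deliberately omitted:

    // simplified sketch of an RPC server thread loop (ownership and IRQs omitted)
    void rpc_thread_sketch( void )
    {
        thread_t      * this = CURRENT_THREAD;
        remote_fifo_t * fifo = &LOCAL_CLUSTER->rpc_fifo;

        while( 1 )
        {
            if( local_fifo_is_empty( fifo ) == 0 )      // pending requests
            {
                rpc_execute_all( fifo );                // serve all of them
            }
            else                                        // nothing to do
            {
                thread_block( this , THREAD_BLOCKED_RPC );
                sched_yield();
            }
        }
    }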
     
    266233 ***********************************************************************************
    267234 * @ cxy       : server cluster identifier.
    268  * @ attr      : [in]  pointer on pthread_attr_t in client cluster.
    269  * @ thread_xp : [out] pointer on buffer for thread extended pointer.
     235 * @ attr      : [in]  local pointer on pthread_attr_t in client cluster.
     236 * @ thread_xp : [out] buffer for thread extended pointer.
    270237 * @ error     : [out] error status (0 if success).
    271238 **********************************************************************************/
     
    274241                                    void                  * start_func,
    275242                                    void                  * start_arg,
    276                                     struct pthread_attr_s * attr,
     243                                    pthread_attr_t        * attr,
    277244                                    xptr_t                * thread_xp,
    278245                                    error_t               * error );
     
    499466
    500467/***********************************************************************************
    501  * [21] The RPC_VMM_GET_PTE returns in the "ppn" and "attr" arguments the PTE value
    502  * for a given VPN in a given process.
     468 * [21] The RPC_VMM_GET_PTE returns in the <ppn> and <attr> arguments the PTE value
     469 * for a given <vpn> in a given <process> (page_fault or copy_on_write event).
    503470 * The server cluster is supposed to be the reference cluster, and the vseg
    504471 * containing the VPN must be registered in the reference VMM.
    505  * It returns an error if physical memory cannot be allocated for the PTE2,
     472 * It returns an error if physical memory cannot be allocated for the missing PTE2,
    506473 * or for the missing page itself.
    507474 ***********************************************************************************
     
    509476 * @ process : [in]   pointer on process descriptor in server cluster.
    510477 * @ vaddr   : [in]   virtual address to be searched.
     478 * @ cow     : [in]   "copy_on_write" event if true / "page_fault" event if false.
    511479 * @ attr    : [out]  address of buffer for attributes.
    512480 * @ ppn     : [out]  address of buffer for PPN.
     
    516484                             struct process_s * process,
    517485                             vpn_t              vpn,
     486                             bool_t             cow,
    518487                             uint32_t         * attr,
    519488                             ppn_t            * ppn,
     
    601570void rpc_mapper_get_page_server( xptr_t xp );
    602571
     572/***********************************************************************************
     573 * [26] The RPC_VMM_CREATE_VSEG allows a client thread to request the remote
     574 * reference cluster of a given process to allocate and register in the reference
     575 * process VMM a new vseg descriptor.
     576 * On the server side, this RPC uses the vmm_create_vseg() function, and returns
     577 * to the client the local pointer on the created vseg descriptor.
     578 ***********************************************************************************
     579 * @ cxy         : server cluster identifier.
     580 * @ process     : [in]  local pointer on process descriptor in server.
     581 * @ type        : [in]  vseg type.
     582 * @ base        : [in]  base address (unused for dynamically allocated vsegs).
     583 * @ size        : [in]  number of bytes.
     584 * @ file_offset : [in]  offset in file (for CODE, DATA, FILE types).
     585 * @ file_size   : [in]  can be smaller than size for DATA type.
     586 * @ mapper_xp   : [in]  extended pointer on mapper (for CODE, DATA, FILE types).
     587 * @ vseg_cxy    : [in]  target cluster for mapping (if not data type).
     588 * @ vseg        : [out] local pointer on vseg descriptor / NULL if failure.
     589 **********************************************************************************/
     590void rpc_vmm_create_vseg_client( cxy_t              cxy,
     591                                 struct process_s * process,
     592                                 vseg_type_t        type,
     593                                 intptr_t           base,
     594                                 uint32_t           size,
     595                                 uint32_t           file_offset,
     596                                 uint32_t           file_size,
     597                                 xptr_t             mapper_xp,
     598                                 cxy_t              vseg_cxy,
     599                                 struct vseg_s   ** vseg );
     600
     601void rpc_vmm_create_vseg_server( xptr_t xp );
     602
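As a hedged usage sketch, a client could request an anonymous STACK vseg from the reference cluster as follows; ref_cxy and ref_process are assumed variables holding the reference cluster identifier and the process descriptor pointer in that cluster, and the zero arguments follow the STACK usage visible in the thread.c hunk below:

    // sketch only : remote creation of a STACK vseg (ref_cxy / ref_process assumed)
    vseg_t * vseg;
    rpc_vmm_create_vseg_client( ref_cxy,            // reference cluster
                                ref_process,        // process descriptor in server
                                VSEG_TYPE_STACK,    // vseg type
                                0,                  // base unused (dynamically allocated)
                                0,                  // size unused for STACK type
                                0,                  // file_offset unused
                                0,                  // file_size unused
                                XPTR_NULL,          // no mapper for an anonymous vseg
                                local_cxy,          // map the vseg in the client cluster
                                &vseg );
    if( vseg == NULL ) printk("\n[ERROR] remote vseg creation failed\n");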
     603/***********************************************************************************
     604 * [27] The RPC_SCHED_DISPLAY allows a client thread to request the display
     605 * of a remote scheduler, identified by the <lid> argument.
     606 ***********************************************************************************
     607 * @ cxy         : server cluster identifier.
     608 * @ lid         : [in]  local index of target core in client cluster.
     609 **********************************************************************************/
     610void rpc_sched_display_client( cxy_t              cxy,
     611                               lid_t              lid );
     612
     613void rpc_sched_display_server( xptr_t xp );
     614
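A short usage sketch, assuming target_cxy holds the cluster of interest: the local sched_display() function is used for a local core, and the RPC for a remote one (core index 0 is an arbitrary example):

    // sketch : display the scheduler state of core[target_cxy,0]
    if( target_cxy == local_cxy ) sched_display( 0 );
    else                          rpc_sched_display_client( target_cxy , 0 );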
    603615#endif
  • trunk/kernel/kern/scheduler.c

    r406 r407  
    2424#include <kernel_config.h>
    2525#include <hal_types.h>
     26#include <hal_switch.h>
    2627#include <hal_irqmask.h>
    2728#include <hal_context.h>
     
    3839
    3940extern chdev_directory_t    chdev_dir;            // allocated in kernel_init.c file
    40 
     41extern uint32_t             switch_save_sr[];     // allocated in kernel_init.c file
    4142
    4243////////////////////////////////
     
    127128}  // end sched_remove()
    128129
    129 ///////////////////////////////////////////
    130 void sched_kill_thread( thread_t * thread )
    131 {
    132     // check thread locks
    133     if( thread_can_yield() == false )
    134     {
    135         panic("thread %x in process %x on core[%x][%d]"
    136               " did not released all locks",
    137               thread->trdid , thread->process->pid,
    138               local_cxy , thread->core->lid );
    139     }
    140 
    141     // remove thread from scheduler
    142     sched_remove_thread( thread );
    143 
    144     // reset the THREAD_SIG_KILL signal
    145     thread_reset_signal( thread , THREAD_SIG_KILL );
    146 
    147 }  // end sched_kill_thread()
    148 
    149130////////////////////////////////////////
    150131thread_t * sched_select( core_t * core )
     
    154135    scheduler_t * sched = &core->scheduler;
    155136
    156     sched_dmsg("\n[DMSG] %s : enter core[%x,%d] / cycle %d\n",
    157     __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );
    158 
    159137    // take lock protecting sheduler lists
    160138    spinlock_lock( &sched->lock );
     
    163141    list_entry_t * last;
    164142
    165     // first : scan the kernel threads list if not empty
     143    // first loop : scan the kernel threads list if not empty
    166144    if( list_is_empty( &sched->k_root ) == false )
    167145    {
     
    179157            thread = LIST_ELEMENT( current , thread_t , sched_list );
    180158
    181             // return thread if not idle_thread and runnable
    182             if( (thread->type != THREAD_IDLE) && (thread->blocked == 0) )
     159            // analyse kernel thread type
     160            switch( thread->type )
    183161            {
    184                 // release lock
    185                 spinlock_unlock( &sched->lock );
    186 
    187                 sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / k_thread = %x / cycle %d\n",
    188                 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
    189 
    190                 return thread;
    191             }
     162                case THREAD_IDLE: // skip IDLE thread
     163                break;
     164
     165                case THREAD_RPC:  // RPC thread if non blocked and FIFO non-empty
     166                if( (thread->blocked == 0) &&
     167                    (local_fifo_is_empty( &LOCAL_CLUSTER->rpc_fifo ) == 0) )
     168                {
     169                    spinlock_unlock( &sched->lock );
     170                    return thread;
     171                }
     172                break;
     173
     174                default:          // DEV thread if non blocked
     175                if( thread->blocked == 0 )
     176                {
     177                    spinlock_unlock( &sched->lock );
     178                    return thread;
     179                }
     180                break;
     181            }  // end switch type
    192182        }
    193183        while( current != last );
    194184    }
    195185
    196     // second : scan the user threads list if not empty
     186    // second loop : scan the user threads list if not empty
    197187    if( list_is_empty( &sched->u_root ) == false )
    198188    {
     
    213203            if( thread->blocked == 0 )
    214204            {
    215                 // release lock
    216205                spinlock_unlock( &sched->lock );
    217 
    218                 sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / u_thread = %x / cycle %d\n",
    219                 __FUNCTION__ , local_cxy , core->lid , thread->trdid , hal_time_stamp() );
    220206                return thread;
    221207            }
     
    224210    }
    225211
    226     // release lock
     212    // third : return idle thread if no runnable thread
    227213    spinlock_unlock( &sched->lock );
    228 
    229     sched_dmsg("\n[DMSG] %s : exit core[%x,%d] / idle = %x / cycle %d\n",
    230     __FUNCTION__ , local_cxy , core->lid , sched->idle->trdid , hal_time_stamp() );
    231 
    232     // third : return idle thread if no runnable thread
    233214    return sched->idle;
    234215
    235216}  // end sched_select()
     217
     218///////////////////////////////////////////
     219void sched_kill_thread( thread_t * thread )
     220{
     221    // check locks
     222    if( thread_can_yield() == false )
     223    {
     224        panic("locks not released for thread %x in process %x on core[%x][%d]",
     225        thread->trdid , thread->process->pid, local_cxy , thread->core->lid );
     226    }
     227
     228    // remove thread from scheduler
     229    sched_remove_thread( thread );
     230
     231    // reset the THREAD_SIG_KILL signal
     232    thread_reset_signal( thread , THREAD_SIG_KILL );
     233
     234    // detached thread can suicide
     235    if( thread->signals & THREAD_SIG_SUICIDE )
     236    {
     237        assert( (thread->flags & THREAD_FLAG_DETACHED), __FUNCTION__,
     238        "thread must be detached in case of suicide\n" );
     239
     240        // remove thread from process
     241        process_remove_thread( thread );
     242
     243        // release memory for thread descriptor
     244        thread_destroy( thread );
     245    }
     246}  // end sched_kill_thread()
    236247
    237248//////////////////////////////////////////
     
    242253    scheduler_t  * sched = &core->scheduler;
    243254
    244     sched_dmsg("\n[DMSG] %s : enter / thread %x on core[%x,%d]\n",
    245     __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
    246 
    247255    // take lock protecting threads lists
    248256    spinlock_lock( &sched->lock );
     
    252260    {
    253261        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    254         if( thread->signals & THREAD_SIG_KILL ) sched_kill_thread( thread );
     262        if( thread->signals ) sched_kill_thread( thread );
    255263    }
    256264
     
    259267    {
    260268        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    261         if( thread->signals & THREAD_SIG_KILL ) sched_kill_thread( thread );
     269        if( thread->signals ) sched_kill_thread( thread );
    262270    }
    263271
     
    265273    spinlock_unlock( &sched->lock );
    266274
    267     sched_dmsg("\n[DMSG] %s : exit / thread %x on core[%x,%d]\n",
    268     __FUNCTION__, CURRENT_THREAD->trdid , local_cxy , core->lid );
    269 
    270275} // end sched_handle_signals()
    271276
    272 ///////////////////////////////////
    273 void sched_yield( thread_t * next )
    274 {
    275     reg_t         sr_save;
    276 
     277//////////////////////////////////////
     278void sched_update( thread_t * current,
     279                   thread_t * next )
     280{
     281    scheduler_t * sched = &current->core->scheduler;
     282
     283    if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
     284    else                               sched->k_last = &current->sched_list;
     285
     286    sched->current = next;
     287}
     288
     289//////////////////
     290void sched_yield()
     291{
     292    thread_t    * next;
    277293    thread_t    * current = CURRENT_THREAD;
    278     core_t      * core    = current->core;
    279     scheduler_t * sched   = &core->scheduler;
    280 
    281     sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] enter / cycle %d\n",
    282     __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
     294 
     295#if( CONFIG_SCHED_DEBUG & 0x1 )
     296if( hal_time_stamp() > CONFIG_SCHED_DEBUG ) sched_display( current->core->lid );
     297#endif
    283298
    284299    // delay the yield if current thread has locks
    285     if( thread_can_yield() == false )
     300    if( (current->local_locks != 0) || (current->remote_locks != 0) )
    286301    {
    287302        current->flags |= THREAD_FLAG_SCHED;
     
    289304    }
    290305
    291     // first loop on all threads to handle pending signals
    292     sched_handle_signals( core );
    293 
    294     // second loop on threads to select next thread if required
    295     if( next == NULL ) next = sched_select( core );
     306    // loop on threads to select next thread
     307    next = sched_select( current->core );
    296308
    297309    // check next thread attached to same core as the calling thread
    298     assert( (next->core == current->core), __FUNCTION__ , "next core != current core\n");
    299 
    300     // check next thread not blocked
    301     assert( (next->blocked == 0), __FUNCTION__ , "next thread is blocked\n");
     310    assert( (next->core == current->core), __FUNCTION__ ,
     311    "next core != current core\n");
     312
     313    // check next thread not blocked when type != IDLE
     314    assert( (next->blocked == 0) || (next->type == THREAD_IDLE) , __FUNCTION__ ,
     315    "next thread %x (%s) is blocked on core[%x,%d]\n",
     316    next->trdid , thread_type_str(next->type) , local_cxy , current->core->lid );
    302317
    303318    // switch contexts and update scheduler state if next != current
    304319        if( next != current )
    305320    {
    306         sched_dmsg("\n[DMSG] %s : trd %x (%s) on core[%x,%d] => trd %x (%s) / cycle %d\n",
    307         __FUNCTION__, current->trdid, thread_type_str(current->type), local_cxy, core->lid,
    308         next->trdid, thread_type_str(next->type), hal_time_stamp() );
    309 
    310         // calling thread desactivate IRQs
    311         hal_disable_irq( &sr_save );
     321        // current thread deactivates IRQs
     322        hal_disable_irq( &switch_save_sr[CURRENT_THREAD->core->lid] );
     323
     324sched_dmsg("\n[DBG] %s : core[%x,%d] / trd %x (%s) (%x,%x) => trd %x (%s) (%x,%x) / cycle %d\n",
     325__FUNCTION__, local_cxy, current->core->lid,
     326current, thread_type_str(current->type), current->process->pid, current->trdid,
     327next   , thread_type_str(next->type)   , next->process->pid   , next->trdid,
     328hal_time_stamp() );
    312329
    313330        // update scheduler
    314         if( current->type == THREAD_USER ) sched->u_last = &current->sched_list;
    315         else                               sched->k_last = &current->sched_list;
    316         sched->current = next;
    317 
    318         // handle FPU
     331        sched_update( current , next );
     332
     333        // handle FPU ownership
    319334            if( next->type == THREAD_USER )
    320335        {
    321                 if( next == core->fpu_owner )  hal_fpu_enable();
    322                 else                           hal_fpu_disable();
     336                if( next == current->core->fpu_owner )  hal_fpu_enable();
     337                else                                    hal_fpu_disable();
    323338        }
    324339
    325         // switch contexts
    326         hal_cpu_context_switch( current , next );
    327 
    328         // restore IRQs when calling thread resume
    329         hal_restore_irq( sr_save );
     340        // switch CPU from calling thread context to new thread context
     341        hal_do_cpu_switch( current->cpu_context, next->cpu_context );
     342
     343        // restore IRQs when next thread resumes
     344        hal_restore_irq( switch_save_sr[CURRENT_THREAD->core->lid] );
    330345    }
    331346    else
    332347    {
    333         sched_dmsg("\n[DMSG] %s : thread %x on core[%x,%d] continue / cycle %d\n",
    334         __FUNCTION__, current->trdid, local_cxy, core->lid, hal_time_stamp() );
     348
     349sched_dmsg("\n[DBG] %s : core[%x,%d] / thread %x (%s) continue / cycle %d\n",
     350__FUNCTION__, local_cxy, current->core->lid, current->trdid,
     351thread_type_str(current->type) ,hal_time_stamp() );
     352
    335353    }
    336354}  // end sched_yield()
    337355
    338 ////////////////////
    339 void sched_display()
     356
     357///////////////////////////////
     358void sched_display( lid_t lid )
    340359{
    341360    list_entry_t * iter;
     
    343362    uint32_t       save_sr;
    344363
    345     thread_t     * current = CURRENT_THREAD;
    346     core_t       * core    = current->core;
     364    if( lid >= LOCAL_CLUSTER->cores_nr )
     365    {
     366        printk("\n[ERROR] in %s : illegal local index %d in cluster %x\n",
     367        __FUNCTION__ , lid , local_cxy );
     368        return;
     369    }
     370
     371    core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    347372    scheduler_t  * sched   = &core->scheduler;
    348373   
    349374    // get pointers on TXT0 chdev
    350     xptr_t    txt0_xp  = chdev_dir.txt[0];
     375    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    351376    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    352377    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     
    358383    remote_spinlock_lock_busy( lock_xp , &save_sr );
    359384
    360     nolock_printk("\n***** scheduler state for core[%x,%d]\n"
    361            "kernel_threads = %d / user_threads = %d / current = %x\n",
    362             local_cxy , core->lid,
    363             sched->k_threads_nr, sched->u_threads_nr, sched->current->trdid );
     385    nolock_printk("\n***** scheduler state for core[%x,%d] at cycle %d\n"
     386           "kernel_threads = %d / user_threads = %d / current = %x / idle = %x\n",
     387            local_cxy , core->lid, hal_time_stamp(),
     388            sched->k_threads_nr, sched->u_threads_nr,
     389            sched->current->trdid , sched->idle->trdid );
    364390
    365391    // display kernel threads
     
    367393    {
    368394        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    369         nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
     395        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
    370396        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
    371397        thread->entry_func, thread->blocked );
     
    376402    {
    377403        thread = LIST_ELEMENT( iter , thread_t , sched_list );
    378         nolock_printk(" - type = %s / trdid = %x / pid = %x / func = %x / blocked_vect = %x\n",
     404        nolock_printk(" - type = %s / trdid = %X / pid = %X / func = %X / blocked = %X\n",
    379405        thread_type_str( thread->type ), thread->trdid, thread->process->pid,
    380406        thread->entry_func, thread->blocked );
  • trunk/kernel/kern/scheduler.h

    r296 r407  
    7474
    7575/*********************************************************************************************
    76  * This function handles pending signals for all registered threads, and tries to make
    77  * a context switch for the core running the calling thread.
    78  * - If the <next> argument is not NULL, this next thread starts execution.
    79  * - If <next> is NULL, it calls the sched_select() function. If there is a runable thread
    80  *   (other than current thread or idle thread), this selected thread starts execution.
    81  * - If there is no other runable thread, the calling thread continues execution.
    82  * - If there is no runable thread, the idle thread is executed.
    83  *********************************************************************************************
    84  * @ next  : local pointer on next thread to run / call sched_select() if NULL.
      76 * This function calls the sched_select() function to select the next thread for
      77 * the core running the calling thread, and makes a context switch when required.
    8578 ********************************************************************************************/
    86 void sched_yield( struct thread_s * next );
     79void sched_yield();
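With the <next> argument removed, a caller simply blocks itself when needed and yields; a hedged sketch of the typical pattern used elsewhere in this changeset (the blocking cause shown is just an example):

    // sketch : block the calling thread on a given cause, then yield the core
    thread_block( CURRENT_THREAD , THREAD_BLOCKED_RPC );
    sched_yield();              // sched_select() picks the next thread to run
    // execution resumes here after another thread unblocks the caller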
    8780
    8881/*********************************************************************************************
     
    9689/*********************************************************************************************
    9790 * This function is used by the scheduler of a given core to actually kill a thread that has
    98  * the SIG_KILL signal set (following a thread_exit() or a thread_kill() event).
     91 * the SIG_KILL / SIG_SUICIDE signal set (following a thread_exit() or a thread_kill() event).
    9992 * - It checks that the thread has released all locks => panic otherwise...
    100  * - It detach the thread from the local process descriptor.
    10193 * - It removes the thread from the scheduler.
    102  * - It release physical memory allocated for thread descriptor.
      94 * - It resets the SIG_KILL signal to acknowledge the killer.
      95 * - In case of SIG_SUICIDE, it removes the detached thread from its process, and destroys it.
    10396 *********************************************************************************************
    10497 * @ thread  : local pointer on the thread descriptor.
     
    121114
    122115/*********************************************************************************************
    123  * This function display the internal state of the calling core scheduler.
     116 * This function displays the internal state of the scheduler of the local core identified by <lid>.
     117 *********************************************************************************************
     118 * @ lid      : local index of target core.
    124119 ********************************************************************************************/
    125 void sched_display();
     120void sched_display( lid_t lid );
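Usage sketch: the <lid> argument must be a valid local core index, otherwise the function prints an error message and returns; for instance, to dump all local schedulers:

    // sketch : display the scheduler state of every core in the local cluster
    lid_t lid;
    for( lid = 0 ; lid < LOCAL_CLUSTER->cores_nr ; lid++ ) sched_display( lid );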
    126121
    127122
  • trunk/kernel/kern/signal.c

    r406 r407  
    4444                hal_atomic_or( &thread->signals , (1 << sig_id) );
    4545
    46         signal_dmsg("\n[DMSG] %s : thread %x in process %x received signal %d\n",
     46        signal_dmsg("\n[DBG] %s : thread %x in process %x received signal %d\n",
    4747                    __FUNCTION__, thread->trdid , process->pid , sig_id );
    4848        }
     
    5959        thread_s * this = CURRENT_THREAD;
    6060
    61         printk("\n[DMSG] %s : threadReceived signal %d, pid %d, tid %x, core %d  [ KILLED ]\n",
      61        printk("\n[DBG] %s : thread received signal %d, pid %d, tid %x, core %d  [ KILLED ]\n",
    6262               sig,
    6363               this->process->pid,
  • trunk/kernel/kern/thread.c

    r406 r407  
    5757    else if( type == THREAD_RPC    ) return "RPC";
    5858    else if( type == THREAD_DEV    ) return "DEV";
    59     else if( type == THREAD_KERNEL ) return "KER";
    6059    else if( type == THREAD_IDLE   ) return "IDL";
    6160    else                             return "undefined";
     
    153152    }
    154153
     154    // compute thread descriptor size without kernel stack
     155    uint32_t desc_size = (intptr_t)(&thread->signature) - (intptr_t)thread + 4;
     156
    155157        // Initialize new thread descriptor
    156158    thread->trdid           = trdid;
     
    170172    thread->u_stack_base    = u_stack_base;
    171173    thread->u_stack_size    = u_stack_size;
    172     thread->k_stack_base    = (intptr_t)thread;
    173     thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE;
     174    thread->k_stack_base    = (intptr_t)thread + desc_size;
     175    thread->k_stack_size    = CONFIG_THREAD_DESC_SIZE - desc_size;
    174176
    175177    thread->entry_func      = func;         // thread entry point
     
    178180    thread->signals         = 0;            // no pending signal
    179181    thread->errno           = 0;            // no error detected
    180     thread->fork_user       = 0;            // no fork required
    181     thread->fork_cxy        = 0;
     182    thread->fork_user       = 0;            // no user defined placement for fork
     183    thread->fork_cxy        = 0;            // user defined target cluster for fork
    182184
    183185    // thread blocked
     
    221223    vseg_t       * vseg;         // stack vseg
    222224
    223     thread_dmsg("\n[DMSG] %s : enters for process %x\n", __FUNCTION__ , pid );
     225    assert( (attr != NULL) , __FUNCTION__, "pthread attributes must be defined" );
    224226
    225227    // get process descriptor local copy
     
    234236
    235237    // select a target core in local cluster
    236     if( attr->attributes & PT_ATTR_CORE_DEFINED ) core_lid = attr->lid;
    237     else                                          core_lid = cluster_select_local_core();
    238 
    239     // check core local index
    240     if( core_lid >= LOCAL_CLUSTER->cores_nr )
    241     {
    242             printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
    243                __FUNCTION__ , core_lid );
    244 
    245         return EINVAL;
     238    if( attr->attributes & PT_ATTR_CORE_DEFINED )
     239    {
     240        core_lid = attr->lid;
     241        if( core_lid >= LOCAL_CLUSTER->cores_nr )
     242        {
     243                printk("\n[ERROR] in %s : illegal core index attribute = %d\n",
     244            __FUNCTION__ , core_lid );
     245            return EINVAL;
     246        }
     247    }
     248    else
     249    {
     250        core_lid = cluster_select_local_core();
    246251    }
    247252
    248253    // allocate a stack from local VMM
    249     vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
     254    vseg = vmm_create_vseg( process,
     255                            VSEG_TYPE_STACK,
     256                            0,                 // base unused
     257                            0,                 // size unused
     258                            0,                 // file_offset unused
     259                            0,                 // file_size unused
     260                            XPTR_NULL,         // mapper_xp unused
     261                            local_cxy );
    250262
    251263    if( vseg == NULL )
     
    287299
    288300    // set DETACHED flag if required
    289     if( attr->attributes & PT_ATTR_DETACH ) thread->flags |= THREAD_FLAG_DETACHED;
     301    if( attr->attributes & PT_ATTR_DETACH )
     302    {
     303        thread->flags |= THREAD_FLAG_DETACHED;
     304    }
    290305
    291306    // allocate & initialize CPU context
    292         error = hal_cpu_context_create( thread );
    293 
    294     if( error )
     307        if( hal_cpu_context_create( thread ) )
    295308    {
    296309            printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
     
    300313    }
    301314
    302     // allocate & initialize FPU context
    303     error = hal_fpu_context_create( thread );
    304 
    305     if( error )
     315    // allocate  FPU context
     316    if( hal_fpu_context_alloc( thread ) )
    306317    {
    307318            printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ );
     
    311322    }
    312323
    313     thread_dmsg("\n[DMSG] %s : exit / trdid = %x / process %x / core = %d\n",
    314                 __FUNCTION__ , thread->trdid , process->pid , core_lid );
     324thread_dmsg("\n[DBG] %s : core[%x,%d] exit / trdid = %x / process %x / core = %d\n",
     325__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid,
     326thread->trdid , process->pid , core_lid );
    315327
    316328    *new_thread = thread;
     
    319331}  // end thread_user_create()
    320332
    321 //////////////////////////////////////////////
     333////////////////////////////////////////////////////
    322334error_t thread_user_fork( process_t * process,
     335                          intptr_t    stack_base,
     336                          uint32_t    stack_size,
    323337                          thread_t ** new_thread )
    324338{
    325339    error_t        error;
    326         thread_t     * thread;       // pointer on new thread descriptor
     340        thread_t     * child;       // pointer on new thread descriptor
    327341    lid_t          core_lid;     // selected core local index
    328         vseg_t       * vseg;         // stack vseg
    329 
    330     thread_dmsg("\n[DMSG] %s : enters\n", __FUNCTION__ );
    331 
    332     // allocate a stack from local VMM
    333     vseg = vmm_create_vseg( process, 0 , 0 , VSEG_TYPE_STACK );
    334 
    335     if( vseg == NULL )
    336     {
    337             printk("\n[ERROR] in %s : cannot create stack vseg\n", __FUNCTION__ );
    338                 return ENOMEM;
    339     }
     342
     343thread_dmsg("\n[DBG] %s : core[%x,%d] enters\n",
     344__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
    340345
    341346    // select a target core in local cluster
    342347    core_lid = cluster_select_local_core();
    343348
    344     // get pointer on calling thread descriptor
    345     thread_t * this = CURRENT_THREAD;
     349    // get pointer on parent thread descriptor
     350    thread_t * parent = CURRENT_THREAD;
    346351
    347352    // allocate memory for new thread descriptor
    348     thread = thread_alloc();
    349 
    350     if( thread == NULL )
     353    child = thread_alloc();
     354
     355    if( child == NULL )
    351356    {
    352357        printk("\n[ERROR] in %s : cannot allocate new thread\n", __FUNCTION__ );
    353         vmm_remove_vseg( vseg );
    354358        return ENOMEM;
    355359    }
    356360
    357361    // initialize thread descriptor
    358     error = thread_init( thread,
     362    error = thread_init( child,
    359363                         process,
    360364                         THREAD_USER,
    361                          this->entry_func,
    362                          this->entry_args,
     365                         parent->entry_func,
     366                         parent->entry_args,
    363367                         core_lid,
    364                          vseg->min,
    365                          vseg->max - vseg->min );
     368                         stack_base,
     369                         stack_size );
    366370
    367371    if( error )
    368372    {
    369373            printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ );
    370         vmm_remove_vseg( vseg );
    371         thread_release( thread );
     374        thread_release( child );
    372375        return EINVAL;
    373376    }
    374377
    375     // set ATTACHED flag if set in this thread
    376     if( this->flags & THREAD_FLAG_DETACHED ) thread->flags = THREAD_FLAG_DETACHED;
    377 
    378     // allocate & initialize CPU context from calling thread
    379         error = hal_cpu_context_copy( thread , this );
    380 
    381     if( error )
    382     {
    383             printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
    384         vmm_remove_vseg( vseg );
    385         thread_release( thread );
     378    // return child pointer
     379    *new_thread = child;
     380
     381    // set DETACHED flag if required
     382    if( parent->flags & THREAD_FLAG_DETACHED ) child->flags = THREAD_FLAG_DETACHED;
     383
     384    // allocate CPU context for child thread
     385        if( hal_cpu_context_alloc( child ) )
     386    {
     387            printk("\n[ERROR] in %s : cannot allocate CPU context\n", __FUNCTION__ );
     388        thread_release( child );
    386389        return ENOMEM;
    387390    }
    388391
    389     // allocate & initialize FPU context from calling thread
    390         error = hal_fpu_context_copy( thread , this );
    391 
    392     if( error )
    393     {
    394             printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ );
    395         vmm_remove_vseg( vseg );
    396         thread_release( thread );
     392    // allocate FPU context for child thread
     393        if( hal_fpu_context_alloc( child ) )
     394    {
     395            printk("\n[ERROR] in %s : cannot allocate FPU context\n", __FUNCTION__ );
     396        thread_release( child );
    397397        return ENOMEM;
    398398    }
    399399
    400     thread_dmsg("\n[DMSG] %s : exit / thread %x for process %x on core %d in cluster %x\n",
    401                  __FUNCTION__, thread->trdid, process->pid, core_lid, local_cxy );
    402 
    403     *new_thread = thread;
     400    // copy kernel stack content from parent to child thread descriptor
     401    void * dst = (void *)(&child->signature) + 4;
     402    void * src = (void *)(&parent->signature) + 4;
     403    memcpy( dst , src , parent->k_stack_size );
     404
     405thread_dmsg("\n[DBG] %s : core[%x,%d] exit / created main thread %x for process %x\n",
     406__FUNCTION__, local_cxy , core_lid , child->trdid , process->pid );
     407
    404408        return 0;
    405409
     
    416420        thread_t     * thread;       // pointer on new thread descriptor
    417421
    418     thread_dmsg("\n[DMSG] %s : enter / for type %s on core[%x,%d] / cycle %d\n",
    419     __FUNCTION__ , thread_type_str( type ) , local_cxy , core_lid , hal_time_stamp() );
    420 
    421     assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
    422               (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
    423               __FUNCTION__ , "illegal thread type" );
     422thread_dmsg("\n[DBG] %s : core[%x,%d] enters / type %s / cycle %d\n",
     423__FUNCTION__ , local_cxy , core_lid , thread_type_str( type ) , hal_time_stamp() );
     424
     425    assert( ( (type == THREAD_IDLE) || (type == THREAD_RPC) || (type == THREAD_DEV) ) ,
     426    __FUNCTION__ , "illegal thread type" );
    424427
    425428    assert( (core_lid < LOCAL_CLUSTER->cores_nr) ,
     
    449452        hal_cpu_context_create( thread );
    450453
    451     thread_dmsg("\n[DMSG] %s : exit / trdid = %x / type = %s / core = [%x,%d] / cycle %d\n",
    452     __FUNCTION__ , thread->trdid , thread_type_str(type) ,
    453     local_cxy , core_lid , hal_time_stamp() );
     454thread_dmsg("\n[DBG] %s : core = [%x,%d] exit / trdid = %x / type %s / cycle %d\n",
     455__FUNCTION__, local_cxy, core_lid, thread->trdid, thread_type_str(type), hal_time_stamp() );
    454456
    455457    *new_thread = thread;
     
    465467                                            lid_t           core_lid )
    466468{
    467     assert( ( (type == THREAD_KERNEL) || (type == THREAD_RPC) ||
    468               (type == THREAD_IDLE)   || (type == THREAD_DEV) ) ,
    469               __FUNCTION__ , "illegal thread type" );
    470 
    471     if( core_lid >= LOCAL_CLUSTER->cores_nr )
    472     {
    473         panic("illegal core_lid / cores = %d / lid = %d / cxy = %x",
    474               LOCAL_CLUSTER->cores_nr , core_lid , local_cxy );
    475     }
     469    assert( (type == THREAD_IDLE) , __FUNCTION__ , "illegal thread type" );
     470
     471    assert( (core_lid < LOCAL_CLUSTER->cores_nr) , __FUNCTION__ , "illegal core index" );
    476472
    477473    error_t  error = thread_init( thread,
     
    487483
    488484    return error;
    489 }
     485
     486}  // end thread_kernel_init()
    490487
    491488///////////////////////////////////////////////////////////////////////////////////////
     
    502499    core_t     * core       = thread->core;
    503500
    504     thread_dmsg("\n[DMSG] %s : enters for thread %x in process %x / type = %s\n",
     501    thread_dmsg("\n[DBG] %s : enters for thread %x in process %x / type = %s\n",
    505502                __FUNCTION__ , thread->trdid , process->pid , thread_type_str( thread->type ) );
    506503
     
    556553        tm_end = hal_get_cycles();
    557554
    558         thread_dmsg("\n[DMSG] %s : exit for thread %x in process %x / duration = %d\n",
     555        thread_dmsg("\n[DBG] %s : exit for thread %x in process %x / duration = %d\n",
    559556                       __FUNCTION__, thread->trdid , process->pid , tm_end - tm_start );
    560 }
     557
     558}   // end thread_destroy()
    561559
    562560/////////////////////////////////////////////////
     
    609607{
    610608    hal_atomic_or( &thread->signals , mask );
     609    hal_fence();
    611610}
    612611
     
    616615{
    617616    hal_atomic_and( &thread->signals , ~mask );
    618 }
    619 
    620 //////////////////////////////////
    621 inline bool_t thread_is_joinable()
    622 {
    623     thread_t * this = CURRENT_THREAD;
    624     return( (this->brothers_list.next != XPTR_NULL) &&
    625             (this->brothers_list.pred != XPTR_NULL) );
    626 }
    627 
    628 //////////////////////////////////
    629 inline bool_t thread_is_runnable()
    630 {
    631     thread_t * this = CURRENT_THREAD;
    632     return( this->blocked == 0 );
     617    hal_fence();
    633618}
    634619
     
    650635    {
    651636        this->flags &= ~THREAD_FLAG_SCHED;
    652         sched_yield( NULL );
    653     }
    654 }
     637        sched_yield();
     638    }
     639
     640}  // end thread_check_sched()
     641
     642/////////////////////////////////////
     643void thread_block( thread_t * thread,
     644                   uint32_t   cause )
     645{
     646    // set blocking cause
     647    hal_atomic_or( &thread->blocked , cause );
     648    hal_fence();
     649
     650} // end thread_block()
     651
     652/////////////////////////////////////////
     653uint32_t thread_unblock( xptr_t   thread,
     654                         uint32_t cause )
     655{
     656    // get thread cluster and local pointer
     657    cxy_t      cxy = GET_CXY( thread );
     658    thread_t * ptr = (thread_t *)GET_PTR( thread );
     659
     660    // reset blocking cause
     661    uint32_t previous = hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
     662    hal_fence();
     663
     664    // return a non zero value if the cause bit is modified
     665    return( previous & cause );
     666
     667}  // end thread_unblock()
    655668
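A hedged sketch of a remote wake-up built on these primitives; thread_cxy and thread_ptr are assumed to identify the blocked thread, and the IPI target core index is an assumed example:

    // sketch : wake up a (possibly remote) thread blocked on RPC completion
    xptr_t thread_xp = XPTR( thread_cxy , thread_ptr );       // assumed identifiers
    if( thread_unblock( thread_xp , THREAD_BLOCKED_RPC ) )
    {
        // the RPC bit was actually set : the thread is now runnable,
        // optionally force a scheduling point on the core running it
        dev_pic_send_ipi( thread_cxy , 0 );                   // core index assumed
    }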
    656669/////////////////////
     
    664677        if( !thread_can_yield() )
    665678        {
    666         printk("ERROR in %s : thread %x in process %x on core %d in cluster %x\n"
    667                " did not released all locks\n",
    668                __FUNCTION__ , this->trdid , this->process->pid ,
    669                CURRENT_CORE->lid , local_cxy );
     679        printk("ERROR in %s : locks not released for thread %x in process %x on core[%x,%d]\n",
     680        __FUNCTION__, this->trdid, this->process->pid, local_cxy, this->core->lid );
    670681        return EINVAL;
    671682    }
     
    686697
    687698    // deschedule
    688     sched_yield( NULL );
     699    sched_yield();
    689700    return 0;
    690 }
    691 
    692 /////////////////////////////////////
    693 void thread_block( thread_t * thread,
    694                    uint32_t   cause )
    695 {
    696     // set blocking cause
    697     hal_atomic_or( &thread->blocked , cause );
    698 }
    699 
    700 ////////////////////////////////////
    701 void thread_unblock( xptr_t   thread,
    702                     uint32_t cause )
    703 {
    704     // get thread cluster and local pointer
    705     cxy_t      cxy = GET_CXY( thread );
    706     thread_t * ptr = (thread_t *)GET_PTR( thread );
    707 
    708     // reset blocking cause
    709     hal_remote_atomic_and( XPTR( cxy , &ptr->blocked ) , ~cause );
    710 }
     701
     702}  // end thread_exit()
    711703
    712704/////////////////////////////////////
     
    721713    // send an IPI to schedule the target thread core.
    722714    dev_pic_send_ipi( local_cxy , target->core->lid );
    723 }
     715
     716}  // end thread_kill()
    724717
    725718///////////////////////
    726719void thread_idle_func()
    727720{
    728 #if CONFIG_IDLE_DEBUG
    729     lid_t  lid = CURRENT_CORE->lid;
    730 #endif
    731 
    732721    while( 1 )
    733722    {
    734         idle_dmsg("\n[DMSG] %s : core[%x][%d] goes to sleep at cycle %d\n",
    735                     __FUNCTION__ , local_cxy , lid , hal_get_cycles() );
    736 
    737         // force core to sleeping state
    738         //hal_core_sleep();
    739 
    740         idle_dmsg("\n[DMSG] %s : core[%x][%d] wake up at cycle %d\n",
    741                     __FUNCTION__ , local_cxy , lid , hal_get_cycles() );
    742 
    743         // force scheduling
    744         sched_yield( NULL );
     723        if( CONFIG_THREAD_IDLE_MODE_SLEEP ) // force core to low-power mode
     724        {
     725
     726idle_dmsg("\n[DBG] %s : core[%x][%d] goes to sleep at cycle %d\n",
     727__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
     728
     729            hal_core_sleep();
     730
     731idle_dmsg("\n[DBG] %s : core[%x][%d] wake up at cycle %d\n",
     732__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , hal_get_cycles() );
     733
     734        }
     735        else                                // yield each ~ 500000 cycles
     736
     737        {
     738             hal_fixed_delay( 500000 );
     739        }
     740
     741        // force scheduling at each iteration
     742        sched_yield();
    745743   }
    746 }
     744}  // end thread_idle_func()
     745
    747746
    748747/////////////////////////////////////////////////
  • trunk/kernel/kern/thread.h

    r406 r407  
    2727
    2828#include <hal_types.h>
     29#include <shared_syscalls.h>
    2930#include <hal_special.h>
    3031#include <xlist.h>
     
    5152
    5253/***************************************************************************************
    53  * This defines the various pthread_attr_t attributes bit-vector.
    54  **************************************************************************************/
    55 
    56 /***************************************************************************************
    57  * This opaque structure contains the user defined attributes for a user thread.
    58  * It is passed as input argument to the thread_user_create() function.
    59  * It is set by the user application itself, using the pthread_attr_***() functions.
    60  * The currently supported attributes are defined below.
    61  **************************************************************************************/
    62 
    63 typedef struct pthread_attr_s
    64 {
    65         uint32_t    attributes;      /*! user defined attributes bit vector               */
    66         cxy_t       cxy;             /*! target cluster identifier                        */
    67         lid_t       lid;             /*! target core index                                */
    68 }
    69 pthread_attr_t;
    70 
    71 typedef enum
    72 {
    73     PT_ATTR_DETACH          = 0x0001,  /*! user defined not joinable                  */
    74     PT_ATTR_CLUSTER_DEFINED = 0x0002,  /*! user defined target cluster                */
    75     PT_ATTR_CORE_DEFINED    = 0x0004,  /*! user defined core index in cluster         */
    76 }
    77 pt_attributes_t;
    78 
    79 /***************************************************************************************
    8054 * This enum defines the thread types.
    8155 **************************************************************************************/
     
    8660        THREAD_RPC     = 1,          /*! kernel thread executing pending RPCs             */
    8761        THREAD_DEV     = 2,          /*! kernel thread executing I/O device commands      */
    88         THREAD_KERNEL  = 3,          /*! other kernel thread                              */
    89         THREAD_IDLE    = 4,          /*! kernel idle thread                               */
    90         THREAD_TYPES_NR
     62        THREAD_IDLE    = 3,          /*! kernel idle thread                               */
    9163}
    9264thread_type_t;
     
    10072#define THREAD_FLAG_JOIN         0x0004  /*! Parent thread made a join                */
    10173#define THREAD_FLAG_EXIT         0x0008  /*! This thread made an exit                 */
    102 #define THREAD_FLAG_SCHED        0x0010  /*! Descheduling required for this thread    */
     74#define THREAD_FLAG_SCHED        0x0010  /*! Scheduling required for this thread      */
    10375
    10476/***************************************************************************************
     
    10678 **************************************************************************************/
    10779
    108 #define THREAD_SIG_KILL          0x0001  /*! This thread must be destroyed ASAP       */
     80#define THREAD_SIG_KILL          0x0001  /*! This thread killed by another thread     */
      81#define THREAD_SIG_SUICIDE       0x0002  /*! This thread requested its own exit       */
    10982
    11083/***************************************************************************************
     
    11689#define THREAD_BLOCKED_MAPPER    0x0004  /*! thread wait mapper                       */
    11790#define THREAD_BLOCKED_JOIN      0x0008  /*! thread blocked in join / wait exit       */
    118 #define THREAD_BLOCKED_EXIT      0x0010  /*! thread blocked in exit / wait join i     */
     91#define THREAD_BLOCKED_EXIT      0x0010  /*! thread blocked in exit / wait join       */
    11992#define THREAD_BLOCKED_KILL      0x0020  /*! thread received kill signal              */
    12093#define THREAD_BLOCKED_SEM       0x0040  /*! thread wait semaphore                    */
    12194#define THREAD_BLOCKED_PAGE      0x0080  /*! thread wait page access                  */
    12295#define THREAD_BLOCKED_USERSYNC  0x0100  /*! thread wait POSIX (cond/mutex/barrier)   */
    123 
    124 #define THREAD_BLOCKED_IDLE      0x1000  /*! thread RPC wait activation               */
     96#define THREAD_BLOCKED_RPC       0x0200  /*! thread wait RPC completion               */
     97
    12598#define THREAD_BLOCKED_DEV_QUEUE 0x2000  /*! thread DEV wait queue                    */
    12699#define THREAD_BLOCKED_DEV_ISR   0x4000  /*! thread DEV wait ISR                      */
     
    199172
    200173    uint32_t            flags;           /*! bit vector of flags                      */
    201     uint32_t            blocked;         /*! bit vector of blocking causes            */
    202     uint32_t            signals;         /*! bit vector of signals                    */
     174    volatile uint32_t   blocked;         /*! bit vector of blocking causes            */
     175    volatile uint32_t   signals;         /*! bit vector of (KILL / SUICIDE) signals   */
    203176
    204177        error_t             errno;           /*! errno value set by last system call      */
     
    212185    remote_spinlock_t * children_lock;   /*! lock protecting the children list        */
    213186
    214     xlist_entry_t       brothers_list;   /*! member of threads with same parent       */
     187    xlist_entry_t       brothers_list;   /*! list of threads attached to same parent  */
    215188
    216189        list_entry_t        sched_list;      /*! member of threads attached to same core  */
     
    273246
    274247/***************************************************************************************
    275  * This function allocates memory for an user thread descriptor in the local cluster,
     248 * This function is used by the fork() system call to create the child process main
     249 * thread. It allocates memory for an user thread descriptor in the local cluster,
    276250 * and initializes it from information contained in the calling thread descriptor.
    277  * It is used by the fork() system call to create the child process main thread.
    278  * The new thread is attached to the core with the lowest load.
    279  * It is registered in the process descriptor defined by the "process" argument.
    280  * This new thread inherits its execution context from the calling thread,
    281  * and the "loadable" field is NOT set.
    282  * The THREAD_BLOCKED_GLOBAL bit is set, and the thread must be activated to start.
     251 * The new thread is attached to the core that has the lowest load in the local cluster.
     252 * It is registered in the child process descriptor defined by the <process> argument.
     253 * This new thread inherits its user stack from the parent thread, as it uses the
     254 * Copy-On-Write mechanism to get a private stack when required.
     255 * The content of the parent kernel stack is copied into the child kernel stack, as
     256 * the Copy-On-Write mechanism cannot be used for kernel segments (because kernel
     257 * uses physical addressing on some architectures).
     258 * The CPU and FPU execution contexts are created and linked to the new thread,
     259 * but the actual context copy is NOT done. The THREAD_BLOCKED_GLOBAL bit is set,
     261 * and the thread must be explicitly unblocked later to make the new thread runnable.
    283261 ***************************************************************************************
    284262 * @ process      : local pointer on owner process descriptor.
     263 * @ stack_base   : user stack base address (from parent).
     264 * @ stack_size   : user stack size (from parent).
    285265 * @ new_thread   : [out] address of buffer for new thread descriptor pointer.
    286266 * @ returns 0 if success / returns ENOMEM if error.
    287267 **************************************************************************************/
    288268error_t thread_user_fork( process_t * process,
     269                          intptr_t    stack_base,
     270                          uint32_t    stack_size,
    289271                          thread_t ** new_thread );
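A hedged sketch of a fork path using this interface; fork_child_thread_sketch() and child_process are hypothetical names, and the user stack arguments are taken from the parent thread as the description above suggests:

    // sketch only : create and activate the child main thread in the child cluster
    static error_t fork_child_thread_sketch( process_t * child_process )
    {
        thread_t * parent = CURRENT_THREAD;
        thread_t * child;

        if( thread_user_fork( child_process,
                              parent->u_stack_base,     // user stack inherited (COW)
                              parent->u_stack_size,
                              &child ) ) return ENOMEM;

        // the child thread is created blocked : unblock it to make it runnable
        thread_unblock( XPTR( local_cxy , child ) , THREAD_BLOCKED_GLOBAL );

        return 0;
    }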
    290272
     
    351333 * It does NOT take a lock, as this function is always called by the parent thread.
    352334 ***************************************************************************************
    353  * @ xp_parent : extended pointer on the parent thread descriptor.
    354  * @ xp_child  : extended pointer on the child thread descriptor.
    355  **************************************************************************************/
    356 void thread_child_parent_link( xptr_t  xp_parent,
    357                                xptr_t  xp_child );
     335 * @ parent_xp : extended pointer on the parent thread descriptor.
     336 * @ child_xp  : extended pointer on the child thread descriptor.
     337 **************************************************************************************/
     338void thread_child_parent_link( xptr_t  parent_xp,
     339                               xptr_t  child_xp );
    358340
    359341/***************************************************************************************
     
    361343 * of attached children threads.
    362344 ***************************************************************************************
    363  * @ xp_parent : extended pointer on the parent thread descriptor.
    364  * @ xp_child  : extended pointer on the child thread descriptor.
    365  **************************************************************************************/
    366 void thread_child_parent_unlink( xptr_t xp_parent,
    367                                  xptr_t xp_child );
     345 * @ parent_xp : extended pointer on the parent thread descriptor.
     346 * @ child_xp  : extended pointer on the child thread descriptor.
     347 **************************************************************************************/
     348void thread_child_parent_unlink( xptr_t parent_xp,
     349                                 xptr_t child_xp );
    368350
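As an illustration only (not part of this changeset), the two functions above are normally used as a symmetric pair by the parent thread; the helper names below are invented for the example.

    // Hypothetical sketch: register an attached child, later remove it.
    // parent_xp and child_xp are extended pointers, so parent and child
    // may live in different clusters.
    void example_attach( xptr_t parent_xp , xptr_t child_xp )
    {
        thread_child_parent_link( parent_xp , child_xp );     // at thread creation
    }

    void example_detach( xptr_t parent_xp , xptr_t child_xp )
    {
        thread_child_parent_unlink( parent_xp , child_xp );   // e.g. after pthread_join()
    }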
    369351/***************************************************************************************
     
    386368
    387369/***************************************************************************************
    388  * This function returns true if the calling thread is attached to its parent thread.
    389  **************************************************************************************/
    390 inline bool_t thread_is_joinable();
    391 
    392 /***************************************************************************************
    393  * This function returns true if the calling thread is not blocked.
    394  **************************************************************************************/
    395 inline bool_t thread_is_runnable();
    396 
    397 /***************************************************************************************
    398370 * This function checks if the calling thread can deschedule.
    399371 ***************************************************************************************
     
    403375
    404376/***************************************************************************************
    405  * This function implements the delayed descheduling machanism : It is called  by
      377 * This function implements the delayed descheduling mechanism : it is called by
    406378 * all lock release functions, and calls the sched_yield() function when all locks
    407  * have beeen released and the THREAD_FLAG_SCHED flag is set.
      379 * have been released and the calling thread's THREAD_FLAG_SCHED flag is set.
    408380 **************************************************************************************/
    409381void thread_check_sched();
    410382
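A sketch of the pattern described above (illustrative only): the unlock details and the locks counter field name are assumptions; only thread_check_sched() is taken from this header.

    // Hypothetical lock release path using the delayed descheduling mechanism.
    void example_unlock( void )
    {
        thread_t * this = CURRENT_THREAD;

        // ... release the lock itself (details omitted) ...

        // decrement the calling thread locks counter (assumed field name)
        this->local_locks--;

        // if all locks are now released and THREAD_FLAG_SCHED is set,
        // this call performs the postponed sched_yield()
        thread_check_sched();
    }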
    411383/***************************************************************************************
    412  * This function can be used by the calling thread to suicide.
    413  * All locks must be previously released.
    414  * The scenario depends on the attached/detached flag :
    415  * - if detached, it sets the SIG_KILL signal in the "signals" bit_vector, registers
    416  *   the BLOCKED_EXIT bit in the "blocked" bit_vector, and deschedule.
    417  * - if attached, it simply sets the BLOCKED_EXIT bit in the "blocked" bit vector
    418  *   and deschedule. The SIG_KILL signal will be set by the parent thread when
    419  *   executing the pthread_join().
      384 * This function is used by the calling thread to kill itself (suicide).
     385 * All locks must be previously released. The scenario depends on the DETACHED flag.
     386 * if detached :
     387 * 1) the calling thread sets the SIG_SUICIDE bit in the "signals" bit_vector,
      388 *    registers the BLOCKED_GLOBAL bit in the "blocked" bit_vector, and deschedules.
      389 * 2) the scheduler, detecting the SIG_SUICIDE bit, removes the thread from the
      390 *    scheduler list, removes the thread from its process, and destroys the thread.
     391 * if attached :
     392 * 1) the calling thread simply sets the BLOCKED_EXIT bit in the "blocked" bit vector
      393 *    and deschedules.
      394 * 2) The SIG_KILL and BLOCKED_SIGNAL bits are set by the parent thread when
      395 *    executing pthread_join() and detecting the BLOCKED_EXIT bit.
      396 *    The scenario is then a standard kill, as described below.
    420397 ***************************************************************************************
    421398 * @ returns 0 if success / returns EINVAL if locks_count is not zero.
     
    424401
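Purely as an illustration of the scenario above: the function name thread_exit() is assumed here, since the prototype itself is outside this hunk, and the caller shown is invented.

    // Hypothetical sketch: a thread terminating itself.
    // All locks must have been released before this point.
    void example_self_terminate( void )
    {
        // detached thread : SIG_SUICIDE + BLOCKED_GLOBAL are set, then the
        //   scheduler destroys the thread after the descheduling.
        // attached thread : only BLOCKED_EXIT is set; the parent finishes
        //   the job in pthread_join() with a standard kill.
        if( thread_exit() )      // assumed name; returns EINVAL if locks_count != 0
        {
            // should not happen if all locks were actually released
        }
    }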
    425402/***************************************************************************************
      403 * This function requests to kill a local target thread, with the following scenario:
      404 * 1. This function sets the BLOCKED_GLOBAL bit in the target thread "blocked" bit_vector,
      405 *    sets the SIG_KILL bit in the target thread "signals" bit_vector, and sends an IPI
     406 *    to the target thread core to force scheduling.
      407 * 2. The scheduler, detecting that SIG_KILL is set, removes the thread from the scheduler
      408 *    list, and resets the SIG_KILL bit to acknowledge the killer.
      409 * 3. The caller of this function (such as the process_kill() function) must poll
      410 *    the SIG_KILL bit until it is reset, detach the thread from its parent if the
      411 *    thread is attached, remove the thread from its process, and destroy the thread.
     412 *
      413 * NOTE: The third step must be done by the caller to allow the process_kill()
     414 *       function to parallelize the work on all schedulers in a given cluster.
     415 ***************************************************************************************
     416 * @ thread   : local pointer on the target thread.
     417 **************************************************************************************/
     418void thread_kill( thread_t * thread );
     419
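A sketch of the caller side of this three-step protocol (illustrative only): the SIG_KILL mask name THREAD_SIG_KILL and the final cleanup helpers are assumptions; thread_kill() and the "signals" field come from this header.

    // Hypothetical process_kill()-style loop body for one target thread.
    void example_kill_thread( thread_t * target )
    {
        // step 1 : block the target, set SIG_KILL, send an IPI to its core
        thread_kill( target );

        // step 3 : wait for the scheduler acknowledge (step 2 resets SIG_KILL)
        while( target->signals & THREAD_SIG_KILL );   // assumed mask name

        // then detach the thread from its parent if attached, remove it from
        // its process, and destroy it (cleanup helpers not shown)
    }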
     420/***************************************************************************************
    426421 * This function registers a blocking cause in the target thread "blocked" bit vector.
    427  * Warning : this function does not deschedule the calling thread.
    428  * The descheduling can be forced by a sched_yield().
    429  ***************************************************************************************
    430  * @ thread   : local pointer on target thread.
     422 * Warning : this function does not deschedule the calling thread, and the descheduling
      423 * must be explicitly forced by a sched_yield().
     424 ***************************************************************************************
     425 * @ thread   : local pointer on target thread descriptor.
    431426 * @ cause    : mask defining the cause (one hot).
    432427 **************************************************************************************/
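To make the warning above concrete, a short usage sketch (illustrative only): the THREAD_BLOCKED_IO cause and the exact sched_yield() signature are assumptions.

    // Hypothetical sketch: block the calling thread, then deschedule explicitly,
    // since thread_block() never deschedules by itself.
    thread_t * this = CURRENT_THREAD;

    thread_block( this , THREAD_BLOCKED_IO );   // assumed one-hot cause
    sched_yield();                              // descheduling must be forced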
     
    436431/***************************************************************************************
    437432 * This function resets the bit identified by the cause argument in the "blocked"
    438  * bit vector of a remote thread descriptor.
     433 * bit vector of a remote thread descriptor, using an atomic access.
    439434 * We need an extended pointer, because the client thread of an I/O operation on a
    440435 * given device is not in the same cluster as the associated device descriptor.
     
    444439 * @ thread   : extended pointer on the remote thread.
    445440 * @ cause    : mask defining the cause (one hot).
    446  **************************************************************************************/
    447 void thread_unblock( xptr_t   thread,
    448                      uint32_t cause );
    449 
    450 /***************************************************************************************
    451  * This function kills a target thread, identified by its local pointer.
    452  * It is generally called by the local process_destroy() function.
    453  * - it forces the global blocked bit in target thread descriptor.
    454  * - it set the SIG_KILL signal in target thread.
    455  * - it send an IPI_SCHED_REQUEST to the target thread core.
    456  ***************************************************************************************
    457  * @ thread   : local pointer on the target thread.
    458  * @ returns 0 if success / returns EINVAL if locks_count is not zero.
    459  **************************************************************************************/
    460 void thread_kill( thread_t * thread );
      441 * @ returns non zero if the bit vector was actually modified / returns 0 otherwise
     442 **************************************************************************************/
     443uint32_t thread_unblock( xptr_t   thread,
     444                         uint32_t cause );
    461445
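An illustrative use of the new return value, in the spirit of the chdev server code touched by this changeset (the client_xp variable and the blocking cause are assumptions):

    // Hypothetical sketch: wake up a remote client thread after an I/O
    // completion, and check whether its "blocked" bit vector was modified.
    uint32_t modified = thread_unblock( client_xp , THREAD_BLOCKED_IO );

    if( modified == 0 )
    {
        // the client was already unblocked : nothing changed
    }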
    462446/***************************************************************************************