Changeset 683 for trunk/kernel/libk


Timestamp: Jan 13, 2021, 12:36:17 AM (4 years ago)
Author:    alain
Message:   All modifications required to support the <tcp_chat> application,
           including error recovery in case of packet loss.
Location:  trunk/kernel/libk
Files:     14 edited

Legend: unchanged lines keep their indentation; removed lines are prefixed with "-",
added lines with "+"; "…" marks elided unchanged code between hunks.
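Most hunks below apply one systematic change: the kmem_req_t request descriptor is
removed from the kernel memory allocator interface, and kmem_alloc() / kmem_free()
(and the kmem_remote_* variants, which take the target cluster cxy as first argument)
now receive the size order (log2 of the size in bytes) and the allocation flags
directly. A minimal before/after sketch of the pattern; my_type_t is a hypothetical
placeholder for any of the descriptors touched below:

    kmem_req_t  req;                               // old request descriptor
    void      * ptr;

    /* r671 : parameters packed in a kmem_req_t */
    req.type  = KMEM_KCM;                          // allocator chosen by caller
    req.order = bits_log2( sizeof(my_type_t) );    // log2 of size in bytes
    req.flags = AF_ZERO | AF_KERNEL;
    ptr       = kmem_alloc( &req );
    req.ptr   = ptr;
    kmem_free( &req );

    /* r683 : order and flags passed directly ; the KCM / PPM choice is
       presumably made inside the allocator, as the remote_buf.c hunk
       below suggests */
    ptr = kmem_alloc( bits_log2(sizeof(my_type_t)) , AF_ZERO );
    kmem_free( ptr , bits_log2(sizeof(my_type_t)) );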
  • trunk/kernel/libk/elf.c (r671 → r683)

                {
                        type                       = VSEG_TYPE_CODE;
-                       process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT;
+                       process->vmm.code_vpn_base = vbase >> CONFIG_PPM_PAGE_ORDER;
                }
                else               // found DATA segment
                {
                        type                       = VSEG_TYPE_DATA;
-                       process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_SHIFT;
+                       process->vmm.data_vpn_base = vbase >> CONFIG_PPM_PAGE_ORDER;
                }
…
{
    uint32_t     new_offset;       // unused, required by vfs_lseek()
-       kmem_req_t   req;              // kmem request for program header
        Elf_Ehdr     header;           // local buffer for .elf header
        void       * segs_base;        // pointer on buffer for segment descriptors array
…
        // allocate memory for segment descriptors array
-       req.type  = KMEM_KCM;
-       req.order = bits_log2(segs_size);
-       req.flags = AF_KERNEL;
-       segs_base = kmem_alloc( &req );
+       segs_base = kmem_alloc( bits_log2(segs_size) , AF_NONE );

        if( segs_base == NULL )
…
        {
                printk("\n[ERROR] in %s : cannot seek for descriptors array\n", __FUNCTION__ );
-               req.ptr = segs_base;
-               kmem_free( &req );
+               kmem_free( segs_base , bits_log2(segs_size) );
                return -1;
        }
…
        {
                printk("\n[ERROR] in %s : cannot read segments descriptors\n", __FUNCTION__ );
-               req.ptr = segs_base;
-               kmem_free( &req );
+               kmem_free( segs_base , bits_log2(segs_size) );
                return -1;
        }
…
        if( error )
        {
-               req.ptr = segs_base;
-               kmem_free( &req );
+               printk("\n[ERROR] in %s : cannot register segments descriptors\n", __FUNCTION__ );
+               kmem_free( segs_base , bits_log2(segs_size) );
                return -1;
        }
…
        // release allocated memory for program header
-       req.ptr = segs_base;
-       kmem_free(&req);
+    kmem_free( segs_base , bits_log2(segs_size) );

#if DEBUG_ELF_LOAD
  • trunk/kernel/libk/grdxt.c (r671 → r683)

                    uint32_t  ix3_width )
{
+
+assert( __FUNCTION__, (rt != NULL),
+"pointer on radix tree is NULL\n" );
+
    void      ** root;
-       kmem_req_t   req;

        rt->ix1_width = ix1_width;
…
    // allocates first level array
-       req.type  = KMEM_KCM;
-       req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
-       req.flags = AF_KERNEL | AF_ZERO;
-       root = kmem_alloc( &req );
+       uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
+       root = kmem_alloc( order , AF_ZERO );

        if( root == NULL )
…
void grdxt_destroy( grdxt_t * rt )
{
-       kmem_req_t req;
+
+assert( __FUNCTION__, (rt != NULL),
+"pointer on radix tree is NULL\n" );
+
+    uint32_t   order;

    uint32_t   w1 = rt->ix1_width;
…
        uint32_t   ix2;
        uint32_t   ix3;
-
-assert( __FUNCTION__, (rt != NULL) , "pointer on radix tree is NULL\n" );

        for( ix1=0 ; ix1 < (uint32_t)(1 << w1) ; ix1++ )
…
            // release level 3 array
-           req.type = KMEM_KCM;
-           req.ptr  = ptr3;
-           kmem_free( &req );
+           order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 );
+           kmem_free( ptr3 , order );
        }

        // release level 2 array
-       req.type = KMEM_KCM;
-       req.ptr  = ptr2;
-       kmem_free( &req );
+       order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );
+       kmem_free( ptr2 , order );
    }

    // release level 1 array
-    req.type = KMEM_KCM;
-    req.ptr  = ptr1;
-    kmem_free( &req );
+    order = w1 + ( (sizeof(void*) == 4) ? 2 : 3 );
+    kmem_free( ptr1 , order );

}  // end grdxt_destroy()
…
                      void     * value )
{
-       kmem_req_t      req;
+    uint32_t        order;

    uint32_t        w1 = rt->ix1_width;
…
// Check key value
-assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
+assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ),
+"illegal key value %x\n", key );

    // compute indexes
…
        {
        // allocate memory for level 2 array
-       req.type  = KMEM_KCM;
-       req.order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );
-       req.flags = AF_KERNEL | AF_ZERO;
-       ptr2 = kmem_alloc( &req );
+       order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );
+       ptr2 = kmem_alloc( order , AF_ZERO );

        if( ptr2 == NULL) return -1;
…
        {
        // allocate memory for level 3 array
-       req.type = KMEM_KCM;
-       req.order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 );
-       req.flags = AF_KERNEL | AF_ZERO;
-       ptr3 = kmem_alloc( &req );
+       order = w3 + ( (sizeof(void*) == 4) ? 2 : 3 );
+       ptr3 = kmem_alloc( order , AF_ZERO );

        if( ptr3 == NULL) return -1;
…
// Check key value
-assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
+assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ),
+"illegal key value %x\n", key );

    // compute indexes
…
// Check key value
-assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
+assert( __FUNCTION__, ((key >> (w1 + w2 + w3)) == 0 ),
+"illegal key value %x\n", key );

    void         ** ptr1 = rt->root;
…
// Check key value
-assert( __FUNCTION__, ((start_key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", start_key );
+assert( __FUNCTION__, ((start_key >> (w1 + w2 + w3)) == 0 ),
+"illegal key value %x\n", start_key );

    // compute max indexes
…
                           uint32_t   ix3_width )
{
+
+assert( __FUNCTION__, (rt_xp != XPTR_NULL),
+"extended pointer on radix tree is NULL\n" );
+
    void      ** root;
-       kmem_req_t   req;

    // get cluster and local pointer
…
    // allocates first level array
-       req.type  = KMEM_KCM;
-       req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
-       req.flags = AF_KERNEL | AF_ZERO;
-       root      = kmem_remote_alloc( rt_cxy , &req );
+       uint32_t order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
+       root = kmem_remote_alloc( rt_cxy , order , AF_ZERO );

        if( root == NULL )
…
void grdxt_remote_destroy( xptr_t  rt_xp )
{
-       kmem_req_t req;
+
+assert( __FUNCTION__, (rt_xp != XPTR_NULL),
+"extended pointer on radix tree is NULL\n" );
+
+    uint32_t   order;

    uint32_t   w1;
…
            // release level 3 array
-           req.type = KMEM_KCM;
-           req.ptr  = ptr3;
-           kmem_remote_free( rt_cxy , &req );
+           order = w3 + ((sizeof(void*) == 4) ? 2 : 3 );
+           kmem_remote_free( rt_cxy , ptr3 , order );
        }

        // release level 2 array
-       req.type = KMEM_KCM;
-       req.ptr  = ptr2;
-       kmem_remote_free( rt_cxy , &req );
+       order = w2 + ((sizeof(void*) == 4) ? 2 : 3 );
+       kmem_remote_free( rt_cxy , ptr2 , order );
    }

    // release level 1 array
-    req.type = KMEM_KCM;
-    req.ptr  = ptr1;
-    kmem_remote_free( rt_cxy , &req );
+    order = w1 + ((sizeof(void*) == 4) ? 2 : 3 );
+    kmem_remote_free( rt_cxy , ptr1 , order );

}  // end grdxt_remote_destroy()
…
                             void     * value )
{
-    kmem_req_t  req;
+    uint32_t order;

    // get cluster and local pointer on remote rt descriptor
…
    {
        // allocate memory in remote cluster
-       req.type  = KMEM_KCM;
-       req.order = w2 + ((sizeof(void*) == 4) ? 2 : 3 );
-       req.flags = AF_ZERO | AF_KERNEL;
-       ptr2 = kmem_remote_alloc( rt_cxy , &req );
+       order = w2 + ((sizeof(void*) == 4) ? 2 : 3 );
+       ptr2 = kmem_remote_alloc( rt_cxy , order , AF_ZERO );

        if( ptr2 == NULL ) return -1;
…
    {
        // allocate memory in remote cluster
-       req.type  = KMEM_KCM;
-       req.order = w3 + ((sizeof(void*) == 4) ? 2 : 3 );
-       req.flags = AF_ZERO | AF_KERNEL;
-       ptr3 = kmem_remote_alloc( rt_cxy , &req );
+       order = w3 + ((sizeof(void*) == 4) ? 2 : 3 );
+       ptr3 = kmem_remote_alloc( rt_cxy , order , AF_ZERO );

        if( ptr3 == NULL ) return -1;
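All the order computations in these grdxt hunks follow from the level-array
geometry: a level with index width w holds (1 << w) void* entries, so the
allocation size is (1 << w) * sizeof(void*) bytes. A sketch restating that
arithmetic (the helper name is hypothetical, not in the source):

    /* log2 of the byte size of a radix-tree level array :
       (1 << w) entries of sizeof(void*) bytes each, hence
       order = w + log2(sizeof(void*)) = w + (2 or 3)      */
    static inline uint32_t level_array_order( uint32_t w )
    {
        return w + ( (sizeof(void*) == 4) ? 2 : 3 );
    }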
  • trunk/kernel/libk/remote_barrier.c (r671 → r683)

 * remote_barrier.c -  POSIX barrier implementation.
 *
- * Author   Alain Greiner (2016,2017,2018,2019)
+ * Author   Alain Greiner    (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
…
{
    generic_barrier_t * gen_barrier_ptr;  // local pointer on generic barrier descriptor
-    void              * barrier;          // local pointer on implementation barrier descriptor
-    kmem_req_t          req;              // kmem request
+    void              * barrier;          // local pointer on impl barrier descriptor

    // get pointer on local process_descriptor
…
    // allocate memory for generic barrier descriptor
-    req.type   = KMEM_KCM;
-    req.order  = bits_log2( sizeof(generic_barrier_t) );
-    req.flags  = AF_ZERO | AF_KERNEL;
-    gen_barrier_ptr = kmem_remote_alloc( ref_cxy , &req );
-
+    gen_barrier_ptr = kmem_remote_alloc( ref_cxy,
+                                         bits_log2(sizeof(generic_barrier_t)),
+                                         AF_KERNEL );
    if( gen_barrier_ptr == NULL )
    {
…
    // create implementation specific barrier descriptor
-    if( attr == NULL )                                    // simple barrier implementation
+    if( attr == NULL )                                    // simple barrier
    {
        // create simple barrier descriptor
         barrier = simple_barrier_create( count );
-
-        if( barrier == NULL ) return -1;
-    }
-    else                                                  // QDT barrier implementation
+    }
+    else                                                  // QDT barrier
    {
        uint32_t x_size   = attr->x_size;
…
            printk("\n[ERROR] in %s : count(%d) != x_size(%d) * y_size(%d) * nthreads(%d)\n",
            __FUNCTION__, count, x_size, y_size, nthreads );
+            kmem_remote_free( ref_cxy,
+                              gen_barrier_ptr,
+                              bits_log2(sizeof(generic_barrier_t)) );
            return -1;
        }
…
        // create DQT barrier descriptor
        barrier = dqt_barrier_create( x_size , y_size , nthreads );
-
-        if( barrier == NULL ) return -1;
+    }
+
+    if( barrier == NULL )
+    {
+        printk("\n[ERROR] in %s : cannot create impl barrier\n", __FUNCTION__ );
+        kmem_remote_free( ref_cxy,
+                          gen_barrier_ptr,
+                          bits_log2(sizeof(generic_barrier_t)) );
+        return -1;
    }

…
void generic_barrier_destroy( xptr_t gen_barrier_xp )
{
-    kmem_req_t  req;              // kmem request
-
    // get pointer on local process_descriptor
    process_t * process = CURRENT_THREAD->process;
…
    remote_busylock_release( lock_xp );

-    // release memory allocated to barrier descriptor
-    req.type          = KMEM_KCM;
-    req.ptr           = gen_barrier_ptr;
-    kmem_remote_free( ref_cxy , &req );
+    // release memory allocated to generic barrier descriptor
+    kmem_remote_free( gen_barrier_cxy,
+                      gen_barrier_ptr,
+                      bits_log2(sizeof(generic_barrier_t)) );

}  // end generic_barrier_destroy()
…
simple_barrier_t * simple_barrier_create( uint32_t  count )
{
-    kmem_req_t         req;
    simple_barrier_t * barrier;

…
    // allocate memory for simple barrier descriptor
-    req.type   = KMEM_KCM;
-    req.order  = bits_log2( sizeof(simple_barrier_t) );
-    req.flags  = AF_ZERO | AF_KERNEL;
-    barrier    = kmem_remote_alloc( ref_cxy , &req );
-
+    barrier  = kmem_remote_alloc( ref_cxy,
+                                  bits_log2(sizeof(simple_barrier_t)),
+                                  AF_ZERO );
    if( barrier == NULL )
    {
…
void simple_barrier_destroy( xptr_t barrier_xp )
{
-    kmem_req_t  req;
-
    // get barrier cluster and local pointer
    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
…

    // release memory allocated for barrier descriptor
-    req.type = KMEM_KCM;
-    req.ptr  = barrier_ptr;
-    kmem_remote_free( barrier_cxy , &req );
+    kmem_remote_free( barrier_cxy,
+                      barrier_ptr,
+                      bits_log2(sizeof(simple_barrier_t)) );

#if DEBUG_BARRIER_DESTROY
…
    uint32_t        y;             // Y coordinate in QDT mesh
    uint32_t        l;             // level coordinate
-    kmem_req_t      req;           // kmem request

    // compute number of DQT levels, depending on the mesh size
…
// check x_size and y_size arguments
-assert( __FUNCTION__, (z <= 16) , "DQT mesh size larger than (16*16)\n");
+assert( __FUNCTION__, (z <= 16),
+"DQT mesh size larger than (16*16)\n");

// check size of an array of 5 DQT nodes
-assert( __FUNCTION__, (sizeof(dqt_node_t) * 5 <= 512 ), "array of DQT nodes larger than 512 bytes\n");
+assert( __FUNCTION__, (sizeof(dqt_node_t) * 5 <= 512 ),
+"array of DQT nodes larger than 512 bytes\n");

// check size of DQT barrier descriptor
-assert( __FUNCTION__, (sizeof(dqt_barrier_t) <= 0x4000 ), "DQT barrier descriptor larger than 4 pages\n");
+assert( __FUNCTION__, (sizeof(dqt_barrier_t) <= 0x4000 ),
+"DQT barrier descriptor larger than 4 pages\n");

    // get pointer on client thread and process descriptors
…
    // 1. allocate 4 small pages for the DQT barrier descriptor in reference cluster
-    req.type   = KMEM_PPM;
-    req.order  = 2;                     // 4 small pages == 16 Kbytes
-    req.flags  = AF_ZERO | AF_KERNEL;
-    barrier    = kmem_remote_alloc( ref_cxy , &req );
-
+    barrier    = kmem_remote_alloc( ref_cxy,
+                                    CONFIG_PPM_PAGE_ORDER + 2,   // 4 small pages
+                                    AF_ZERO );
    if( barrier == NULL )
    {
…
        {
            cxy_t  cxy = HAL_CXY_FROM_XY( x , y );   // target cluster identifier
-            xptr_t local_array_xp;                   // xptr of nodes array in cluster cxy
+            xptr_t local_array_xp;                   // xptr on nodes array in cluster cxy

            // allocate memory in existing clusters only
            if( LOCAL_CLUSTER->cluster_info[x][y] )
            {
-                req.type  = KMEM_KCM;
-                req.order = 9;                    // 512 bytes
-                req.flags = AF_ZERO | AF_KERNEL;
-
-                void * ptr = kmem_remote_alloc( cxy , &req );
+                void * ptr = kmem_remote_alloc( cxy , 9 , AF_ZERO );  // 512 bytes

                if( ptr == NULL )
…
void dqt_barrier_destroy( xptr_t   barrier_xp )
{
-    kmem_req_t   req;                      // kmem request
    uint32_t     x;
    uint32_t     y;
-

    // get DQT barrier descriptor cluster and local pointer
…
                void  * buf       = GET_PTR( buf_xp );

-assert( __FUNCTION__, (cxy == GET_CXY(buf_xp)) , "bad extended pointer on dqt_nodes array\n" );
-
-                req.type  = KMEM_KCM;
-                req.ptr   = buf;
-                kmem_remote_free( cxy , &req );
+                kmem_remote_free( cxy , buf , 9 );    // 512 bytes

#if DEBUG_BARRIER_DESTROY
…
    // 2. release memory allocated for barrier descriptor in ref cluster
-    req.type = KMEM_PPM;
-    req.ptr  = barrier_ptr;
-    kmem_remote_free( barrier_cxy , &req );
+    kmem_remote_free( barrier_cxy,
+                      barrier_ptr,
+                      CONFIG_PPM_PAGE_ORDER + 2 );   // 4 small pages

#if DEBUG_BARRIER_DESTROY
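Since orders are now the log2 of a size in bytes, a multi-page PPM request encodes
the page count on top of the page order: the old pair (req.type = KMEM_PPM,
req.order = 2) becomes the single value CONFIG_PPM_PAGE_ORDER + 2. A worked check,
assuming 4-Kbyte pages (CONFIG_PPM_PAGE_ORDER == 12, consistent with the
"4 small pages == 16 Kbytes" comment above):

    /* assumption : CONFIG_PPM_PAGE_ORDER == 12 (4-Kbyte small pages) */
    uint32_t order = CONFIG_PPM_PAGE_ORDER + 2;   // 12 + 2 = 14
    uint32_t bytes = 1 << order;                  // 16384 bytes = 16 Kbytes = 4 pages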
  • trunk/kernel/libk/remote_buf.c (r671 → r683)

remote_buf_t * remote_buf_alloc( cxy_t  cxy )
{
-    kmem_req_t req;
-
-    req.type  = KMEM_KCM;
-    req.order = bits_log2( sizeof(remote_buf_t) );
-    req.flags = AF_ZERO;
-    return kmem_remote_alloc( cxy , &req );
+    return kmem_remote_alloc( cxy,
+                              bits_log2(sizeof(remote_buf_t)),
+                              AF_ZERO );
}

…
assert( __FUNCTION__ , (order < 32) , "order cannot be larger than 31" );

-    kmem_req_t     req;
    uint8_t      * data;

…
    // allocate the data buffer
-    if( order >= CONFIG_PPM_PAGE_SHIFT )  // use KMEM_PPM
-    {
-        req.type  = KMEM_PPM;
-        req.order = order - CONFIG_PPM_PAGE_SHIFT;
-        req.flags = AF_NONE;
-        data = kmem_remote_alloc( buf_cxy , &req );
-
-        if( data == NULL )  return -1;
-    }
-    else                                     // use KMEM_KCM
-    {
-        req.type  = KMEM_KCM;
-        req.order = order;
-        req.flags = AF_NONE;
-        data = kmem_remote_alloc( buf_cxy , &req );
-
-        if( data == NULL )  return -1;
-    }
+    data = kmem_remote_alloc( buf_cxy , order , AF_NONE );
+
+    if( data == NULL )  return -1;

    // initialize buffer descriptor
…
void remote_buf_release_data( xptr_t  buf_xp )
{
-    kmem_req_t     req;

assert( __FUNCTION__ , (buf_xp != XPTR_NULL) , "buf_xp cannot be NULL" );
…

    // release memory allocated for data buffer  if required
-    if( data_ptr != NULL )
-    {
-        if( order >= CONFIG_PPM_PAGE_SHIFT )          // use KMEM_PPM
-        {
-            req.type  = KMEM_PPM;
-            req.ptr   = data_ptr;
-            kmem_remote_free( buf_cxy , &req );
-        }
-        else                                          // use KMEM_KCM
-        {
-            req.type  = KMEM_KCM;
-            req.ptr   = data_ptr;
-            kmem_remote_free( buf_cxy , &req );
-        }
-    }
+    if( data_ptr != NULL )  kmem_remote_free( buf_cxy , data_ptr , order );
+
}  // end remote_buf_release_data()

…
assert( __FUNCTION__ , (buf_xp != XPTR_NULL) , "buf_xp cannot be NULL" );

-    kmem_req_t   req;
-
    remote_buf_t * buf_ptr = GET_PTR( buf_xp );
    cxy_t          buf_cxy = GET_CXY( buf_xp );
…

    // release remote_buf descriptor
-    req.type = KMEM_KCM;
-    req.ptr  = buf_ptr;
-    kmem_remote_free( buf_cxy , &req );
+    kmem_remote_free( buf_cxy , buf_ptr , bits_log2(sizeof(remote_buf_t)) );

}  // end remote_buf_destroy()
…
}  // end remote_buf_status()

-
+///////////////////////////////////////////////
+void remote_buf_display( const char * func_str,
+                         xptr_t       buf_xp,
+                         uint32_t     nbytes,
+                         uint32_t     offset )
+{
+    if( nbytes > 256 )
+    {
+        printk("\n[WARNING] in %s : no more than 256 bytes\n", __FUNCTION__ );
+        nbytes = 256;
+    }
+
+    uint8_t        string[128];          // for header
+    uint8_t        local_data[256];      // local data buffer
+
+    cxy_t          cxy = GET_CXY( buf_xp );
+    remote_buf_t * ptr = GET_PTR( buf_xp );
+
+    uint32_t   order = hal_remote_l32( XPTR( cxy , &ptr->order ));
+    uint32_t   rid   = hal_remote_l32( XPTR( cxy , &ptr->rid ));
+    uint32_t   wid   = hal_remote_l32( XPTR( cxy , &ptr->wid ));
+    uint32_t   sts   = hal_remote_l32( XPTR( cxy , &ptr->sts ));
+    uint8_t  * data  = hal_remote_lpt( XPTR( cxy , &ptr->data ));
+
+    // make a local copy of data buffer
+    hal_remote_memcpy( XPTR( local_cxy , local_data ),
+                       XPTR( cxy , data + offset ),
+                       nbytes );
+
+    // build header
+    snprintk( (char*)string , 128 ,
+    "in %s remote buffer [%x,%x] : size %d / rid %d / wid %d / sts %d ",
+    func_str , cxy , ptr , 1<<order , rid , wid , sts );
+
+    // display buffer on TXT0
+    putb( (char*)string , local_data , nbytes );
+
+}  // end remote_buf_display()
  • trunk/kernel/libk/remote_buf.h (r671 → r683)

uint32_t remote_buf_status( xptr_t  buf_xp );

+/************************************************************************************
+ * This debug function displays on the kernel terminal the current state of a remote
+ * buffer identified by the <buf_xp> argument : order / rid / wid / sts.
+ * If the <nbytes> argument is not null, and not larger than 256, it displays up to
+ * 256 bytes of the data buffer, from <offset> to (offset + nbytes - 1).
+ ************************************************************************************
+ * @ func_str  : [in] calling function name (displayed in header).
+ * @ buf_xp    : [in] extended pointer on remote buffer descriptor.
+ * @ nbytes    : [in] number of data bytes to display.
+ * @ offset    : [in] index of first displayed byte in data buffer.
+ ***********************************************************************************/
+void remote_buf_display( const char * func_str,
+                         xptr_t       buf_xp,
+                         uint32_t     nbytes,
+                         uint32_t     offset );
+
#endif  /* _REMOTE_BUFFER_H_ */
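A hypothetical call site for the new debug function (illustration only, not part of
this changeset), dumping the first 64 bytes of a received buffer:

    remote_buf_display( __FUNCTION__,  // calling function name, shown in the header
                        buf_xp,        // extended pointer on the remote_buf_t
                        64,            // bytes to display (clamped to 256)
                        0 );           // offset of first displayed byte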
  • trunk/kernel/libk/remote_condvar.c (r635 → r683)

 * remote_condvar.c - remote kernel condition variable implementation.
 *
- * Authors     Alain Greiner (2016,2017,2018,2019)
+ * Authors     Alain Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
…
{
    remote_condvar_t * condvar_ptr;
-    kmem_req_t         req;

    // get pointer on local process descriptor
…
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

-    req.type    = KMEM_KCM;
-    req.order   = bits_log2( sizeof(remote_condvar_t) );
-    req.flags   = AF_ZERO | AF_KERNEL;
-    condvar_ptr = kmem_alloc( &req );
+    // allocate memory for condvar descriptor
+    condvar_ptr = kmem_alloc( bits_log2(sizeof(remote_condvar_t)) , AF_ZERO );

    if( condvar_ptr == NULL )
…
void remote_condvar_destroy( xptr_t condvar_xp )
{
-    kmem_req_t  req;
-
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;
…

    // release memory allocated for condvar descriptor
-    req.type = KMEM_KCM;
-    req.ptr  = condvar_ptr;
-    kmem_remote_free( ref_cxy , &req );
+    kmem_remote_free( ref_cxy , condvar_ptr , bits_log2(sizeof(remote_condvar_t)) );

}  // end remote_convar_destroy()
  • trunk/kernel/libk/remote_condvar.h (r635 → r683)

 * remote_condvar.h: POSIX condition variable definition.
 *
- * Authors  Alain Greiner (2016,2017,2018,2019)
+ * Authors  Alain Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/libk/remote_fifo.c (r657 → r683)

        fifo->wr_id     = 0;
        fifo->rd_id     = 0;
-    for( slot = 0 ; slot < CONFIG_REMOTE_FIFO_SLOTS ; slot++ )
+    for( slot = 0 ; slot < CONFIG_RPC_FIFO_SLOTS ; slot++ )
    {
        fifo->valid[slot] = 0;
…

    // wait until allocated slot is empty in remote FIFO
-    // max retry = CONFIG_REMOTE_FIFO_MAX_ITERATIONS
+    // max retry = CONFIG_RPC_FIFO_MAX_ITERATIONS
    // return error if watchdog is reached
    while( 1 )
    {
        // return error if contention detected by watchdog
-        if( watchdog > CONFIG_REMOTE_FIFO_MAX_ITERATIONS )  return EBUSY;
+        if( watchdog > CONFIG_RPC_FIFO_MAX_ITERATIONS )  return EBUSY;

        // read remote rd_id value
…

        // exit waiting loop as soon as fifo not full
-        if ( nslots < CONFIG_REMOTE_FIFO_SLOTS )  break;
+        if ( nslots < CONFIG_RPC_FIFO_SLOTS )  break;

        // retry later if fifo full:
…

    // compute actual write slot pointer
-    ptw = wr_id % CONFIG_REMOTE_FIFO_SLOTS;
+    ptw = wr_id % CONFIG_RPC_FIFO_SLOTS;

    // copy item to fifo
…

    // compute actual read slot pointer
-       uint32_t ptr = rd_id % CONFIG_REMOTE_FIFO_SLOTS;
+       uint32_t ptr = rd_id % CONFIG_RPC_FIFO_SLOTS;

        // wait slot filled by the writer
…
    else                 nslots = (0xFFFFFFFF - rd_id) + wr_id;

-    return ( nslots >= CONFIG_REMOTE_FIFO_SLOTS );
+    return ( nslots >= CONFIG_RPC_FIFO_SLOTS );
}
  • trunk/kernel/libk/remote_fifo.h (r563 → r683)

 * that is used for - RPC based - inter cluster communications.
 * Each FIF0 slot can contain one 64 bits integer (or one extended pointer).
- * The number of slots is defined by the CONFIG_REMOTE_FIFO_SLOTS parameter.
+ * The number of slots is defined by the CONFIG_RPC_FIFO_SLOTS parameter.
 * - The write accesses are implemented using a lock-free algorithm, as it uses
 *   a ticket based mechanism to handle concurrent access between multiple writers.
…
 *   and RPC threads cannot have local index LTID = 0.
 *
- * WARNING : Each FIFO requires 12 + (12 * CONFIG_REMOTE_FIFO_SLOTS) bytes.
+ * WARNING : Each FIFO requires 12 + (12 * CONFIG_RPC_FIFO_SLOTS) bytes.
 ***********************************************************************************/

…
        volatile uint32_t  wr_id;                            /*! write slot index      */
        volatile uint32_t  rd_id;                            /*! read  slot index      */
-    volatile uint32_t  valid[CONFIG_REMOTE_FIFO_SLOTS];  /*! empty slot if 0       */
-       uint64_t           data[CONFIG_REMOTE_FIFO_SLOTS];   /*! fifo slot content     */
+    volatile uint32_t  valid[CONFIG_RPC_FIFO_SLOTS];     /*! empty slot if 0       */
+       uint64_t           data[CONFIG_RPC_FIFO_SLOTS];      /*! fifo slot content     */
}
remote_fifo_t;
…
 * the slot is empty, using a descheduling policy without blocking if required.
 * It implements a watchdog, returning when the item has been successfully
- * registered, or after CONFIG_REMOTE_FIFO_MAX_ITERATIONS failures.
+ * registered, or after CONFIG_RPC_FIFO_MAX_ITERATIONS failures.
 ************************************************************************************
 * @ fifo    : extended pointer to the remote fifo.
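The renamed CONFIG_RPC_FIFO_* parameters feed the same wrap-safe arithmetic as
before; the occupancy test and write-slot computation from the remote_fifo.c hunks
above, for reference:

    /* number of occupied slots, robust to 32-bit wrap-around of wr_id / rd_id */
    if( wr_id >= rd_id ) nslots = wr_id - rd_id;
    else                 nslots = (0xFFFFFFFF - rd_id) + wr_id;
    return ( nslots >= CONFIG_RPC_FIFO_SLOTS );     // full if no free slot

    /* write slot actually used by a producer holding ticket wr_id */
    ptw = wr_id % CONFIG_RPC_FIFO_SLOTS;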
  • trunk/kernel/libk/remote_mutex.c (r635 → r683)

 * remote_mutex.c - POSIX mutex implementation.
 *
- * Authors   Alain   Greiner (2016,2017,2018,2019)
+ * Authors   Alain   Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
…
{
    remote_mutex_t * mutex_ptr;
-    kmem_req_t       req;

    // get pointer on local process descriptor
…

    // allocate memory for mutex descriptor in reference cluster
-    req.type    = KMEM_KCM;
-    req.order   = bits_log2( sizeof(remote_mutex_t) );
-    req.flags   = AF_ZERO | AF_KERNEL;
-    mutex_ptr   = kmem_remote_alloc( ref_cxy , &req );
+    mutex_ptr = kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_mutex_t)) , AF_ZERO );

    if( mutex_ptr == NULL )
…
void remote_mutex_destroy( xptr_t mutex_xp )
{
-    kmem_req_t  req;
-
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;
…

    // release memory allocated for mutex descriptor
-    req.type = KMEM_KCM;
-    req.ptr  = mutex_ptr;
-    kmem_remote_free( mutex_cxy , &req );
+    kmem_remote_free( mutex_cxy , mutex_ptr , bits_log2(sizeof(remote_mutex_t)) );

}  // end remote_mutex_destroy()
  • trunk/kernel/libk/remote_sem.c (r671 → r683)

 * remote_sem.c - POSIX unnamed semaphore implementation.
 *
- * Author   Alain Greiner  (2016,2017,2018,2019)
+ * Author   Alain Greiner  (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
…
                           uint32_t   value )
{
-    kmem_req_t     req;
    remote_sem_t * sem_ptr;

…

    // allocate memory for new semaphore in reference cluster
-    req.type  = KMEM_KCM;
-    req.order = bits_log2( sizeof(remote_sem_t) );
-    req.flags = AF_ZERO | AF_KERNEL;
-    sem_ptr   = kmem_remote_alloc( ref_cxy, &req );
+    sem_ptr = kmem_remote_alloc( ref_cxy , bits_log2(sizeof(remote_sem_t)) , AF_ZERO );

    if( sem_ptr == NULL )
…
void remote_sem_destroy( xptr_t sem_xp )
{
-    kmem_req_t  req;
-
    // get pointer on local process descriptor
    process_t * process = CURRENT_THREAD->process;
…

    // release memory allocated for semaphore descriptor
-    req.type = KMEM_KCM;
-    req.ptr  = sem_ptr;
-    kmem_remote_free( sem_cxy , &req );
+    kmem_remote_free( sem_cxy , sem_ptr , bits_log2(sizeof(remote_sem_t)) );

}  // end remote_sem_destroy()
  • trunk/kernel/libk/remote_sem.h (r581 → r683)

 * remote_sem.h - POSIX unnamed semaphore definition.
 *
- * Author   Alain Greiner (2016,2017,2018)
+ * Author   Alain Greiner    (2016,2017,2018,2019,2020)
 *
 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/kernel/libk/user_dir.c (r671 → r683)

 * user_dir.c - kernel DIR related operations implementation.
 *
- * Authors   Alain   Greiner (2016,2017,2018,2019)
+ * Authors   Alain   Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites
…
    list_entry_t    root;              // root of temporary list of allocated pages
    uint32_t        page_id;           // page index in list of physical pages
-    kmem_req_t      req;               // kmem request descriptor
    ppn_t           fake_ppn;          // unused, but required by hal_gptlock_pte()
    uint32_t        fake_attr;         // unused, but required by hal_gptlock_pte()
    error_t         error;
+
+#if DEBUG_USER_DIR_CREATE || DEBUG_USER_DIR_ERROR
+uint32_t   cycle = (uint32_t)hal_get_cycles();
+thread_t * this  = CURRENT_THREAD;
+#endif

    // get cluster, local pointer, and pid of reference process
…
    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );

-#if DEBUG_USER_DIR
-uint32_t cycle = (uint32_t)hal_get_cycles();
-thread_t * this = CURRENT_THREAD;
-if( cycle > DEBUG_USER_DIR )
+#if DEBUG_USER_DIR_CREATE
+if( DEBUG_USER_DIR_CREATE < cycle )
printk("\n[%s] thread[%x,%x] enter for inode (%x,%x) and process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, ref_pid, cycle );
…
    // allocate memory for a local user_dir descriptor
-    req.type  = KMEM_KCM;
-    req.order = bits_log2( sizeof(user_dir_t) );
-    req.flags = AF_ZERO | AF_KERNEL;
-    dir       = kmem_alloc( &req );
+    dir = kmem_alloc( bits_log2(sizeof(user_dir_t)) , AF_ZERO );

    if( dir == NULL )
    {
-        printk("\n[ERROR] in %s : cannot allocate user_dir_t in cluster %x\n",
-        __FUNCTION__, local_cxy );
+
+#if DEBUG_USER_DIR_ERROR
+printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate user_dir_t in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
+#endif
        return NULL;
    }

-    // Build and initialize the dirent array as a list of pages.
-    // For each iteration in this while loop:
+    // First loop to build and initialize the dirent array
+    // as a temporary list of pages. For each iteration :
    // - allocate one physical 4 Kbytes (64 dirent slots)
    // - call the relevant FS specific function to scan the directory mapper,
…
    {
        // allocate one physical page
-        req.type  = KMEM_PPM;
-        req.order = 0;
-        req.flags = AF_ZERO;
-        base      = kmem_alloc( &req );
+        base = kmem_alloc( CONFIG_PPM_PAGE_ORDER , AF_ZERO );

        if( base == NULL )
        {
-            printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n",
-            __FUNCTION__, ref_cxy );
+
+#if DEBUG_USER_DIR_ERROR
+printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate page in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
+#endif
            goto user_dir_create_failure;
        }
…
        if( error )
        {
-            printk("\n[ERROR] in %s : cannot initialise dirent array in cluster %x\n",
-            __FUNCTION__, ref_cxy );
+
+#if DEBUG_USER_DIR_ERROR
+printk("\n[ERROR] in %s : thread[%x,%x] cannot initialize dirent array in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
+#endif
            goto user_dir_create_failure;
        }
…
    } // end while

-#if DEBUG_USER_DIR
-if( cycle > DEBUG_USER_DIR )
+#if DEBUG_USER_DIR_CREATE
+if( DEBUG_USER_DIR_CREATE < cycle )
printk("\n[%s] thread[%x,%x] initialised dirent array / %d entries\n",
__FUNCTION__, this->process->pid, this->trdid, total_dirents, cycle );
…
    if( vseg == NULL )
    {
-        printk("\n[ERROR] in %s : cannot create vseg for user_dir in cluster %x\n",
-        __FUNCTION__, ref_cxy);
+
+#if DEBUG_USER_DIR_ERROR
+printk("\n[ERROR] in %s : thread[%x,%x] cannot create vseg in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, local_cxy, cycle );
+#endif
        goto user_dir_create_failure;
    }

-#if DEBUG_USER_DIR
-if( cycle > DEBUG_USER_DIR )
+#if DEBUG_USER_DIR_CREATE
+if( DEBUG_USER_DIR_CREATE < cycle )
printk("\n[%s] thread[%x,%x] allocated vseg ANON / base %x / size %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg->min, vseg->max - vseg->min );
…
    vpn_base = hal_remote_l32( XPTR( ref_cxy , &vseg->vpn_base ) );

-    // scan the list of allocated physical pages to map
+    // Second loop on the allocated physical pages to map
    // all physical pages in the reference process GPT
+    // The pages are mapped in the user process GPT, but
+    // are removed from the temporary list
+
    page_id = 0;
+
    while( list_is_empty( &root ) == false )
    {
…
        if( error )
        {
-            printk("\n[ERROR] in %s : cannot map vpn %x in GPT\n",
-            __FUNCTION__, vpn );
-
+
+#if DEBUG_USER_DIR_ERROR
+printk("\n[ERROR] in %s : thread[%x,%x] cannot map vpn %x in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, cycle );
+#endif
            // delete the vseg
            intptr_t base = (intptr_t)hal_remote_lpt( XPTR( ref_cxy , &vseg->min ) );
…

            // release the user_dir descriptor
-            req.type = KMEM_KCM;
-            req.ptr  = dir;
-            kmem_free( &req );
+            kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
            return NULL;
        }
…
                         ppn );

-#if DEBUG_USER_DIR
-if( cycle > DEBUG_USER_DIR )
+#if DEBUG_USER_DIR_CREATE
+if( DEBUG_USER_DIR_CREATE < cycle )
printk("\n[%s] thread[%x,%x] mapped vpn %x to ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn + page_id, ppn );
…
    dir->current = 0;
    dir->entries = total_dirents;
-    dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_SHIFT);
+    dir->ident   = (intptr_t)(vpn_base << CONFIG_PPM_PAGE_ORDER);

    // build extended pointers on root and lock of user_dir xlist in ref process
…
    remote_queuelock_release( lock_xp );

-#if DEBUG_USER_DIR
-cycle = (uint32_t)hal_get_cycles();
-if( cycle > DEBUG_USER_DIR )
+#if DEBUG_USER_DIR_CREATE
+if( DEBUG_USER_DIR_CREATE < cycle )
printk("\n[%s] thread[%x,%x] created user_dir (%x,%x) / %d entries / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, total_dirents, cycle );
…
user_dir_create_failure:

-    // release local user_dir_t structure
-    req.type = KMEM_KCM;
-    req.ptr  = dir;
-    kmem_free( &req );
-
-    // release local physical pages
+    // release user_dir_t structure
+    kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
+
+    // release physical pages
    while( list_is_empty( &root ) == false )
    {
+        // get page descriptor
        page = LIST_FIRST( &root , page_t , list );
…
        base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );

-        req.type  = KMEM_PPM;
-        req.ptr   = base;
-        kmem_free( &req );
+        // release the page
+        kmem_free( base , CONFIG_PPM_PAGE_ORDER );
    }
…
    cluster = LOCAL_CLUSTER;

+#if DEBUG_USER_DIR_DESTROY
+uint32_t cycle = (uint32_t)hal_get_cycles();
+#endif
+
    // get cluster, local pointer, and PID of reference user process
    ref_cxy = GET_CXY( ref_xp );
…
    ref_pid = hal_remote_l32( XPTR( ref_cxy , &ref_ptr->pid ) );

-#if DEBUG_USER_DIR
-uint32_t cycle = (uint32_t)hal_get_cycles();
-if( cycle > DEBUG_USER_DIR )
+#if DEBUG_USER_DIR_DESTROY
+if( DEBUG_USER_DIR_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for user_dir (%x,%x) and process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, ref_pid, cycle );
…
        hal_atomic_add( &responses , 1 );

-#if (DEBUG_USER_DIR & 1)
-uint32_t cycle = (uint32_t)hal_get_cycles();
-if( cycle > DEBUG_USER_DIR )
+#if (DEBUG_USER_DIR_DESTROY & 1)
+if(  DEBUG_USER_DIR_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, process_cxy );
…

    // release local user_dir_t structure
-    kmem_req_t  req;
-    req.type = KMEM_KCM;
-    req.ptr  = dir;
-    kmem_free( &req );
-
-#if DEBUG_USER_DIR
+    kmem_free( dir , bits_log2(sizeof(user_dir_t)) );
+
+#if DEBUG_USER_DIR_DESTROY
cycle = (uint32_t)hal_get_cycles();
-if( cycle > DEBUG_USER_DIR )
+if( DEBUG_USER_DIR_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] deleted user_dir (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, dir, cycle );
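The single DEBUG_USER_DIR switch is split into DEBUG_USER_DIR_CREATE,
DEBUG_USER_DIR_DESTROY and DEBUG_USER_DIR_ERROR; every debug site now follows the
same guard pattern (a sketch assembled from the hunks above; the printk text is
abbreviated):

    #if DEBUG_USER_DIR_CREATE || DEBUG_USER_DIR_ERROR
    uint32_t   cycle = (uint32_t)hal_get_cycles();   // sampled once per function
    thread_t * this  = CURRENT_THREAD;
    #endif

    #if DEBUG_USER_DIR_CREATE
    if( DEBUG_USER_DIR_CREATE < cycle )              // only after the given cycle
    printk("\n[%s] thread[%x,%x] ... / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, cycle );
    #endif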
  • trunk/kernel/libk/user_dir.h (r651 → r683)

 * user_dir.h -  DIR related operations definition.
 *
- * Authors   Alain Greiner   (2016,2017,2018,2019)
+ * Authors   Alain Greiner   (2016,2017,2018,2019,2020)
 *
 * Copyright (c) UPMC Sorbonne Universites