Changeset 623 for trunk/kernel


Timestamp:
Mar 6, 2019, 4:37:15 PM
Author:
alain
Message:

Introduce three new types of vsegs (KCODE, KDATA, KDEV)
to map the kernel vsegs in the process VSL and GPT.
This is now used by both the TSAR and the I86 architectures.
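
The vseg declarations themselves are not part of this excerpt (only fs/ files are shown below). As a rough illustration only, the three new kernel types could appear as extra enumerators in the vseg type declaration; the enumerator names and the pre-existing user types below are assumptions, not copied from mm/vseg.h:

    /* Illustrative sketch only : names and ordering are assumed, not taken from vseg.h */
    typedef enum
    {
        VSEG_TYPE_CODE,     /* user code             (pre-existing)  */
        VSEG_TYPE_DATA,     /* user data             (pre-existing)  */
        VSEG_TYPE_STACK,    /* user stack            (pre-existing)  */

        VSEG_TYPE_KCODE,    /* kernel code segment   (new in r623)   */
        VSEG_TYPE_KDATA,    /* kernel data segment   (new in r623)   */
        VSEG_TYPE_KDEV,     /* kernel device segment (new in r623)   */
    }
    vseg_type_t;
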

Location:
trunk/kernel
Files:
41 edited

  • trunk/kernel/fs/devfs.c

    r614 r623  
    33 *
    44 * Author   Mohamed Lamine Karaoui (2014,2015)
    5  *          Alain Greiner (2016,2017)
     5 *          Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) Sorbonne Universites
     
    9191                        xptr_t * devfs_external_inode_xp )
    9292{
    93     error_t  error;
    94     xptr_t   unused_xp;   // required by vfs_add_child_in_parent()
     93    error_t       error;
     94    xptr_t        unused_xp;   // required by vfs_add_child_in_parent()
     95    vfs_inode_t * inode;
    9596
    9697    // create DEVFS "dev" inode in cluster 0
    9798    error = vfs_add_child_in_parent( 0,                // cxy
    98                                      INODE_TYPE_DIR,
    9999                                     FS_TYPE_DEVFS,
    100100                                     root_inode_xp,
     
    103103                                     devfs_dev_inode_xp );
    104104
     105    // update inode "type" field
     106    inode = GET_PTR( *devfs_dev_inode_xp );
     107    inode->type = INODE_TYPE_DIR;
     108 
    105109    // create dentries <.> and <..> in <dev>
    106110    error |= vfs_add_special_dentries( *devfs_dev_inode_xp,
    107111                                       root_inode_xp );
    108112
    109 // check success
    110 assert( (error == 0) , "cannot create <dev>\n" );
     113    if( error )
     114    {
     115        printk("\n[PANIC] in %s : cannot create <dev> directory\n", __FUNCTION__ );
     116        hal_core_sleep();
     117    }
    111118
    112119#if DEBUG_DEVFS_GLOBAL_INIT
     
    120127    // create DEVFS "external" inode in cluster 0
    121128    error = vfs_add_child_in_parent( 0,               // cxy
    122                                      INODE_TYPE_DIR,
    123129                                     FS_TYPE_DEVFS,
    124130                                     *devfs_dev_inode_xp,
     
    127133                                     devfs_external_inode_xp );
    128134
     135    // update inode "type" field
     136    inode = GET_PTR( *devfs_external_inode_xp );
     137    inode->type = INODE_TYPE_DIR;
     138 
    129139    // create dentries <.> and <..> in <external>
    130140    error |= vfs_add_special_dentries( *devfs_external_inode_xp,
    131141                                       *devfs_dev_inode_xp );
    132142
    133 // check success
    134 assert( (error == 0) , "cannot create <external>\n" );
     143    if( error )
     144    {
     145        printk("\n[PANIC] in %s : cannot create <external> directory\n", __FUNCTION__ );
     146        hal_core_sleep();
     147    }
    135148
    136149#if DEBUG_DEVFS_GLOBAL_INIT
     
    153166    chdev_t     * chdev_ptr;
    154167    xptr_t        inode_xp;
    155     cxy_t         inode_cxy;
    156168    vfs_inode_t * inode_ptr;
    157169    uint32_t      channel;
     
    171183
    172184    error = vfs_add_child_in_parent( local_cxy,
    173                                      INODE_TYPE_DIR,
    174185                                     FS_TYPE_DEVFS,
    175186                                     devfs_dev_inode_xp,
     
    178189                                     devfs_internal_inode_xp );
    179190
     191    // set inode "type" field
     192    inode_ptr = GET_PTR( *devfs_internal_inode_xp );
     193    inode_ptr->type = INODE_TYPE_DEV;
     194 
    180195    // create dentries <.> and <..> in <internal>
    181196    error |= vfs_add_special_dentries( *devfs_internal_inode_xp,
    182197                                       devfs_dev_inode_xp );
    183198
    184 // check success
    185 assert( (error == 0) , "cannot create <external>\n" );
     199    if( error )
     200    {
     201        printk("\n[PANIC] in %s : cannot create <internal> directory\n", __FUNCTION__ );
     202        hal_core_sleep();
     203    }
    186204
    187205#if DEBUG_DEVFS_LOCAL_INIT
     
    199217        chdev_cxy = GET_CXY( chdev_xp );
    200218
    201 assert( (chdev_cxy == local_cxy ), "illegal MMC chdev in cluster %x\n", local_cxy );
     219        if( chdev_cxy != local_cxy )
     220        {
     221            printk("\n[PANIC] in %s : illegal MMC chdev in cluster %x\n",
     222            __FUNCTION__, local_cxy );
     223            hal_core_sleep();
     224        }
    202225
    203226        error = vfs_add_child_in_parent( local_cxy,
    204                                          INODE_TYPE_DEV,
    205227                                         FS_TYPE_DEVFS,
    206228                                         *devfs_internal_inode_xp,
     
    209231                                         &inode_xp );
    210232
    211 assert( (error == 0) , "cannot create MMC inode\n" );
    212 
    213         // update child inode "extend" field
    214         inode_cxy = GET_CXY( inode_xp );
     233        if( error )
     234        {
     235            printk("\n[PANIC] in %s : cannot create MMC inode in cluster %x\n",
     236            __FUNCTION__, local_cxy );
     237            hal_core_sleep();
     238        }
     239
     240        // update child inode "extend" and "type" fields
    215241        inode_ptr = GET_PTR( inode_xp );
    216         hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     242        inode_ptr->extend = chdev_ptr;
     243        inode_ptr->type   = INODE_TYPE_DEV;
    217244       
    218245#if DEBUG_DEVFS_LOCAL_INIT
     
    234261            chdev_cxy = GET_CXY( chdev_xp );
    235262
    236 assert( (chdev_cxy == local_cxy ), "illegal DMA chdev in cluster %x\n", local_cxy );
     263            if( chdev_cxy != local_cxy )
     264            {
      265                printk("\n[PANIC] in %s : illegal DMA chdev in cluster %x\n",
     266                __FUNCTION__, local_cxy );
     267                hal_core_sleep();
     268            }
    237269
    238270            error = vfs_add_child_in_parent( local_cxy,
    239                                              INODE_TYPE_DEV,
    240271                                             FS_TYPE_DEVFS,
    241272                                             *devfs_internal_inode_xp,
     
    243274                                             &unused_xp,
    244275                                             &inode_xp );
    245 
    246 assert( (error == 0) , "cannot create DMA inode\n" );
    247 
    248             // update child inode "extend" field
    249             inode_cxy = GET_CXY( inode_xp );
     276            if( error )
     277            {
     278                printk("\n[PANIC] in %s : cannot create DMA inode in cluster %x\n",
     279                __FUNCTION__, local_cxy );
     280                hal_core_sleep();
     281            }
     282
     283            // update child inode "extend" and "type" fields
    250284            inode_ptr = GET_PTR( inode_xp );
    251             hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     285            inode_ptr->extend = chdev_ptr;
     286            inode_ptr->type   = INODE_TYPE_DEV;
    252287       
    253288#if DEBUG_DEVFS_LOCAL_INIT
     
    270305        {
    271306            error = vfs_add_child_in_parent( local_cxy,
    272                                              INODE_TYPE_DEV,
    273307                                             FS_TYPE_DEVFS,
    274308                                             devfs_external_inode_xp,
     
    276310                                             &unused_xp,
    277311                                             &inode_xp );
    278 
    279 assert( (error == 0) , "cannot create IOB inode\n" );
    280 
    281             // update child inode "extend" field
    282             inode_cxy = GET_CXY( inode_xp );
     312            if( error )
     313            {
     314                printk("\n[PANIC] in %s : cannot create IOB inode in cluster %x\n",
     315                __FUNCTION__, local_cxy );
     316                hal_core_sleep();
     317            }
     318
     319            // update child inode "extend" and "type" fields
    283320            inode_ptr = GET_PTR( inode_xp );
    284             hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     321            inode_ptr->extend = chdev_ptr;
     322            inode_ptr->type   = INODE_TYPE_DEV;
    285323       
    286324#if DEBUG_DEVFS_LOCAL_INIT
     
    303341        {
    304342            error = vfs_add_child_in_parent( local_cxy,
    305                                              INODE_TYPE_DEV,
    306343                                             FS_TYPE_DEVFS,
    307344                                             devfs_external_inode_xp,
     
    310347                                             &inode_xp );
    311348
    312 assert( (error == 0) , "cannot create PIC inode\n" );
     349            if( error )
     350            {
     351                printk("\n[PANIC] in %s : cannot create PIC inode in cluster %x\n",
     352                __FUNCTION__, local_cxy );
     353                hal_core_sleep();
     354            }
    313355
    314356            // update child inode "extend" field
    315             inode_cxy = GET_CXY( inode_xp );
    316357            inode_ptr = GET_PTR( inode_xp );
    317             hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     358            inode_ptr->extend = chdev_ptr;
     359            inode_ptr->type   = INODE_TYPE_DEV;
    318360       
    319361#if DEBUG_DEVFS_LOCAL_INIT
     
    338380            {
    339381                error = vfs_add_child_in_parent( local_cxy,
    340                                                  INODE_TYPE_DEV,
    341382                                                 FS_TYPE_DEVFS,
    342383                                                 devfs_external_inode_xp,
     
    345386                                                 &inode_xp );
    346387
    347 assert( (error == 0) , "cannot create TXT_RX inode\n" );
    348 
    349                 // update child inode "extend" field
    350                 inode_cxy = GET_CXY( inode_xp );
     388                if( error )
     389                {
     390                    printk("\n[PANIC] in %s : cannot create TXT_RX inode in cluster %x\n",
     391                    __FUNCTION__, local_cxy );
     392                    hal_core_sleep();
     393                }
     394
     395                // update child inode "extend" and "type" fields
    351396                inode_ptr = GET_PTR( inode_xp );
    352                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     397                inode_ptr->extend = chdev_ptr;
     398                inode_ptr->type   = INODE_TYPE_DEV;
    353399       
    354400#if DEBUG_DEVFS_LOCAL_INIT
     
    374420            {
    375421                error = vfs_add_child_in_parent( local_cxy,
    376                                                  INODE_TYPE_DEV,
    377422                                                 FS_TYPE_DEVFS,
    378423                                                 devfs_external_inode_xp,
     
    380425                                                 &unused_xp,
    381426                                                 &inode_xp );
    382 
    383 assert( (error == 0) , "cannot create TXT_TX inode\n" );
    384 
    385                 // update child inode "extend" field
    386                 inode_cxy = GET_CXY( inode_xp );
     427                if( error )
     428                {
     429                    printk("\n[PANIC] in %s : cannot create TXT_TX inode in cluster %x\n",
     430                    __FUNCTION__, local_cxy );
     431                    hal_core_sleep();
     432                }
     433
     434                // update child inode "extend" and "type" fields
    387435                inode_ptr = GET_PTR( inode_xp );
    388                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     436                inode_ptr->extend = chdev_ptr;
     437                inode_ptr->type   = INODE_TYPE_DEV;
    389438       
    390439#if DEBUG_DEVFS_LOCAL_INIT
     
    410459            {
    411460                error = vfs_add_child_in_parent( local_cxy,
    412                                                  INODE_TYPE_DEV,
    413461                                                 FS_TYPE_DEVFS,
    414462                                                 devfs_external_inode_xp,
     
    416464                                                 &unused_xp,
    417465                                                 &inode_xp );
    418 
    419 assert( (error == 0) , "cannot create IOC inode\n" );
    420 
    421                 // update child inode "extend" field
    422                 inode_cxy = GET_CXY( inode_xp );
     466                if( error )
     467                {
     468                    printk("\n[PANIC] in %s : cannot create IOC inode in cluster %x\n",
     469                    __FUNCTION__, local_cxy );
     470                    hal_core_sleep();
     471                }
     472
     473                // update child inode "extend" and "type" fields
    423474                inode_ptr = GET_PTR( inode_xp );
    424                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     475                inode_ptr->extend = chdev_ptr;
     476                inode_ptr->type   = INODE_TYPE_DEV;
    425477       
    426478#if DEBUG_DEVFS_LOCAL_INIT
     
    446498            {
    447499                error = vfs_add_child_in_parent( local_cxy,
    448                                                  INODE_TYPE_DEV,
    449500                                                 FS_TYPE_DEVFS,
    450501                                                 devfs_external_inode_xp,
     
    452503                                                 &unused_xp,
    453504                                                 &inode_xp );
    454 
    455 assert( (error == 0) , "cannot create FBF inode\n" );
    456 
    457                 // update child inode "extend" field
    458                 inode_cxy = GET_CXY( inode_xp );
     505                if( error )
     506                {
     507                    printk("\n[PANIC] in %s : cannot create FBF inode in cluster %x\n",
     508                    __FUNCTION__, local_cxy );
     509                    hal_core_sleep();
     510                }
     511
     512                // update child inode "extend" and "type" fields
    459513                inode_ptr = GET_PTR( inode_xp );
    460                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     514                inode_ptr->extend = chdev_ptr;
     515                inode_ptr->type   = INODE_TYPE_DEV;
    461516       
    462517#if DEBUG_DEVFS_LOCAL_INIT
     
    482537            {
    483538                error = vfs_add_child_in_parent( local_cxy,
    484                                                  INODE_TYPE_DEV,
    485539                                                 FS_TYPE_DEVFS,
    486540                                                 devfs_external_inode_xp,
     
    488542                                                 &unused_xp,
    489543                                                 &inode_xp );
    490 
    491 assert( (error == 0) , "cannot create NIC_RX inode\n" );
    492 
    493                 // update child inode "extend" field
    494                 inode_cxy = GET_CXY( inode_xp );
     544                if( error )
     545                {
     546                    printk("\n[PANIC] in %s : cannot create NIC_RX inode in cluster %x\n",
     547                    __FUNCTION__, local_cxy );
     548                    hal_core_sleep();
     549                }
     550
     551                // update child inode "extend" and "type" fields
    495552                inode_ptr = GET_PTR( inode_xp );
    496                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     553                inode_ptr->extend = chdev_ptr;
     554                inode_ptr->type   = INODE_TYPE_DEV;
    497555 
    498556#if DEBUG_DEVFS_LOCAL_INIT
     
    518576            {
    519577                error = vfs_add_child_in_parent( local_cxy,
    520                                                  INODE_TYPE_DEV,
    521578                                                 FS_TYPE_DEVFS,
    522579                                                 devfs_external_inode_xp,
     
    524581                                                 &unused_xp,
    525582                                                 &inode_xp );
    526 
    527 assert( (error == 0) , "cannot create NIC_TX inode\n" );
    528 
    529                 // update child inode "extend" field
    530                 inode_cxy = GET_CXY( inode_xp );
     583                if( error )
     584                {
     585                    printk("\n[PANIC] in %s : cannot create NIC_TX inode in cluster %x\n",
     586                    __FUNCTION__, local_cxy );
     587                    hal_core_sleep();
     588                }
     589
     590                // update child inode "extend" and "type" fields
    531591                inode_ptr = GET_PTR( inode_xp );
    532                 hal_remote_spt( XPTR( inode_cxy , &inode_ptr->extend ) , chdev_ptr );
     592                inode_ptr->extend = chdev_ptr;
     593                inode_ptr->type   = INODE_TYPE_DEV;
    533594       
    534595#if DEBUG_DEVFS_LOCAL_INIT
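
Across devfs.c the pattern of each change is the same: the inode type argument is removed from vfs_add_child_in_parent(), the caller now sets the inode "type" field itself, and the configurable assert() checks become unconditional panics. A condensed sketch of that calling pattern follows; the function name and the <cxy>, <parent_xp> and <name> parameters are placeholders, not actual devfs code:

    // Illustrative sketch of the r623 calling pattern (placeholder names).
    static void devfs_create_dir_sketch( cxy_t cxy , xptr_t parent_xp , char * name )
    {
        xptr_t        dentry_xp;
        xptr_t        inode_xp;
        vfs_inode_t * inode;
        error_t       error;

        // the inode type argument has been removed from vfs_add_child_in_parent()
        error = vfs_add_child_in_parent( cxy,
                                         FS_TYPE_DEVFS,
                                         parent_xp,
                                         name,
                                         &dentry_xp,
                                         &inode_xp );

        // assert() is replaced by an unconditional panic
        if( error )
        {
            printk("\n[PANIC] in %s : cannot create <%s>\n", __FUNCTION__, name );
            hal_core_sleep();
        }

        // the caller now sets the inode "type" field explicitly
        inode       = GET_PTR( inode_xp );
        inode->type = INODE_TYPE_DIR;
    }
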
  • trunk/kernel/fs/fatfs.c

    r614 r623  
    793793#if (DEBUG_FATFS_CTX_INIT & 0x1)
    794794if( DEBUG_FATFS_CTX_INIT < cycle )
    795 {
    796     uint32_t   line;
    797     uint32_t   byte = 0;
    798     printk("\n***** %s : FAT boot record\n", __FUNCTION__ );
    799     for ( line = 0 ; line < 32 ; line++ )
    800     {
    801         printk(" %X | %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x |\n",
    802                byte,
    803                buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3],
    804                buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7],
    805                buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11],
    806                buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] );
    807 
    808          byte += 16;
    809     }
    810 }
     795putb( "boot record", buffer , 256 );
    811796#endif
    812797
     
    960945assert( (inode != NULL) , "inode pointer is NULL\n" );
    961946assert( (dentry != NULL) , "dentry pointer is NULL\n" );
    962 assert( (inode->type == INODE_TYPE_DIR) , "inode is not a directory\n" );
    963947assert( (inode->mapper != NULL ) , "mapper pointer is NULL\n" );
    964948 
     
    13591343}  // end fatfs_remove_dentry
    13601344
    1361 /////////////////////////////////////////////////////
    1362 error_t fatfs_get_dentry( vfs_inode_t * parent_inode,
    1363                           char        * name,
    1364                           xptr_t        child_inode_xp )
     1345
     1346//////////////////////////////////////////////////////////////////////////////////////////////
      1347// This static function scans the pages of a mapper containing a FAT32 directory, identified
      1348// by the <mapper> argument, to find the directory entry identified by the <name> argument,
      1349// and returns a pointer on the directory entry, described as an array of 32 bytes, and the
      1350// index of this entry in the FAT32 mapper, seen as an array of 32-byte entries.
     1351// It is called by the fatfs_new_dentry() and fatfs_update_dentry() functions.
     1352// It must be called by a thread running in the cluster containing the mapper.
     1353//////////////////////////////////////////////////////////////////////////////////////////////
     1354// @ mapper    : [in]  local pointer on directory mapper.
     1355// @ name      : [in]  searched directory entry name.
     1356// @ entry     : [out] buffer for the pointer on the 32 bytes directory entry (when found).
     1357// @ index     : [out] buffer for the directory entry index in mapper.
     1358// @ return 0 if found / return 1 if not found / return -1 if mapper access error.
     1359//////////////////////////////////////////////////////////////////////////////////////////////
     1360error_t fatfs_scan_directory( mapper_t *  mapper,
     1361                              char     *  name,
     1362                              uint8_t  ** entry,
     1363                              uint32_t *  index )
    13651364{
    13661365    // Two embedded loops to scan the directory mapper:
     
    13681367    // - scan the directory entries in each 4 Kbytes page
    13691368
    1370 #if DEBUG_FATFS_GET_DENTRY
     1369// check parent_inode and child_inode
     1370assert( (mapper != NULL) , "mapper pointer is NULL\n" );
     1371assert( (name   != NULL ), "child name is undefined\n" );
     1372assert( (entry  != NULL ), "entry buffer undefined\n" );
     1373
     1374#if DEBUG_FATFS_SCAN_DIRECTORY
    13711375char       parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
    13721376uint32_t   cycle = (uint32_t)hal_get_cycles();
    13731377thread_t * this  = CURRENT_THREAD;
    1374 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
    1375 if( DEBUG_FATFS_GET_DENTRY < cycle )
    1376 printk("\n[%s]  thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
     1378vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , parent_name );
     1379if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
     1380printk("\n[%s]  thread[%x,%x] enter to search child <%s> in parent <%s> / cycle %d\n",
    13771381__FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );
    13781382#endif
    13791383
    1380 // check parent_inode and child_inode
    1381 assert( (parent_inode != NULL) , "parent_inode is NULL\n" );
    1382 assert( (child_inode_xp != XPTR_NULL ) , "child_inode is XPTR_NULL\n" );
    1383 
    1384     mapper_t * mapper    = parent_inode->mapper;
    1385     xptr_t     mapper_xp = XPTR( local_cxy , mapper );
    1386 
    1387 // check parent mapper
    1388 assert( (mapper != NULL) , "parent mapper is NULL\n");
    1389    
    1390     char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracter from each directory entry
     1384    char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracted from each directory entry
    13911385
    13921386    char       lfn1[16];         // buffer for one partial cname
    13931387    char       lfn2[16];         // buffer for one partial cname
    13941388    char       lfn3[16];         // buffer for one partial cname
     1389    xptr_t     mapper_xp;        // extended pointer on mapper descriptor
    13951390    xptr_t     page_xp;          // extended pointer on page descriptor
    13961391    xptr_t     base_xp;          // extended pointer on page base
     
    14001395    uint32_t   seq;              // sequence index
    14011396    uint32_t   lfn       = 0;    // LFN entries number
    1402     uint32_t   size      = 0;    // searched file/dir size (bytes)
    1403     uint32_t   cluster   = 0;    // searched file/dir cluster index
    1404     uint32_t   is_dir    = 0;    // searched file/dir type
    1405     int32_t    found     = 0;    // not found (0) / name found (1) / end of dir (-1)
     1397    int32_t    found     = 0;    // not yet = 0 / success = 1 / not found = 2 / error = -1
    14061398    uint32_t   page_id   = 0;    // page index in mapper
    1407     uint32_t   dentry_id = 0;    // directory entry index
    14081399    uint32_t   offset    = 0;    // byte offset in page
    14091400
    1410     // scan the parent directory mapper
     1401    mapper_xp = XPTR( local_cxy , mapper );
     1402
     1403    // scan the mapper pages
    14111404    while ( found == 0 )
    14121405    {
     
    14141407        page_xp = mapper_remote_get_page( mapper_xp , page_id );
    14151408
    1416         if( page_xp == XPTR_NULL) return EIO;
     1409        if( page_xp == XPTR_NULL)
     1410        {
     1411            found = -1;
     1412        }
    14171413
    14181414        // get page base
     
    14201416        base    = (uint8_t *)GET_PTR( base_xp );
    14211417
    1422 #if (DEBUG_FATFS_GET_DENTRY & 0x1)
    1423 if( DEBUG_FATFS_GET_DENTRY < cycle )
     1418#if (DEBUG_FATFS_SCAN_DIRECTORY & 0x1)
     1419if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
    14241420mapper_display_page( mapper_xp , page_id , 256 );
    14251421#endif
     
    14321428            if (ord == NO_MORE_ENTRY)                 // no more entry => break
    14331429            {
    1434                 found = -1;
     1430                found = 2;
    14351431            }
    14361432            else if ( ord == FREE_ENTRY )             // free entry => skip
     
    14771473                if ( strcmp( name , cname ) == 0 )
    14781474                {
    1479                     cluster = (fatfs_get_record( DIR_FST_CLUS_HI , base + offset , 1 ) << 16) |
    1480                               (fatfs_get_record( DIR_FST_CLUS_LO , base + offset , 1 )      ) ;
    1481                     dentry_id = ((page_id<<12) + offset)>>5;
    1482                     is_dir    = ((attr & ATTR_DIRECTORY) == ATTR_DIRECTORY);
    1483                     size      = fatfs_get_record( DIR_FILE_SIZE , base + offset , 1 );
     1475                    *entry = base + offset;
     1476                    *index = ((page_id<<12) + offset)>>5;
    14841477                    found     = 1;
    14851478                }
     
    14941487    }  // end loop on pages
    14951488
    1496     // analyse the result of scan
    1497 
    1498     if ( found == -1 )  // found end of directory => failure
    1499     {
     1489    if( found == 1 )
     1490    {
     1491
     1492#if DEBUG_FATFS_SCAN_DIRECTORY
     1493cycle = (uint32_t)hal_get_cycles();
     1494if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
     1495printk("\n[%s]  thread[%x,%x] exit / found child <%s> in <%s>\n",
     1496__FUNCTION__, this->process->pid, this->trdid, name, parent_name );
     1497#endif
     1498        return 0;
     1499    }
     1500    else if( found == 2 )
     1501    {
     1502
     1503#if DEBUG_FATFS_SCAN_DIRECTORY
     1504cycle = (uint32_t)hal_get_cycles();
     1505if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
     1506printk("\n[%s]  thread[%x,%x] exit / child <%s> in <%s> not found\n",
     1507__FUNCTION__, this->process->pid, this->trdid, name, parent_name );
     1508#endif
     1509        return 1;
     1510    }
     1511    else
     1512    {
     1513        printk("\n[ERROR] in %s : cannot get page %d from mapper\n",
     1514        __FUNCTION__, page_id );
     1515
     1516        return -1;
     1517    }
     1518}  // end fatfs_scan_directory()
     1519
     1520
     1521
     1522/////////////////////////////////////////////////////
     1523error_t fatfs_new_dentry( vfs_inode_t * parent_inode,
     1524                          char        * name,
     1525                          xptr_t        child_inode_xp )
     1526{
     1527    uint8_t  * entry;    // pointer on FAT32 directory entry (array of 32 bytes)
     1528    uint32_t   index;    // index of FAT32 directory entry in mapper
     1529    mapper_t * mapper;   // pointer on directory mapper
     1530    uint32_t   cluster;  // directory entry cluster
     1531    uint32_t   size;     // directory entry size
     1532    bool_t     is_dir;   // directory entry type (file/dir)
     1533    error_t    error;
     1534
     1535// check arguments
     1536assert( (parent_inode != NULL)         , "parent_inode is NULL\n" );
     1537assert( (name         != NULL)         , "name is NULL\n" );
     1538assert( (child_inode_xp != XPTR_NULL ) , "child_inode is XPTR_NULL\n" );
     1539
     1540#if DEBUG_FATFS_GET_DENTRY
     1541char       parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1542uint32_t   cycle = (uint32_t)hal_get_cycles();
     1543thread_t * this  = CURRENT_THREAD;
     1544vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
     1545if( DEBUG_FATFS_GET_DENTRY < cycle )
     1546printk("\n[%s]  thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
     1547__FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );
     1548#endif
     1549
     1550    // get pointer and index of searched directory entry in mapper
     1551    mapper = parent_inode->mapper;
     1552    error  = fatfs_scan_directory( mapper, name , &entry , &index );
     1553
      1554    // update child inode and dentry descriptors if success
     1555    if( error == 0 )
     1556    {
    15001557
    15011558#if DEBUG_FATFS_GET_DENTRY
    15021559cycle = (uint32_t)hal_get_cycles();
    15031560if( DEBUG_FATFS_GET_DENTRY < cycle )
    1504 printk("\n[%s]  thread[%x,%x] exit / child <%s> not found / cycle %d\n",
    1505 __FUNCTION__, this->process->pid, this->trdid, name, cycle );
    1506 #endif
    1507 
    1508         return -1;
    1509     }
    1510 
    1511     // get child inode cluster and local pointer
    1512     cxy_t          inode_cxy = GET_CXY( child_inode_xp );
    1513     vfs_inode_t  * inode_ptr = GET_PTR( child_inode_xp );
    1514 
    1515     // build extended pointer on parent dentried root
    1516     xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents );
      1561printk("\n[%s]  thread[%x,%x] exit / initialised child <%s> in <%s> / cycle %d\n",
     1562__FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle );
     1563#endif
     1564        // get relevant infos from FAT32 directory entry
     1565        cluster = (fatfs_get_record( DIR_FST_CLUS_HI , entry , 1 ) << 16) |
     1566                  (fatfs_get_record( DIR_FST_CLUS_LO , entry , 1 )      ) ;
     1567        is_dir  = (fatfs_get_record( DIR_ATTR        , entry , 1 ) & ATTR_DIRECTORY);
     1568        size    =  fatfs_get_record( DIR_FILE_SIZE   , entry , 1 );
     1569
     1570        // get child inode cluster and local pointer
     1571        cxy_t          inode_cxy = GET_CXY( child_inode_xp );
     1572        vfs_inode_t  * inode_ptr = GET_PTR( child_inode_xp );
     1573
      1574        // build extended pointer on root of list of parent dentries
     1575        xptr_t parents_root_xp = XPTR( inode_cxy , &inode_ptr->parents );
    15171576
    15181577// check child inode has at least one parent
    15191578assert( (xlist_is_empty( parents_root_xp ) == false ), "child inode must have one parent\n");
    15201579
    1521     // get dentry pointers and cluster
    1522     xptr_t         dentry_xp  = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents );
    1523     vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
    1524     cxy_t          dentry_cxy = GET_CXY( dentry_xp );
     1580        // get dentry pointers and cluster
     1581        xptr_t         dentry_xp  = XLIST_FIRST( parents_root_xp , vfs_dentry_t , parents );
     1582        vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
     1583        cxy_t          dentry_cxy = GET_CXY( dentry_xp );
    15251584
    15261585// check dentry descriptor in same cluster as parent inode
    15271586assert( (dentry_cxy == local_cxy) , "illegal dentry cluster\n" );
    15281587
    1529     // update the child inode "type", "size", and "extend" fields
    1530     vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE;
    1531 
    1532     hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type   ) , type );
    1533     hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size   ) , size );
    1534     hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster );
    1535 
    1536     // update the dentry "extend" field
    1537     dentry_ptr->extend = (void *)(intptr_t)dentry_id;
    1538 
    1539 #if DEBUG_FATFS_GET_DENTRY
     1588        // update the child inode "type", "size", and "extend" fields
     1589        vfs_inode_type_t type = (is_dir) ? INODE_TYPE_DIR : INODE_TYPE_FILE;
     1590
     1591        hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type   ) , type );
     1592        hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size   ) , size );
     1593        hal_remote_s32( XPTR( inode_cxy , &inode_ptr->extend ) , cluster );
     1594
     1595        // update the dentry "extend" field
     1596        dentry_ptr->extend = (void *)(intptr_t)index;
     1597
     1598        return 0;
     1599    }
     1600    else
     1601    {
     1602        return -1;
     1603    }
     1604
     1605}  // end fatfs_new_dentry()
     1606
     1607//////////////////////////////////////////////////
     1608error_t fatfs_update_dentry( vfs_inode_t  * inode,
     1609                             vfs_dentry_t * dentry,
     1610                             uint32_t       size )
     1611{
     1612    uint8_t  * entry;    // pointer on FAT32 directory entry (array of 32 bytes)
     1613    uint32_t   index;    // index of FAT32 directory entry in mapper
     1614    mapper_t * mapper;   // pointer on directory mapper
     1615    error_t    error;
     1616
     1617// check arguments
     1618assert( (inode  != NULL) , "inode is NULL\n" );
     1619assert( (dentry != NULL) , "dentry is NULL\n" );
     1620assert( (size   != 0   ) , "size is 0\n" );
     1621
     1622#if DEBUG_FATFS_UPDATE_DENTRY
     1623char       dir_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1624uint32_t   cycle = (uint32_t)hal_get_cycles();
     1625thread_t * this  = CURRENT_THREAD;
     1626vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
     1627if( DEBUG_FATFS_UPDATE_DENTRY < cycle )
     1628printk("\n[%s]  thread[%x,%x] enter for entry <%s> in dir <%s> / cycle %d\n",
     1629__FUNCTION__, this->process->pid, this->trdid, dentry->name , dir_name , cycle );
     1630#endif
     1631
     1632    // get pointer and index of searched directory entry in mapper
     1633    mapper = inode->mapper;
     1634    error  = fatfs_scan_directory( mapper, dentry->name , &entry , &index );
     1635
     1636    // update size in mapper if found
     1637    if( error == 0 )
     1638    {
     1639
     1640#if DEBUG_FATFS_UPDATE_DENTRY
    15401641cycle = (uint32_t)hal_get_cycles();
    1541 if( DEBUG_FATFS_GET_DENTRY < cycle )
    1542 printk("\n[%s]  thread[%x,%x] exit / child <%s> loaded in <%s> / cycle %d\n",
    1543 __FUNCTION__, this->process->pid, this->trdid, name, parent_name, cycle );
    1544 #endif
    1545 
    1546     return 0;
    1547 
    1548 }  // end fatfs_get_dentry()
     1642if( DEBUG_FATFS_UPDATE_DENTRY < cycle )
     1643printk("\n[%s]  thread[%x,%x] exit / found entry <%s> in <%s> / cycle %d\n",
     1644__FUNCTION__, this->process->pid, this->trdid, dentry->name, dir_name, cycle );
     1645#endif
     1646        // set size in FAT32 directory entry
     1647        fatfs_set_record( DIR_FILE_SIZE , entry , 1 , size );
     1648
     1649        // get local pointer on modified page base
     1650        void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));
     1651
     1652        // get extended pointer on modified page descriptor
     1653        xptr_t    page_xp = ppm_base2page( XPTR( local_cxy , base ) );
     1654
     1655        // mark page as dirty
     1656        ppm_page_do_dirty( page_xp );
     1657
     1658        return 0;
     1659    }
     1660    else
     1661    {
     1662        return -1;
     1663    }
     1664
     1665}  // end fatfs_update_dentry()
    15491666
    15501667///////////////////////////////////////////////////////
     
    20562173assert( (inode_xp != XPTR_NULL) , "inode pointer is NULL\n" );
    20572174
    2058     // get first_cluster from inode extension
     2175    // get inode cluster and local pointer
    20592176    inode_ptr     = GET_PTR( inode_xp );
    20602177    inode_cxy     = GET_CXY( inode_xp );
     2178
     2179    // get first_cluster from inode extension
    20612180    first_xp      = XPTR( inode_cxy , &inode_ptr->extend );
    20622181    first_cluster = (uint32_t)(intptr_t)hal_remote_lpt( first_xp );
     
    20732192printk("\n[%s] thread[%x,%x] enter for <%s> / first_cluster %x / cycle %d\n",
    20742193__FUNCTION__ , this->process->pid, this->trdid, name, first_cluster, cycle );
     2194#endif
     2195
     2196#if (DEBUG_FATFS_RELEASE_INODE & 1)
     2197fatfs_display_fat( 0 , 512 );
    20752198#endif
    20762199
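
In the fatfs_scan_directory() helper added above, a directory mapper is handled as 4 Kbytes pages holding 32-byte FAT32 directory entries, which is why the entry index is computed as ((page_id << 12) + offset) >> 5. A short worked example with assumed values:

    // worked example for the index computation used in fatfs_scan_directory():
    // one 4 Kbytes page holds 4096 / 32 = 128 directory entries
    uint32_t page_id = 2;      // third page of the directory mapper (assumed)
    uint32_t offset  = 0x40;   // byte offset of the entry inside that page (assumed)

    // byte position in mapper : (2 << 12) + 0x40 = 8256
    // entry index             : 8256 >> 5        = 258
    uint32_t index = ((page_id << 12) + offset) >> 5;    // index == 258
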
  • trunk/kernel/fs/fatfs.h

    r614 r623  
    309309
    310310/*****************************************************************************************
    311  * This function implements the generic vfs_fs_get_dentry() function for the FATFS.
    312  *****************************************************************************************
    313  * It initialises a new child (new inode/dentry couple in Inode Tree), identified
    314  * by the <child_inode_xp> argument, from the parent directory mapper, identified by the
    315  * <parent_inode> argument.
     311 * This function implements the generic vfs_fs_new_dentry() function for the FATFS.
     312 *****************************************************************************************
      313 * It initializes a new inode/dentry couple in the Inode Tree, attached to the directory
      314 * identified by the <parent_inode> argument. The new directory entry is identified
      315 * by the <name> argument. The child inode descriptor, identified by the <child_inode_xp>
      316 * argument, and the dentry descriptor must have been previously allocated.
    316317 * It scan the parent mapper to find the <name> argument.
    317318 * It set the "type", "size", and "extend" fields in inode descriptor.
     
    324325 * @ return 0 if success / return ENOENT if child not found.
    325326 ****************************************************************************************/
    326 error_t fatfs_get_dentry( struct vfs_inode_s * parent_inode,
     327error_t fatfs_new_dentry( struct vfs_inode_s * parent_inode,
    327328                          char               * name,
    328329                          xptr_t               child_inode_xp );
    329330
    330331/*****************************************************************************************
     332 * This function implements the generic vfs_fs_update_dentry() function for the FATFS.
     333 *****************************************************************************************
      334 * It updates the size of a directory entry identified by the <dentry> argument in
      335 * the mapper of a directory identified by the <inode> argument, as defined by the <size>
      336 * argument.
      337 * It scans the mapper to find the entry identified by the dentry "name" field.
      338 * It sets the "size" field in the directory mapper AND marks the page as DIRTY.
     339 * It must be called by a thread running in the cluster containing the directory inode.
     340 *****************************************************************************************
     341 * @ inode        : local pointer on inode (directory).
     342 * @ dentry       : local pointer on dentry (for name).
     343 * @ size         : new size value.
     344 * @ return 0 if success / return ENOENT if child not found.
     345 ****************************************************************************************/
     346error_t fatfs_update_dentry( struct vfs_inode_s  * inode,
     347                             struct vfs_dentry_s * dentry,
     348                             uint32_t              size );
     349
     350/*****************************************************************************************
    331351 * This function implements the generic vfs_fs_get_user_dir() function for the FATFS.
    332352 *****************************************************************************************
    333353 * It is called by the remote_dir_create() function to scan the mapper of a directory
    334  * identified by the <inode> argument and copy up to <max_dirent> valid dentries to a
     354 * identified by the <inode> argument, and copy up to <max_dirent> valid dentries to a
    335355 * local dirent array, defined by the <array> argument. The <min_dentry> argument defines
    336  * the index of the first dentry to copied to the target dirent array.
     356 * the index of the first dentry to be copied to the target dirent array.
    337357 * This function returns in the <entries> buffer the number of dentries actually written,
    338358 * and signals in the <done> buffer when the last valid entry has been found.
    339359 * If the <detailed> argument is true, a dentry/inode couple that does not exist in
    340  * the Inode Tree is dynamically created, and all dirent fiels are documented in the
     360 * the Inode Tree is dynamically created, and all dirent fields are documented in the
    341361 * dirent array. Otherwise, only the dentry name is documented.
    342362 * It must be called by a thread running in the cluster containing the directory inode.
     
    443463 * The page - and the mapper - can be located in another cluster than the calling thread.
    444464 * The pointer on the mapper and the page index in file are found in the page descriptor.
    445  * It is used for both for a regular file/directory mapper, and the FAT mapper.
     465 * It is used for both a regular file/directory mapper, and the FAT mapper.
    446466 * For the FAT mapper, it access the FATFS to get the location on IOC device.
    447467 * For a regular file, it access the FAT mapper to get the cluster index on IOC device.
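
A hedged usage sketch for the new fatfs_update_dentry() prototype documented above; <dir_inode>, <dentry> and <new_size> are placeholders, and in the kernel the call is normally reached through the generic vfs_fs_update_dentry() wrapper used by vfs_close() (see vfs.c below):

    // update the size recorded in the directory mapper for an existing entry
    // (all three variables are placeholders supplied by the caller)
    error_t error = fatfs_update_dentry( dir_inode,     // struct vfs_inode_s *  : directory
                                         dentry,        // struct vfs_dentry_s * : entry to update
                                         new_size );    // uint32_t              : new size value
    if( error )
    {
        printk("\n[ERROR] in %s : cannot update <%s> in directory mapper\n",
               __FUNCTION__, dentry->name );
    }
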
  • trunk/kernel/fs/ramfs.c

    r602 r623  
    3535                     char   * ramfs_root_name )
    3636{
    37     xptr_t    unused_xp;   // required by vfs_add_child_in_parent()                     
     37    xptr_t        dentry_xp;     // unused but required by vfs_add_child_in_parent()
     38    xptr_t        inode_xp;
     39    vfs_inode_t * inode_ptr;
    3840 
    3941    cxy_t     cxy = cluster_random_select();
     
    4143    // create VFS dentry and VFS inode for RAMFS root directory
    4244    return  vfs_add_child_in_parent( cxy,
    43                                      INODE_TYPE_DIR,
    4445                                     FS_TYPE_RAMFS,
    4546                                     parent_inode_xp,
    4647                                     ramfs_root_name,
    47                                      &unused_xp,
    48                                      &unused_xp );
     48                                     &dentry_xp,
     49                                     &inode_xp );
     50    // update inode type field
     51    inode_ptr = GET_PTR( inode_xp );
     52    inode_ptr->type = INODE_TYPE_DIR;
    4953}
    5054
  • trunk/kernel/fs/vfs.c

    r614 r623  
    33 *
    44 * Author  Mohamed Lamine Karaoui (2015)
    5  *         Alain Greiner (2016,2017,2018)
     5 *         Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    142142////////////////////////////////////////////////////
    143143error_t vfs_inode_create( vfs_fs_type_t     fs_type,
    144                           vfs_inode_type_t  inode_type,
    145144                          uint32_t          attr,
    146145                          uint32_t          rights,
     
    214213
    215214    // initialize inode descriptor
    216     inode->type       = inode_type;
     215    inode->type       = INODE_TYPE_FILE;     // default value
    217216    inode->inum       = inum;
    218217    inode->attr       = attr;
     
    228227    mapper->inode     = inode;
    229228 
    230     // initialise threads waiting queue
    231     // xlist_root_init( XPTR( local_cxy , &inode->wait_root ) );
    232 
    233229    // initialize chidren dentries xhtab
    234230    xhtab_init( &inode->children , XHTAB_DENTRY_TYPE );
     
    278274    vfs_inode_t * ptr = GET_PTR( inode_xp );
    279275
     276    // build extended pointers on lock & size
     277    xptr_t   lock_xp = XPTR( cxy , &ptr->size_lock );
     278    xptr_t   size_xp = XPTR( cxy , &ptr->size );
     279
     280    // take lock in read mode
     281    remote_rwlock_rd_acquire( lock_xp );
     282
    280283    // get size
    281     remote_rwlock_rd_acquire( XPTR( cxy , &ptr->size_lock ) );
    282     uint32_t size = hal_remote_l32( XPTR( cxy , &ptr->size ) );
    283     remote_rwlock_rd_release( XPTR( cxy , &ptr->size_lock ) );
     284    uint32_t size = hal_remote_l32( size_xp );
     285
     286    // release lock from read mode
     287    remote_rwlock_rd_release( lock_xp );
     288
    284289    return size;
    285290}
    286291
    287 ////////////////////////////////////////////
    288 void vfs_inode_set_size( xptr_t    inode_xp,
    289                          uint32_t  size )
     292///////////////////////////////////////////////
     293void vfs_inode_update_size( xptr_t    inode_xp,
     294                            uint32_t  size )
    290295{
    291296    // get inode cluster and local pointer
     
    293298    vfs_inode_t * ptr = GET_PTR( inode_xp );
    294299
    295     // set size
    296     remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) );
    297     hal_remote_s32( XPTR( cxy , &ptr->size ) , size );
    298     remote_rwlock_wr_release( XPTR( cxy , &ptr->size_lock ) );
     300    // build extended pointers on lock & size
     301    xptr_t   lock_xp = XPTR( cxy , &ptr->size_lock );
     302    xptr_t   size_xp = XPTR( cxy , &ptr->size );
     303
     304    // take lock in write mode
     305    remote_rwlock_wr_acquire( lock_xp );
     306
     307    // get current size
     308    uint32_t current_size = hal_remote_l32( size_xp );
     309
     310    // set size if required
     311    if( current_size < size ) hal_remote_s32( size_xp , size );
     312
     313    // release lock from write mode
     314    remote_rwlock_wr_release( lock_xp );
    299315}
    300316
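
The function renamed from vfs_inode_set_size() to vfs_inode_update_size() now only grows the recorded size: it writes the new value only when it is larger than the current one. A minimal illustration, assuming the remote inode referenced by <inode_xp> (a placeholder) currently records a size of 8192 bytes:

    // <inode_xp> is a placeholder extended pointer on a VFS inode
    vfs_inode_update_size( inode_xp , 4096 );    // smaller value : size stays 8192
    vfs_inode_update_size( inode_xp , 16384 );   // larger value  : size becomes 16384
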
     
    546562
    547563// check refcount
    548 assert( (file->refcount == 0) , "refcount non zero\n" );
     564// assert( (file->refcount == 0) , "refcount non zero\n" );
    549565
    550566        kmem_req_t req;
     
    554570
    555571#if DEBUG_VFS_CLOSE
     572char name[CONFIG_VFS_MAX_NAME_LENGTH];
     573vfs_file_get_name( XPTR( local_cxy , file ) , name );
    556574thread_t * this = CURRENT_THREAD;
    557575uint32_t cycle = (uint32_t)hal_get_cycles();
    558576if( DEBUG_VFS_CLOSE < cycle )
    559 printk("\n[%s] thread[%x,%x] deleted file %x in cluster %x / cycle %d\n",
    560 __FUNCTION__, this->process->pid, this->trdid, file, local_cxy, cycle );
     577printk("\n[%s] thread[%x,%x] deleted file <%s> in cluster %x / cycle %d\n",
     578__FUNCTION__, this->process->pid, this->trdid, name, local_cxy, cycle );
    561579#endif
    562580
     
    585603    hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->refcount ) , -1 );
    586604}
     605
     606///////////////////////////////////////
     607void vfs_file_get_name( xptr_t file_xp,
     608                        char * name )
     609{
     610    // get cluster and local pointer on remote file
     611    vfs_file_t * file_ptr = GET_PTR( file_xp );
     612    cxy_t        file_cxy = GET_CXY( file_xp );
     613
     614    // get pointers on remote inode
     615    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
     616    xptr_t        inode_xp  = XPTR( file_cxy , inode_ptr );
     617
     618    // call the relevant function
     619    vfs_inode_get_name( inode_xp , name );
     620}
     621
    587622
    588623//////////////////////////////////////////////////////////////////////////////////////////
     
    889924}  // vfs_lseek()
    890925
    891 ///////////////////////////////////
     926////////////////////////////////////
    892927error_t vfs_close( xptr_t   file_xp,
    893928                   uint32_t file_id )
    894929{
    895     cluster_t  * cluster;          // local pointer on local cluster
    896     cxy_t        file_cxy;         // cluster containing the file descriptor.
    897     vfs_file_t * file_ptr;         // local ponter on file descriptor
    898     cxy_t        owner_cxy;        // process owner cluster
    899     lpid_t       lpid;             // process local index
    900     xptr_t       root_xp;          // root of list of process copies
    901     xptr_t       lock_xp;          // lock protecting the list of copies
    902     xptr_t       iter_xp;          // iterator on list of process copies
    903     xptr_t       process_xp;       // extended pointer on one process copy
    904     cxy_t        process_cxy;      // process copy cluster
    905     process_t  * process_ptr;      // process copy local pointer
    906 
    907 // check arguments
    908 assert( (file_xp != XPTR_NULL) , "file_xp == XPTR_NULL\n" );
    909 assert( (file_id < CONFIG_PROCESS_FILE_MAX_NR) , "illegal file_id\n" );
     930    cxy_t         file_cxy;         // cluster containing the file descriptor.
      931    vfs_file_t  * file_ptr;         // local pointer on file descriptor
     932    cxy_t         owner_cxy;        // process owner cluster
     933    pid_t         pid;              // process identifier
     934    lpid_t        lpid;             // process local index
     935    xptr_t        root_xp;          // root of xlist (processes , or dentries)
     936    xptr_t        lock_xp;          // lock protecting the xlist
     937    xptr_t        iter_xp;          // iterator on xlist
     938    mapper_t    * mapper_ptr;       // local pointer on associated mapper
     939    xptr_t        mapper_xp;        // extended pointer on mapper
     940    vfs_inode_t * inode_ptr;        // local pointer on associated inode
     941    xptr_t        inode_xp;         // extended pointer on inode
     942    uint32_t      size;             // current file size (from inode descriptor)
     943    error_t       error;
     944
     945    char          name[CONFIG_VFS_MAX_NAME_LENGTH];  // file name
     946
     947// check argument
     948assert( (file_xp != XPTR_NULL) , "file_xp is XPTR_NULL\n" );
    910949
    911950    thread_t  * this    = CURRENT_THREAD;
    912951    process_t * process = this->process;
    913 
     952    cluster_t * cluster = LOCAL_CLUSTER;
     953
     954    // get file name
     955    vfs_file_get_name( file_xp , name );
     956   
    914957#if DEBUG_VFS_CLOSE
    915958uint32_t cycle = (uint32_t)hal_get_cycles();
    916959if( DEBUG_VFS_CLOSE < cycle )
    917 printk("\n[%s] thread[%x,%x] enter / fdid %d / cycle %d\n",
    918 __FUNCTION__, process->pid, this->trdid, file_id, cycle );
    919 #endif
    920 
    921     // get local pointer on local cluster manager
    922     cluster = LOCAL_CLUSTER;
     960printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
     961__FUNCTION__, process->pid, this->trdid, name, cycle );
     962#endif
     963
     964    // get cluster and local pointer on remote file descriptor
     965    file_cxy = GET_CXY( file_xp );
     966    file_ptr = GET_PTR( file_xp );
     967
     968    //////// 1) update all dirty pages from mapper to device
     969
     970    // get pointers on mapper associated to file
     971    mapper_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
     972    mapper_xp  = XPTR( file_cxy , mapper_ptr );
     973
     974    // copy all dirty pages from mapper to device
     975    if( file_cxy == local_cxy )
     976    {
     977        error = mapper_sync( mapper_ptr );
     978    }
     979    else
     980    {
     981        rpc_mapper_sync_client( file_cxy,
     982                                mapper_ptr,
     983                                &error );
     984    }
     985
     986    if( error )
     987    {
     988        printk("\n[ERROR] in %s : cannot synchronise dirty pages for <%s>\n",
     989        __FUNCTION__, name );
     990        return -1;
     991    }
     992
     993#if DEBUG_VFS_CLOSE
     994if( DEBUG_VFS_CLOSE < cycle )
     995printk("\n[%s] thread[%x,%x] synchronised mapper of <%s> to device\n",
     996__FUNCTION__, process->pid, this->trdid, name );
     997#endif
     998
     999    //////// 2) update file size in all parent directory mapper(s) and on device
     1000
     1001    // get pointers on remote inode
     1002    inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
     1003    inode_xp  = XPTR( file_cxy , inode_ptr );
     1004
     1005    // get file size from remote inode
     1006    size = hal_remote_l32( XPTR( file_cxy , &inode_ptr->size ) );
     1007
     1008    // get root of list of parents dentry
     1009    root_xp = XPTR( file_cxy , &inode_ptr->parents );
     1010
     1011    // loop on all parents
     1012    XLIST_FOREACH( root_xp , iter_xp )
     1013    {
     1014        // get pointers on parent directory dentry
     1015        xptr_t         parent_dentry_xp  = XLIST_ELEMENT( iter_xp , vfs_dentry_t , parents );
     1016        cxy_t          parent_cxy        = GET_CXY( parent_dentry_xp );
     1017        vfs_dentry_t * parent_dentry_ptr = GET_PTR( parent_dentry_xp );
     1018
     1019        // get local pointer on parent directory inode
     1020        vfs_inode_t * parent_inode_ptr = hal_remote_lpt( XPTR( parent_cxy,
     1021                                                         &parent_dentry_ptr->parent ) );
     1022
     1023        // get local pointer on parent directory mapper
     1024        mapper_t * parent_mapper_ptr = hal_remote_lpt( XPTR( parent_cxy,
     1025                                                       &parent_inode_ptr->mapper ) );
     1026 
     1027        // update dentry size in parent directory mapper
     1028        if( parent_cxy == local_cxy )
     1029        {
     1030            error = vfs_fs_update_dentry( parent_inode_ptr,
     1031                                          parent_dentry_ptr,
     1032                                          size );
     1033        }
     1034        else
     1035        {
     1036            rpc_vfs_fs_update_dentry_client( parent_cxy,
     1037                                             parent_inode_ptr,
     1038                                             parent_dentry_ptr,
     1039                                             size,
     1040                                             &error );
     1041        }
     1042
     1043        if( error )
     1044        {
     1045            printk("\n[ERROR] in %s : cannot update size in parent\n",
     1046            __FUNCTION__ );
     1047            return -1;
     1048        }
     1049
     1050#if DEBUG_VFS_CLOSE
     1051char parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1052vfs_inode_get_name( XPTR( parent_cxy , parent_inode_ptr ) , parent_name );
     1053if( DEBUG_VFS_CLOSE < cycle )
     1054printk("\n[%s] thread[%x,%x] updated size of <%s> in parent <%s>\n",
     1055__FUNCTION__, process->pid, this->trdid, name, parent_name );
     1056#endif
     1057
     1058        // copy all dirty pages from parent mapper to device
     1059        if( parent_cxy == local_cxy )
     1060        {
     1061            error = mapper_sync( parent_mapper_ptr );
     1062        }
     1063        else
     1064        {
     1065            rpc_mapper_sync_client( parent_cxy,
     1066                                    parent_mapper_ptr,
     1067                                    &error );
     1068        }
     1069
     1070        if( error )
     1071        {
     1072            printk("\n[ERROR] in %s : cannot synchronise parent mapper to device\n",
     1073            __FUNCTION__ );
     1074            return -1;
     1075        }
     1076
     1077#if DEBUG_VFS_CLOSE
     1078if( DEBUG_VFS_CLOSE < cycle )
      1079printk("\n[%s] thread[%x,%x] synchronised mapper of parent <%s> to device\n",
     1080__FUNCTION__, process->pid, this->trdid, parent_name );
     1081#endif
     1082
     1083    }
     1084
     1085    //////// 3) loop on the process copies to reset all fd_array[file_id] entries
    9231086
    9241087    // get owner process cluster and lpid
    925     owner_cxy  = CXY_FROM_PID( process->pid );
    926     lpid       = LPID_FROM_PID( process->pid );
     1088    pid        = process->pid;
     1089    owner_cxy  = CXY_FROM_PID( pid );
     1090    lpid       = LPID_FROM_PID( pid );
    9271091
    9281092    // get extended pointers on copies root and lock
     
    9301094    lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[lpid] );
    9311095
    932     // 1) loop on the process descriptor copies to reset all fd_array[file_id] entries
    933 
    9341096    // take the lock protecting the list of copies
    9351097    remote_queuelock_acquire( lock_xp );
     
    9371099    XLIST_FOREACH( root_xp , iter_xp )
    9381100    {
    939         process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
    940         process_cxy = GET_CXY( process_xp );
    941         process_ptr = GET_PTR( process_xp );
    942 
    943 #if (DEBUG_VFS_CLOSE & 1 )
    944 if( DEBUG_VFS_CLOSE < cycle )
    945 printk("\n[%s]  reset fd_array[%d] for process %x in cluster %x\n",
    946 __FUNCTION__, file_id, process_ptr, process_cxy );
    947 #endif
    948 
    949 // fd_array lock is required for atomic write of a 64 bits word
    950 // xptr_t fd_array_lock_xp = XPTR( process_cxy , &process_ptr->fd_array.lock );
    951 
    952         xptr_t entry_xp         = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] );
    953 
    954 // remote_rwlock_wr_acquire( fd_array_lock_xp );
    955 
     1101        xptr_t      process_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
     1102        cxy_t       process_cxy = GET_CXY( process_xp );
     1103        process_t * process_ptr = GET_PTR( process_xp );
     1104
     1105        xptr_t entry_xp = XPTR( process_cxy , &process_ptr->fd_array.array[file_id] );
    9561106        hal_remote_s64( entry_xp , XPTR_NULL );
    957        
    958 // remote_rwlock_wr_release( fd_array_lock_xp );
    959 
    9601107        vfs_file_count_down( file_xp );
    961 
    9621108        hal_fence();
    9631109    }   
     
    9661112    remote_queuelock_release( lock_xp );
    9671113
    968 #if (DEBUG_VFS_CLOSE & 1)
     1114#if DEBUG_VFS_CLOSE
    9691115if( DEBUG_VFS_CLOSE < cycle )
    970 printk("\n[%s] thread[%x,%x] reset all fd-array copies\n",
    971 __FUNCTION__, process->pid, this->trdid );
    972 #endif
    973 
    974     // 2) release memory allocated to file descriptor in remote cluster
    975 
    976     // get cluster and local pointer on remote file descriptor
    977     file_cxy = GET_CXY( file_xp );
    978     file_ptr = GET_PTR( file_xp );
     1116printk("\n[%s] thread[%x,%x] reset all fd-array copies for <%s>\n",
     1117__FUNCTION__, process->pid, this->trdid, name );
     1118#endif
     1119
     1120    //////// 4) release memory allocated to file descriptor in remote cluster
    9791121
    9801122    if( file_cxy == local_cxy )             // file cluster is local
     
    9901132cycle = (uint32_t)hal_get_cycles();
    9911133if( DEBUG_VFS_CLOSE < cycle )
    992 printk("\n[%s] thread[%x,%x] exit / fdid %d closed / cycle %d\n",
    993 __FUNCTION__, process->pid, this->trdid, file_id, cycle );
     1134printk("\n[%s] thread[%x,%x] exit / <%s> closed / cycle %d\n",
     1135__FUNCTION__, process->pid, this->trdid, name, cycle );
    9941136#endif
    9951137
     
    11201262    {
    11211263        error = vfs_inode_create( parent_fs_type,
    1122                                   INODE_TYPE_DIR,
    11231264                                  attr,
    11241265                                  rights,
     
    11311272        rpc_vfs_inode_create_client( inode_cxy,
    11321273                                     parent_fs_type,
    1133                                      INODE_TYPE_DIR,
    11341274                                     attr,
    11351275                                     rights,
     
    11521292    // get new inode local pointer
    11531293    inode_ptr = GET_PTR( inode_xp );
     1294
     1295    // update inode "type" field
     1296    hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , INODE_TYPE_DIR );
    11541297   
    11551298#if(DEBUG_VFS_MKDIR & 1)
     
    14551598    xptr_t            dentry_xp;          // extended pointer on dentry to unlink
    14561599    vfs_dentry_t    * dentry_ptr;         // local pointer on dentry to unlink
     1600    vfs_ctx_t       * ctx_ptr;            // local pointer on FS context
     1601    vfs_fs_type_t     fs_type;            // File system type
    14571602
    14581603    char              name[CONFIG_VFS_MAX_NAME_LENGTH];  // name of link to remove
     
    14661611vfs_inode_get_name( root_xp , root_name );
    14671612if( DEBUG_VFS_UNLINK < cycle )
    1468 printk("\n[%s] thread[%x,%x] enter / root <%s> / path <%s> / cycle %d\n",
     1613printk("\n[%s] thread[%x,%x] : enter for root <%s> / path <%s> / cycle %d\n",
    14691614__FUNCTION__, process->pid, this->trdid, root_name, path, cycle );
    14701615#endif
     
    15011646vfs_inode_get_name( parent_xp , parent_name );
    15021647if( DEBUG_VFS_UNLINK < cycle )
    1503 printk("\n[%s] thread[%x,%x] parent inode <%s> is (%x,%x)\n",
     1648printk("\n[%s] thread[%x,%x] : parent inode <%s> is (%x,%x)\n",
    15041649__FUNCTION__, process->pid, this->trdid, parent_name, parent_cxy, parent_ptr );
    15051650#endif
     
    15081653    xptr_t children_xp = XPTR( parent_cxy , &parent_ptr->children );
    15091654
    1510     // get extended pointer on dentry to unlink
     1655    // try to get extended pointer on dentry from Inode Tree
    15111656    dentry_xp = xhtab_lookup( children_xp , name );
    15121657   
    1513     if( dentry_xp == XPTR_NULL )
    1514     {
    1515         remote_rwlock_wr_release( lock_xp );
    1516         printk("\n[ERROR] in %s : cannot get target dentry <%s> in <%s>\n",
    1517         __FUNCTION__, name, path );
    1518         return -1;
    1519     }
    1520    
    1521     // get local pointer on dentry to unlink
    1522     dentry_ptr = GET_PTR( dentry_xp );
     1658    // when the dentry is not found in the Inode Tree, try to get it from the parent mapper
     1659
     1660    if( dentry_xp == XPTR_NULL )           // miss target dentry in Inode Tree
     1661    {
    15231662
    15241663#if( DEBUG_VFS_UNLINK & 1 )
    15251664if( DEBUG_VFS_UNLINK < cycle )
    1526 printk("\n[%s] thread[%x,%x] dentry <%s> to unlink is (%x,%x)\n",
    1527 __FUNCTION__, process->pid, this->trdid, name, parent_cxy, dentry_ptr );
    1528 #endif
    1529 
    1530     // get pointer on target inode
    1531     inode_xp  = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) );
    1532     inode_cxy = GET_CXY( inode_xp );
    1533     inode_ptr = GET_PTR( inode_xp );
    1534  
     1665printk("\n[%s] thread[%x,%x] : inode <%s> not found => scan parent mapper\n",
     1666__FUNCTION__, process->pid, this->trdid, name );
     1667#endif
     1668        // get parent inode FS type
     1669        ctx_ptr    = hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->ctx ) );
     1670        fs_type    = hal_remote_l32( XPTR( parent_cxy , &ctx_ptr->type ) );
     1671
     1672        // select a cluster for new inode
     1673        inode_cxy = cluster_random_select();
     1674
     1675        // speculatively insert a new child dentry/inode couple in inode tree
     1676        error = vfs_add_child_in_parent( inode_cxy,
     1677                                         fs_type,
     1678                                         parent_xp,
     1679                                         name,
     1680                                         &dentry_xp,
     1681                                         &inode_xp );
     1682        if( error )
     1683        {
     1684            printk("\n[ERROR] in %s : cannot create inode <%s> in path <%s>\n",
     1685            __FUNCTION__ , name, path );
     1686
     1687            vfs_remove_child_from_parent( dentry_xp );
     1688            return -1;
     1689        }
     1690
     1691        // get local pointers on new dentry and new inode descriptors
     1692        inode_ptr  = GET_PTR( inode_xp );
     1693        dentry_ptr = GET_PTR( dentry_xp );
     1694
     1695        // scan parent mapper to find the missing dentry, and complete
     1696        // initialisation of new dentry and new inode descriptors in Inode Tree
     1697        if( parent_cxy == local_cxy )
     1698        {
     1699            error = vfs_fs_new_dentry( parent_ptr,
     1700                                       name,
     1701                                       inode_xp );
     1702        }
     1703        else
     1704        {
     1705            rpc_vfs_fs_new_dentry_client( parent_cxy,
     1706                                          parent_ptr,
     1707                                          name,
     1708                                          inode_xp,
     1709                                          &error );
     1710        }
     1711
     1712        if ( error )   // dentry not found in parent mapper
     1713        {
     1714            printk("\n[ERROR] in %s : cannot get dentry <%s> in path <%s>\n",
     1715            __FUNCTION__ , name, path );
     1716            return -1;
     1717        }
     1718
     1719#if (DEBUG_VFS_UNLINK & 1)
     1720if( DEBUG_VFS_UNLINK < cycle )
     1721printk("\n[%s] thread[%x,%x] : created missing inode & dentry <%s> in cluster %x\n",
     1722__FUNCTION__, process->pid, this->trdid, name, inode_cxy );
     1723#endif
     1724
     1725    }
     1726    else                                  // found target dentry in Inode Tree
     1727    {
     1728        dentry_ptr = GET_PTR( dentry_xp );
     1729       
     1730        // get pointer on target inode from dentry
     1731        inode_xp  = hal_remote_l64( XPTR( parent_cxy , &dentry_ptr->child_xp ) );
     1732        inode_cxy = GET_CXY( inode_xp );
     1733        inode_ptr = GET_PTR( inode_xp );
     1734    }
     1735
     1736    // At this point the Inode Tree contains the target dentry and child inode,
     1737    // so we can safely remove this dentry from both the parent mapper and the Inode Tree.
     1738
    15351739#if( DEBUG_VFS_UNLINK & 1 )
    1536 char inode_name[CONFIG_VFS_MAX_NAME_LENGTH];
    1537 vfs_inode_get_name( inode_xp , inode_name );
    15381740if( DEBUG_VFS_UNLINK < cycle )
    1539 printk("\n[%s] thread[%x,%x] target inode <%s> is (%x,%x) / cycle %d\n",
    1540 __FUNCTION__, process->pid, this->trdid, inode_name, inode_cxy, inode_ptr, cycle );
     1741printk("\n[%s] thread[%x,%x] : dentry (%x,%x) / inode (%x,%x)\n",
     1742__FUNCTION__, process->pid, this->trdid, parent_cxy, dentry_ptr, inode_cxy, inode_ptr );
    15411743#endif
    15421744
     
    15451747    inode_links  = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->links ) );
    15461748
    1547 // check target inode links counter
    1548 assert( (inode_links >= 1), "illegal inode links count %d for <%s>\n", inode_links, path );
    1549 
    15501749    ///////////////////////////////////////////////////////////////////////
    15511750    if( (inode_type == INODE_TYPE_FILE) || (inode_type == INODE_TYPE_DIR) )
    15521751    {
     1752
     1753#if( DEBUG_VFS_UNLINK & 1 )
     1754if( DEBUG_VFS_UNLINK < cycle )
     1755printk("\n[%s] thread[%x,%x] : unlink inode <%s> / type %s / %d links\n",
     1756__FUNCTION__, process->pid, this->trdid, name, vfs_inode_type_str(inode_type), inode_links );
     1757#endif
     1758
    15531759        // 1. Release clusters allocated to target inode
    15541760        //    and synchronize the FAT on IOC device if last link.
     
    15571763            // build extended pointer on target inode "children" number
    15581764            xptr_t inode_children_xp = XPTR( inode_cxy , &inode_ptr->children.items );
     1765
     1766printk("\n@@@ in %s : children_xp = (%x,%x)\n",
     1767__FUNCTION__, inode_cxy, &inode_ptr->children.items );
    15591768
    15601769            // get target inode number of children
     
    17131922
    17141923}  // end vfs_stat()
    1715 
    1716 /////////////////////////////////////////////
    1717 error_t vfs_readdir( xptr_t          file_xp,
    1718                      struct dirent * k_dirent )
    1719 {
    1720     assert( false , "not implemented file_xp: %x, k_dirent ptr %x\n",
    1721       file_xp, k_dirent );
    1722     return 0;
    1723 }
    1724 
    1725 ////////////////////////////////////
    1726 error_t vfs_rmdir( xptr_t   file_xp,
    1727                    char   * path )
    1728 {
    1729     assert( false , "not implemented file_xp: %x, path <%s>\n",
    1730       file_xp, path );
    1731     return 0;
    1732 }
    17331924
    17341925////////////////////////////////////
     
    21952386    cxy_t              child_cxy;    // cluster for child inode
    21962387    vfs_inode_t      * child_ptr;    // local pointer on child inode
    2197     vfs_inode_type_t   child_type;   // child inode type
    21982388    vfs_fs_type_t      fs_type;      // File system type
    21992389    vfs_ctx_t        * ctx_ptr;      // local pointer on FS context
     
    23192509                child_cxy = cluster_random_select();
    23202510
    2321                 // define child inode type
    2322                 if( dir ) child_type = INODE_TYPE_DIR;
    2323                 else      child_type = INODE_TYPE_FILE;
    2324  
    23252511                // insert a new child dentry/inode couple in inode tree
    23262512                error = vfs_add_child_in_parent( child_cxy,
    2327                                                  child_type,
    23282513                                                 fs_type,
    23292514                                                 parent_xp,
     
    23502535                if( parent_cxy == local_cxy )
    23512536                {
    2352                     error = vfs_fs_get_dentry( parent_ptr,
     2537                    error = vfs_fs_new_dentry( parent_ptr,
    23532538                                               name,
    23542539                                               child_xp );
     
    23562541                else
    23572542                {
    2358                     rpc_vfs_fs_get_dentry_client( parent_cxy,
     2543                    rpc_vfs_fs_new_dentry_client( parent_cxy,
    23592544                                                  parent_ptr,
    23602545                                                  name,
     
    29613146////////////////////////////////////////////////////////////////////
    29623147error_t vfs_add_child_in_parent( cxy_t              child_cxy,
    2963                                  vfs_inode_type_t   child_type,
    29643148                                 vfs_fs_type_t      fs_type,
    29653149                                 xptr_t             parent_inode_xp,
     
    30383222    {
    30393223        error = vfs_inode_create( fs_type,
    3040                                   child_type,
    30413224                                  attr,
    30423225                                  mode,
     
    30493232        rpc_vfs_inode_create_client( child_cxy,
    30503233                                     fs_type,
    3051                                      child_type,
    30523234                                     attr,
    30533235                                     mode,
     
    33093491
    33103492////////////////////////////////////////////////
    3311 error_t vfs_fs_get_dentry( vfs_inode_t * parent,
     3493error_t vfs_fs_new_dentry( vfs_inode_t * parent,
    33123494                           char        * name,
    33133495                           xptr_t        child_xp )
     
    33253507    if( fs_type == FS_TYPE_FATFS )
    33263508    {
    3327         error = fatfs_get_dentry( parent , name , child_xp );
     3509        error = fatfs_new_dentry( parent , name , child_xp );
    33283510    }
    33293511    else if( fs_type == FS_TYPE_RAMFS )
     
    33423524    return error;
    33433525
    3344 } // end vfs_fs_get_dentry()
     3526} // end vfs_fs_new_dentry()
     3527
     3528///////////////////////////////////////////////////
     3529error_t vfs_fs_update_dentry( vfs_inode_t  * inode,
     3530                              vfs_dentry_t * dentry,
     3531                              uint32_t       size )
     3532{
     3533    error_t error = 0;
     3534
     3535// check arguments
     3536assert( (inode  != NULL) , "inode  pointer is NULL\n");
     3537assert( (dentry != NULL) , "dentry pointer is NULL\n");
     3538
     3539    // get parent inode FS type
     3540    vfs_fs_type_t fs_type = inode->ctx->type;
     3541
     3542    // call relevant FS function
     3543    if( fs_type == FS_TYPE_FATFS )
     3544    {
     3545        error = fatfs_update_dentry( inode , dentry , size );
     3546    }
     3547    else if( fs_type == FS_TYPE_RAMFS )
     3548    {
     3549        assert( false , "should not be called for RAMFS\n" );
     3550    }
     3551    else if( fs_type == FS_TYPE_DEVFS )
     3552    {
     3553        assert( false , "should not be called for DEVFS\n" );
     3554    }
     3555    else
     3556    {
     3557        assert( false , "undefined file system type\n" );
     3558    }
     3559
     3560    return error;
     3561
     3562} // end vfs_fs_update_dentry()
    33453563
    33463564///////////////////////////////////////////////////
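
    Note on the API change visible in the hunks above: vfs_inode_create() and
    vfs_add_child_in_parent() no longer take an inode type argument; the child inode is
    created with the default INODE_TYPE_FILE type, and the caller patches the "type"
    field afterwards, as vfs_mkdir() now does. The fragment below is a minimal, hedged
    usage sketch of this calling convention (inode_cxy, parent_xp and name are assumed
    to be set by the caller, and FS_TYPE_FATFS is only an example):

        xptr_t        dentry_xp;
        xptr_t        inode_xp;
        vfs_inode_t * inode_ptr;
        error_t       error;

        // create the dentry/inode couple with the default INODE_TYPE_FILE type
        error = vfs_add_child_in_parent( inode_cxy,        // target cluster for child inode
                                         FS_TYPE_FATFS,    // example FS type
                                         parent_xp,        // extended pointer on parent inode
                                         name,             // child name
                                         &dentry_xp,
                                         &inode_xp );

        // patch the inode "type" field, using a remote access,
        // because the child inode can be placed in another cluster
        if( error == 0 )
        {
            inode_ptr = GET_PTR( inode_xp );
            hal_remote_s32( XPTR( inode_cxy , &inode_ptr->type ) , INODE_TYPE_DIR );
        }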
  • trunk/kernel/fs/vfs.h

    r614 r623  
    108108/******************************************************************************************
    109109 * This structure define a VFS inode.
    110  * An inode has several children dentries (if it is a directory), an can have several
     110 * An inode can have several children dentries (if it is a directory), and can have several
    111111 * parents dentries (if it has several alias links):
    112112 * - The "parents" field is the root of the xlist of parents dentries, and the "links"
     
    166166        remote_rwlock_t    size_lock;        /*! protect read/write to size                  */
    167167        remote_rwlock_t    main_lock;        /*! protect inode tree traversal and modifs     */
    168 //  list_entry_t       list;             /*! member of set of inodes in same cluster     */
    169 //  list_entry_t       wait_root;        /*! root of threads waiting on this inode       */
    170168        struct mapper_s  * mapper;           /*! associated file cache                       */
    171169        void             * extend;           /*! fs_type_specific inode extension            */
     
    195193
    196194/******************************************************************************************
    197  * This structure defines a directory entry.
     195 * This structure defines a directory entry.
    198196 * A dentry contains the name of a remote file/dir, an extended pointer on the
    199197 * inode representing this file/dir, a local pointer on the inode representing
     
    321319 *****************************************************************************************/
    322320error_t vfs_inode_create( vfs_fs_type_t     fs_type,
    323                           vfs_inode_type_t  inode_type,
    324321                          uint32_t          attr,
    325322                          uint32_t          rights,
     
    349346
    350347/******************************************************************************************
    351  * This function set the <size> of a file/dir to a remote inode,
    352  * taking the remote_rwlock protecting <size> in WRITE_MODE.
     348 * This function updates the "size" field of a remote inode identified by <inode_xp>.
     349 * It takes the rwlock protecting the file size in WRITE_MODE, and sets the "size" field
     350 * only when the current size is smaller than the requested <size> argument.
    353351 *****************************************************************************************
    354352 * @ inode_xp  : extended pointer on the remote inode.
    355  * @ size      : value to be written.
    356  *****************************************************************************************/
    357 void vfs_inode_set_size( xptr_t   inode_xp,
    358                          uint32_t size );
     353 * @ size      : requested size value.
     354 *****************************************************************************************/
     355void vfs_inode_update_size( xptr_t   inode_xp,
     356                            uint32_t size );
    359357
    360358/******************************************************************************************
     
    451449 * This function releases memory allocated to a local file descriptor.
    452450 * It must be executed by a thread running in the cluster containing the inode,
    453  * and the file refcount must be zero.
    454  * If the client thread is not running in the owner cluster, it must use the
    455  * rpc_vfs_file_destroy_client() function.
     451 * and the file refcount must be zero. Use the RPC_VFS_FILE_DESTROY if required.
    456452 ******************************************************************************************
    457453 * @ file  : local pointer on file descriptor.
     
    465461void vfs_file_count_up  ( xptr_t   file_xp );
    466462void vfs_file_count_down( xptr_t   file_xp );
     463
     464/******************************************************************************************
     465 * This debug function copies the name of the file identified by the <file_xp>
     466 * argument to a local buffer identified by the <name> argument.
     467 * The local buffer size must be at least CONFIG_VFS_MAX_NAME_LENGTH.
     468 *****************************************************************************************
     469 * @ file_xp  : extended pointer on the remote inode.
     470 * @ name     : local buffer pointer.
     471 *****************************************************************************************/
     472void vfs_file_get_name( xptr_t inode_xp,
     473                        char * name );
    467474
    468475
     
    537544 * Only the distributed Inode Tree is modified: it does NOT modify the parent mapper,
    538545 * and does NOT update the FS on IOC device.
     546 * It sets the inode type to the default INODE_TYPE_FILE value.
    539547 * It can be executed by any thread running in any cluster (can be different from both
    540548 * the child cluster and the parent cluster).
     
    552560 ******************************************************************************************
    553561 * @ child_inode_cxy  : [in]  target cluster for child inode.
    554  * @ child_inode_type : [in]  child inode type
    555562 * @ fs_type          : [in]  child inode FS type.
    556563 * @ parent_inode_xp  : [in]  extended pointer on parent inode.
     
    561568 *****************************************************************************************/
    562569error_t vfs_add_child_in_parent( cxy_t              child_inode_cxy,
    563                                  vfs_inode_type_t   child_inode_type,
    564570                                 vfs_fs_type_t      fs_type,
    565571                                 xptr_t             parent_inode_xp,
     
    729735/******************************************************************************************
    730736 * This function closes the - non-replicated - file descriptor identified by the <file_xp>
    731  * and <file_id> arguments.
    732  * 1) All entries in the fd_array copies are directly reset by the calling thread,
     737 * and <file_id> arguments. The <file_id> is required to reset the fd_array[] slot.
     738 * It can be called by a thread running in any cluster, and executes the following actions:
     739 * 1) It accesses the block device to update all dirty pages from the mapper associated
     740 *    to the file, and removes these pages from the dirty list, using an RPC if required.
     741 * 2) It updates the file size in all parent directory mapper(s), and updates the modified
     742 *    pages on the block device, using RPCs if required.
     743 * 3) All entries in the fd_array copies are directly reset by the calling thread,
    733744 *    using remote accesses.
    734  * 2) The memory allocated to file descriptor in cluster containing the inode is released.
    735  *    It requires a RPC if cluster containing the file descriptor is remote.
    736  ******************************************************************************************
    737  * @ file_xp     : extended pointer on the file descriptor in owner cluster.
    738  * @ file_id     : file descriptor index in fd_array.
     745 * 4) The memory allocated to file descriptor in cluster containing the inode is released,
     746 *    using an RPC if cluster containing the file descriptor is remote.
     747 ******************************************************************************************
     748 * @ file_xp     : extended pointer on the file descriptor.
     749 * @ file_id     : file descriptor index in fd_array[].
    739750 * @ returns 0 if success / -1 if error.
    740751 *****************************************************************************************/
     
    877888/******************************************************************************************
    878889 * This function makes the I/O operation to move one page identified by the <page_xp>
    879  * argument to/from the IOC device from/to the mapper, as defined by <cmd_type>.
     890 * argument to/from the IOC device from/to the mapper, as defined by the <cmd_type>.
    880891 * Depending on the file system type, it calls the proper, FS specific function.
    881892 * It is used in case of MISS on the mapper, or when a dirty page in the mapper must
     
    918929 * Finally, it synchronously updates the parent directory on IOC device.
    919930 *
     931 * Depending on the file system type, it calls the relevant, FS specific function.
    920932 * It must be executed by a thread running in the cluster containing the parent directory.
    921  * It can be the RPC_VFS_VS_REMOVE_DENTRY. This function does NOT take any lock.
     933 * It can be the RPC_VFS_FS_REMOVE_DENTRY. This function does NOT take any lock.
    922934 ******************************************************************************************
    923935 * @ parent  : local pointer on parent (directory) inode.
     
    933945 * and updates both the child inode descriptor, identified by the <child_xp> argument,
    934946 * and the associated dentry descriptor :
    935  * - It set the "size", and "extend" fields in inode descriptor.
     947 * - It sets the "size", "type", and "extend" fields in inode descriptor.
    936948 * - It set the "extend" field in dentry descriptor.
    937949 * It is called by the vfs_lookup() function in case of miss.
     
    939951 * Depending on the file system type, it calls the relevant, FS specific function.
    940952 * It must be called by a thread running in the cluster containing the parent inode.
    941  * This function does NOT take any lock.
     953 * It can be the RPC_VFS_FS_NEW_DENTRY. This function does NOT take any lock.
    942954 ******************************************************************************************
    943955 * @ parent    : local pointer on parent inode (directory).
    944956 * @ name      : child name.
    945957 * @ child_xp  : extended pointer on remote child inode (file or directory)
    946  * @ return 0 if success / return ENOENT if not found.
    947  *****************************************************************************************/
    948 error_t vfs_fs_get_dentry( vfs_inode_t * parent,
     958 * @ return 0 if success / return -1 if dentry not found.
     959 *****************************************************************************************/
     960error_t vfs_fs_new_dentry( vfs_inode_t * parent,
    949961                           char        * name,
    950962                           xptr_t        child_xp );
     963
     964/******************************************************************************************
     965 * This function scans the mapper of an existing directory inode, identified by
     966 * the <inode> argument, to find a directory entry identified by the <dentry> argument,
     967 * and updates the size for this directory entry in the mapper, as defined by <size>.
     968 * The searched "name" is defined in the <dentry> argument, that must be in the same
     969 * cluster as the parent inode. It is called by the vfs_close() function.
     970 *
     971 * Depending on the file system type, it calls the relevant, FS specific function.
     972 * It must be called by a thread running in the cluster containing the parent inode.
     973 * It can be the RPC_VFS_FS_UPDATE_DENTRY. This function does NOT take any lock.
     974 ******************************************************************************************
     975 * @ inode     : local pointer on parent inode (directory).
     976 * @ dentry    : local pointer on dentry.
     977 * @ size      : new size value (bytes).
     978 * @ return 0 if success / return ENOENT if not found.
     979 *****************************************************************************************/
     980error_t vfs_fs_update_dentry( vfs_inode_t  * inode,
     981                              vfs_dentry_t * dentry,
     982                              uint32_t       size );
    951983
    952984/******************************************************************************************
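
    The vfs_inode_set_size() -> vfs_inode_update_size() renaming above also changes the
    semantics from an unconditional write to a monotonic update: the size can only grow.
    The fragment below is a sketch of that documented behaviour, reusing the "size" and
    "size_lock" fields of the inode structure shown earlier; it illustrates the comment
    and is not the actual kernel implementation:

        void vfs_inode_update_size( xptr_t   inode_xp,
                                    uint32_t size )
        {
            cxy_t         inode_cxy = GET_CXY( inode_xp );
            vfs_inode_t * inode_ptr = GET_PTR( inode_xp );

            // take the remote rwlock protecting the size in WRITE_MODE
            remote_rwlock_wr_acquire( XPTR( inode_cxy , &inode_ptr->size_lock ) );

            // update the size only if the current size is smaller
            if( hal_remote_l32( XPTR( inode_cxy , &inode_ptr->size ) ) < size )
            {
                hal_remote_s32( XPTR( inode_cxy , &inode_ptr->size ) , size );
            }

            // release the rwlock
            remote_rwlock_wr_release( XPTR( inode_cxy , &inode_ptr->size_lock ) );
        }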
  • trunk/kernel/kern/kernel_init.c

    r619 r623  
    33 *
    44 * Authors :  Mohamed Lamine Karaoui (2015)
    5  *            Alain Greiner  (2016,2017,2018)
     5 *            Alain Greiner  (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) Sorbonne Universites
     
    113113cxy_t                local_cxy                               CONFIG_CACHE_LINE_ALIGNED;
    114114
    115 // This variable is used for CP0 cores synchronisation in kernel_init()
     115// This variable is used for synchronisation of the core[0] cores in kernel_init()
    116116__attribute__((section(".kdata")))
    117117xbarrier_t           global_barrier                          CONFIG_CACHE_LINE_ALIGNED;
     
    126126
    127127// kernel_init is the entry point defined in hal/tsar_mips32/kernel.ld
    128 // It is used by the bootloader.
     128// It is used by the bootloader to transfer control to the kernel.
    129129extern void kernel_init( boot_info_t * info );
    130130
     
    466466// These chdev descriptors are distributed on all clusters, using a modulo on a global
    467467// index, identically computed in all clusters.
    468 // This function is executed in all clusters by the CP0 core, that computes a global index
    469 // for all external chdevs. Each CP0 core creates only the chdevs that must be placed in
     468// This function is executed in all clusters by core[0], which computes a global index
     469// for all external chdevs. Each core[0] creates only the chdevs that must be placed in
    470470// the local cluster, because the global index matches the local index.
    471471// The relevant entries in all copies of the devices directory are initialised.
     
    626626
    627627///////////////////////////////////////////////////////////////////////////////////////////
    628 // This function is called by CP0 in cluster 0 to allocate memory and initialize the PIC
     628// This function is called by core[0] in cluster 0 to allocate memory and initialize the PIC
    629629// device, namely the information attached to the external IOPIC controller, that
    630630// must be replicated in all clusters (struct iopic_input).
     
    791791
    792792///////////////////////////////////////////////////////////////////////////////////////////
    793 // This function is called by all CP0s in all cluster to complete the PIC device
     793// This function is called by all core[0]s in all clusters to complete the PIC device
    794794// initialisation, namely the information attached to the LAPIC controller.
    795795// This initialisation must be done after the IOPIC initialisation, but before other
     
    899899///////////////////////////////////////////////////////////////////////////////////////////
    900900// This function is the entry point for the kernel initialisation.
    901 // It is executed by all cores in all clusters, but only core[0], called CP0,
    902 // initializes the shared resources such as the cluster manager, or the local peripherals.
     901// It is executed by all cores in all clusters, but only core[0] initializes
     902// the shared resources such as the cluster manager, or the local peripherals.
    903903// To comply with the multi-kernels paradigm, it accesses only local cluster memory, using
    904904// only information contained in the local boot_info_t structure, set by the bootloader.
    905 // Only CP0 in cluster 0 print the log messages.
     905// Only core[0] in cluster 0 prints the log messages.
    906906///////////////////////////////////////////////////////////////////////////////////////////
    907907// @ info    : pointer on the local boot-info structure.
     
    925925
    926926    /////////////////////////////////////////////////////////////////////////////////
    927     // STEP 0 : Each core get its core identifier from boot_info, and makes
     927    // STEP 1 : Each core gets its core identifier from boot_info, and makes
    928928    //          a partial initialisation of its private idle thread descriptor.
    929     //          CP0 initializes the "local_cxy" global variable.
    930     //          CP0 in cluster IO initializes the TXT0 chdev to print log messages.
     929    //          core[0] initializes the "local_cxy" global variable.
     930    //          core[0] in cluster[0] initializes the TXT0 chdev for log messages.
    931931    /////////////////////////////////////////////////////////////////////////////////
    932932
     
    936936                                  &core_gid );
    937937
    938     // all CP0s initialize cluster identifier
     938    // core[0] initializes the cluster identifier
    939939    if( core_lid == 0 ) local_cxy = info->cxy;
    940940
     
    956956#endif
    957957
    958     // all CP0s initialize cluster info
     958    // core[0] initializes cluster info
    959959    if( core_lid == 0 ) cluster_info_init( info );
    960960
    961     // CP0 in cluster 0 initialises TXT0 chdev descriptor
     961    // core[0] in cluster[0] initialises TXT0 chdev descriptor
    962962    if( (core_lid == 0) && (core_cxy == 0) ) txt0_device_init( info );
     963
     964    // all cores check identifiers
     965    if( error )
     966    {
     967        printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d",
     968        __FUNCTION__, core_gid, core_cxy, core_lid );
     969        hal_core_sleep();
     970    }
    963971
    964972    /////////////////////////////////////////////////////////////////////////////////
     
    970978#if DEBUG_KERNEL_INIT
    971979if( (core_lid ==  0) & (local_cxy == 0) )
    972 printk("\n[%s] : exit barrier 0 : TXT0 initialized / cycle %d\n",
     980printk("\n[%s] : exit barrier 1 : TXT0 initialized / cycle %d\n",
    973981__FUNCTION__, (uint32_t)hal_get_cycles() );
    974982#endif
    975983
    976     /////////////////////////////////////////////////////////////////////////////
    977     // STEP 1 : all cores check core identifier.
    978     //          CP0 initializes the local cluster manager.
    979     //          This includes the memory allocators.
    980     /////////////////////////////////////////////////////////////////////////////
    981 
    982     // all cores check identifiers
    983     if( error )
    984     {
    985         printk("\n[PANIC] in %s : illegal core : gid %x / cxy %x / lid %d",
    986         __FUNCTION__, core_lid, core_cxy, core_lid );
    987         hal_core_sleep();
    988     }
    989 
    990     // all CP0s initialise DQDT (only CPO in cluster 0 build the quad-tree)
     984    /////////////////////////////////////////////////////////////////////////////////
     985    // STEP 2 : core[0] initializes the cluster manager,
     986    //          including the physical memory allocator.
     987    /////////////////////////////////////////////////////////////////////////////////
     988
     989    // core[0] initialises DQDT (only core[0] in cluster 0 builds the quad-tree)
    991990    if( core_lid == 0 ) dqdt_init();
    992991   
    993     // all CP0s initialize other cluster manager complex structures
     992    // core[0] initializes other cluster manager complex structures
    994993    if( core_lid == 0 )
    995994    {
     
    10121011#if DEBUG_KERNEL_INIT
    10131012if( (core_lid ==  0) & (local_cxy == 0) )
    1014 printk("\n[%s] : exit barrier 1 : clusters initialised / cycle %d\n",
     1013printk("\n[%s] : exit barrier 2 : cluster manager initialized / cycle %d\n",
    10151014__FUNCTION__, (uint32_t)hal_get_cycles() );
    10161015#endif
    10171016
    10181017    /////////////////////////////////////////////////////////////////////////////////
    1019     // STEP 2 : CP0 initializes the process_zero descriptor.
    1020     //          CP0 in cluster 0 initializes the IOPIC device.
     1018    // STEP 3 : core[0] initializes the process_zero descriptor,
     1019    //          including the kernel VMM (both GPT and VSL)
    10211020    /////////////////////////////////////////////////////////////////////////////////
    10221021
     
    10251024    core    = &cluster->core_tbl[core_lid];
    10261025
    1027     // all CP0s initialize the process_zero descriptor
    1028     if( core_lid == 0 ) process_zero_create( &process_zero );
    1029 
    1030     // CP0 in cluster 0 initializes the PIC chdev,
     1026    // core[0] initializes the process_zero descriptor,
     1027    if( core_lid == 0 ) process_zero_create( &process_zero , info );
     1028
     1029    /////////////////////////////////////////////////////////////////////////////////
     1030    if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
     1031                                        (info->x_size * info->y_size) );
     1032    barrier_wait( &local_barrier , info->cores_nr );
     1033    /////////////////////////////////////////////////////////////////////////////////
     1034
     1035#if DEBUG_KERNEL_INIT
     1036if( (core_lid ==  0) & (local_cxy == 0) )
     1037printk("\n[%s] : exit barrier 3 : kernel process initialized / cycle %d\n",
     1038__FUNCTION__, (uint32_t)hal_get_cycles() );
     1039#endif
     1040
     1041    /////////////////////////////////////////////////////////////////////////////////
     1042    // STEP 4 : all cores initialize their private MMU
     1043    //          core[0] in cluster 0 initializes the IOPIC device.
     1044    /////////////////////////////////////////////////////////////////////////////////
     1045
     1046    // all cores initialise their MMU
     1047    hal_mmu_init( &process_zero.vmm.gpt );
     1048
     1049    // core[0] in cluster[0] initializes the PIC chdev,
    10311050    if( (core_lid == 0) && (local_cxy == 0) ) iopic_init( info );
    10321051   
     
    10391058#if DEBUG_KERNEL_INIT
    10401059if( (core_lid ==  0) & (local_cxy == 0) )
    1041 printk("\n[%s] : exit barrier 2 : PIC initialised / cycle %d\n",
     1060printk("\n[%s] : exit barrier 4 : MMU and IOPIC initialized / cycle %d\n",
    10421061__FUNCTION__, (uint32_t)hal_get_cycles() );
    10431062#endif
    10441063
    10451064    ////////////////////////////////////////////////////////////////////////////////
    1046     // STEP 3 : CP0 initializes the distibuted LAPIC descriptor.
    1047     //          CP0 initializes the internal chdev descriptors
    1048     //          CP0 initialize the local external chdev descriptors
     1065    // STEP 5 : core[0] initializes the distributed LAPIC descriptor.
     1066    //          core[0] initializes the internal chdev descriptors
     1067    //          core[0] initializes the local external chdev descriptors
    10491068    ////////////////////////////////////////////////////////////////////////////////
    10501069
    1051     // all CP0s initialize their local LAPIC extension,
     1070    // all core[0]s initialize their local LAPIC extension,
    10521071    if( core_lid == 0 ) lapic_init( info );
    10531072
    1054     // CP0 scan the internal (private) peripherals,
     1073    // core[0] scans the internal (private) peripherals,
    10551074    // and allocates memory for the corresponding chdev descriptors.
    10561075    if( core_lid == 0 ) internal_devices_init( info );
    10571076       
    10581077
    1059     // All CP0s contribute to initialise external peripheral chdev descriptors.
    1060     // Each CP0[cxy] scan the set of external (shared) peripherals (but the TXT0),
     1078    // All core[0]s contribute to initialise external peripheral chdev descriptors.
     1079    // Each core[0][cxy] scans the set of external (shared) peripherals (but the TXT0),
    10611080    // and allocates memory for the chdev descriptors that must be placed
    10621081    // on the (cxy) cluster according to the global index value.
     
    10721091#if DEBUG_KERNEL_INIT
    10731092if( (core_lid ==  0) & (local_cxy == 0) )
    1074 printk("\n[%s] : exit barrier 3 : all chdevs initialised / cycle %d\n",
     1093printk("\n[%s] : exit barrier 5 : all chdevs initialised / cycle %d\n",
    10751094__FUNCTION__, (uint32_t)hal_get_cycles() );
    10761095#endif
     
    10821101   
    10831102    /////////////////////////////////////////////////////////////////////////////////
    1084     // STEP 4 : All cores enable IPI (Inter Procesor Interrupt),
     1103    // STEP 6 : All cores enable IPI (Inter Processor Interrupt),
    10851104    //          All cores initialize their IDLE thread.
    1086     //          Only CP0 in cluster 0 creates the VFS root inode.
     1105    //          Only core[0] in cluster[0] creates the VFS root inode.
    10871106    //          It accesses the boot device to initialize the file system context.
    10881107    /////////////////////////////////////////////////////////////////////////////////
     
    11071126#endif
    11081127
    1109     // CPO in cluster 0 creates the VFS root
     1128    // core[0] in cluster[0] creates the VFS root
    11101129    if( (core_lid ==  0) && (local_cxy == 0 ) )
    11111130    {
     
    11371156            // 4. create VFS root inode in cluster 0
    11381157            error = vfs_inode_create( FS_TYPE_FATFS,                       // fs_type
    1139                                       INODE_TYPE_DIR,                      // inode_type
    11401158                                      0,                                   // attr
    11411159                                      0,                                   // rights
     
    11501168            }
    11511169
    1152             // 5. update FATFS root inode extension 
     1170            // 5. update FATFS root inode "type" and "extend" fields 
    11531171            cxy_t         vfs_root_cxy = GET_CXY( vfs_root_inode_xp );
    11541172            vfs_inode_t * vfs_root_ptr = GET_PTR( vfs_root_inode_xp );
     1173            hal_remote_s32( XPTR( vfs_root_cxy , &vfs_root_ptr->type ), INODE_TYPE_DIR );
    11551174            hal_remote_spt( XPTR( vfs_root_cxy , &vfs_root_ptr->extend ),
    11561175                            (void*)(intptr_t)root_dir_cluster );
     
    11891208#if DEBUG_KERNEL_INIT
    11901209if( (core_lid ==  0) & (local_cxy == 0) )
    1191 printk("\n[%s] : exit barrier 4 : VFS root (%x,%x) in cluster 0 / cycle %d\n",
     1210printk("\n[%s] : exit barrier 6 : VFS root (%x,%x) in cluster 0 / cycle %d\n",
    11921211__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    11931212GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
     
    11951214
    11961215    /////////////////////////////////////////////////////////////////////////////////
    1197     // STEP 5 : Other CP0s allocate memory for the selected FS context,
    1198     //          and initialise both the local FS context and the local VFS context
    1199     //          from values stored in cluster 0.
     1216    // STEP 7 : In all clusters other than cluster[0], core[0] allocates memory
     1217    //          for the selected FS context, and initialises the local FS context and
     1218    //          the local VFS context from values stored in cluster 0.
    12001219    //          They get the VFS root inode extended pointer from cluster 0.
    12011220    /////////////////////////////////////////////////////////////////////////////////
     
    12591278#if DEBUG_KERNEL_INIT
    12601279if( (core_lid ==  0) & (local_cxy == 1) )
    1261 printk("\n[%s] : exit barrier 5 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
     1280printk("\n[%s] : exit barrier 7 : VFS root (%x,%x) in cluster 1 / cycle %d\n",
    12621281__FUNCTION__, GET_CXY(process_zero.vfs_root_xp),
    12631282GET_PTR(process_zero.vfs_root_xp), (uint32_t)hal_get_cycles() );
     
    12651284
    12661285    /////////////////////////////////////////////////////////////////////////////////
    1267     // STEP 6 : CP0 in cluster 0 makes the global DEVFS tree initialisation:
     1286    // STEP 8 : core[0] in cluster 0 makes the global DEVFS initialisation:
    12681287    //          It initializes the DEVFS context, and creates the DEVFS
    12691288    //          "dev" and "external" inodes in cluster 0.
     
    13091328#if DEBUG_KERNEL_INIT
    13101329if( (core_lid ==  0) & (local_cxy == 0) )
    1311 printk("\n[%s] : exit barrier 6 : DEVFS root initialized in cluster 0 / cycle %d\n",
     1330printk("\n[%s] : exit barrier 8 : DEVFS root initialized in cluster 0 / cycle %d\n",
    13121331__FUNCTION__, (uint32_t)hal_get_cycles() );
    13131332#endif
    13141333
    13151334    /////////////////////////////////////////////////////////////////////////////////
    1316     // STEP 7 : All CP0s complete in parallel the DEVFS tree initialization.
    1317     //          Each CP0 get the "dev" and "external" extended pointers from
     1335    // STEP 9 : All core[0]s complete the DEVFS initialization in parallel.
     1336    //          Each core[0] gets the "dev" and "external" extended pointers from
    13181337    //          values stored in cluster 0.
    1319     //          Then each CP0 in cluster(i) creates the DEVFS "internal" directory,
     1338    //          Then each core[0] in cluster(i) creates the DEVFS "internal" directory,
    13201339    //          and creates the pseudo-files for all chdevs in cluster (i).
    13211340    /////////////////////////////////////////////////////////////////////////////////
     
    13461365#if DEBUG_KERNEL_INIT
    13471366if( (core_lid ==  0) & (local_cxy == 0) )
    1348 printk("\n[%s] : exit barrier 7 : DEV initialized in cluster 0 / cycle %d\n",
     1367printk("\n[%s] : exit barrier 9 : DEVFS initialized in cluster 0 / cycle %d\n",
    13491368__FUNCTION__, (uint32_t)hal_get_cycles() );
    13501369#endif
    13511370
    1352     /////////////////////////////////////////////////////////////////////////////////
    1353     // STEP 8 : CP0 in cluster 0 creates the first user process (process_init)
     1371#if( DEBUG_KERNEL_INIT & 1 )
     1372if( (core_lid ==  0) & (local_cxy == 0) )
     1373vfs_display( vfs_root_inode_xp );
     1374#endif
     1375
     1376    /////////////////////////////////////////////////////////////////////////////////
     1377    // STEP 10 : core[0] in cluster 0 creates the first user process (process_init).
     1378    //           This includes the first user process VMM (GPT and VSL) creation.
     1379    //           Finally, it prints the ALMOS-MKH banner.
    13541380    /////////////////////////////////////////////////////////////////////////////////
    13551381
    13561382    if( (core_lid == 0) && (local_cxy == 0) )
    13571383    {
    1358 
    1359 #if( DEBUG_KERNEL_INIT & 1 )
    1360 vfs_display( vfs_root_inode_xp );
    1361 #endif
    1362 
    13631384       process_init_create();
    13641385    }
    1365 
    1366     /////////////////////////////////////////////////////////////////////////////////
    1367     if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
    1368                                         (info->x_size * info->y_size) );
    1369     barrier_wait( &local_barrier , info->cores_nr );
    1370     /////////////////////////////////////////////////////////////////////////////////
    1371 
    1372 #if DEBUG_KERNEL_INIT
    1373 if( (core_lid ==  0) & (local_cxy == 0) )
    1374 printk("\n[%s] : exit barrier 8 : process init created / cycle %d\n",
    1375 __FUNCTION__, (uint32_t)hal_get_cycles() );
    1376 #endif
    13771386
    13781387#if (DEBUG_KERNEL_INIT & 1)
     
    13811390#endif
    13821391
    1383     /////////////////////////////////////////////////////////////////////////////////
    1384     // STEP 9 : CP0 in cluster 0 print banner
    1385     /////////////////////////////////////////////////////////////////////////////////
    1386    
    13871392    if( (core_lid == 0) && (local_cxy == 0) )
    13881393    {
    13891394        print_banner( (info->x_size * info->y_size) , info->cores_nr );
     1395    }
    13901396
    13911397#if( DEBUG_KERNEL_INIT & 1 )
     1398if( (core_lid ==  0) & (local_cxy == 0) )
    13931400printk("\n\n***** memory footprint for main kernel objects\n\n"
    13931400                   " - thread descriptor  : %d bytes\n"
     
    14371444#endif
    14381445
    1439     }
     1446    // each core updates the register(s) defining the kernel
     1447    // entry points for interrupts, exceptions and syscalls...
     1448    hal_set_kentry();
    14401449
    14411450    // each core activates its private TICK IRQ
     
    14481457    /////////////////////////////////////////////////////////////////////////////////
    14491458
    1450 #if DEBUG_KERNEL_INIT
     1459#if( DEBUG_KERNEL_INIT & 1 )
    14511460thread_t * this = CURRENT_THREAD;
    14521461printk("\n[%s] : thread[%x,%x] on core[%x,%d] jumps to thread_idle_func() / cycle %d\n",
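
    Each STEP in kernel_init() ends with the same two-level synchronisation pattern,
    visible several times in the hunks above. The excerpt below reproduces it with
    explanatory comments (the comments are an interpretation, not part of the source):

        // only core[0] of each cluster enters the global barrier:
        // one participant per cluster, i.e. x_size * y_size participants
        if( core_lid == 0 ) xbarrier_wait( XPTR( 0 , &global_barrier ),
                                           (info->x_size * info->y_size) );

        // all cores of the local cluster then meet on the local barrier,
        // so no core starts the next step before all clusters completed this one
        barrier_wait( &local_barrier , info->cores_nr );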
  • trunk/kernel/kern/printk.c

    r583 r623  
    4848
    4949    va_list    args;      // printf arguments
    50     uint32_t   ps;        // write pointer to the string buffer
     50    uint32_t   ps;        // pointer to the string buffer
    5151
    5252    ps = 0;   
     
    5757    while ( *format != 0 )
    5858    {
    59 
    6059        if (*format == '%')   // copy argument to string
    6160        {
     
    9897                break;
    9998            }
    100             case ('d'):             // decimal signed integer
     99            case ('b'):             // exactly 2 digits hexadecimal integer
     100            {
     101                int  val = va_arg( args, int );
     102                int  val_lsb = val & 0xF;
     103                int  val_msb = (val >> 4) & 0xF;
     104                buf[0] = HexaTab[val_msb];
     105                buf[1] = HexaTab[val_lsb];
     106                len  = 2;
     107                pbuf = buf;
     108                break;
     109            }
     110            case ('d'):             // up to 10 digits decimal signed integer
    101111            {
    102112                int val = va_arg( args, int );
     
    108118                for(i = 0; i < 10; i++)
    109119                {
    110 
    111120                    buf[9 - i] = HexaTab[val % 10];
    112121                    if (!(val /= 10)) break;
     
    116125                break;
    117126            }
    118             case ('u'):             // decimal unsigned integer
     127            case ('u'):             // up to 10 digits decimal unsigned integer
    119128            {
    120129                uint32_t val = va_arg( args, uint32_t );
     
    128137                break;
    129138            }
    130             case ('x'):             // 32 bits hexadecimal
    131             case ('l'):             // 64 bits hexadecimal
     139            case ('x'):             // up to 8 digits hexadecimal
     140            case ('l'):             // up to 16 digits hexadecimal
    132141            {
    133142                uint32_t imax;
     
    157166                break;
    158167            }
    159             case ('X'):             // 32 bits hexadecimal on 8 characters
     168            case ('X'):             // exactly 8 digits hexadecimal
    160169            {
    161170                uint32_t val = va_arg( args , uint32_t );
     
    238247            case ('c'):             /* char conversion */
    239248            {
    240                 int val = va_arg( *args , int );
     249                int  val = va_arg( *args , int );
    241250                len = 1;
    242                 buf[0] = val;
     251                buf[0] = (char)val;
    243252                pbuf = &buf[0];
    244253                break;
    245254            }
    246             case ('d'):             /* 32 bits decimal signed  */
     255            case ('b'):             // exactly 2 digits hexadecimal
     256            {
     257                int  val = va_arg( *args, int );
     258                int  val_lsb = val & 0xF;
     259                int  val_msb = (val >> 4) & 0xF;
     260                buf[0] = HexaTab[val_msb];
     261                buf[1] = HexaTab[val_lsb];
     262                len  = 2;
     263                pbuf = buf;
     264                break;
     265            }
     266            case ('d'):             /* up to 10 digits signed decimal */
    247267            {
    248268                int val = va_arg( *args , int );
     
    261281                break;
    262282            }
    263             case ('u'):             /* 32 bits decimal unsigned */
     283            case ('u'):             /* up to 10 digits unsigned decimal */
    264284            {
    265285                uint32_t val = va_arg( *args , uint32_t );
     
    273293                break;
    274294            }
    275             case ('x'):             /* 32 bits hexadecimal unsigned */
     295            case ('x'):             /* up to 8 digits hexadecimal */
    276296            {
    277297                uint32_t val = va_arg( *args , uint32_t );
     
    286306                break;
    287307            }
    288             case ('X'):             /* 32 bits hexadecimal unsigned  on 10 char */
     308            case ('X'):             /* exactly 8 digits hexadecimal */
    289309            {
    290310                uint32_t val = va_arg( *args , uint32_t );
     
    299319                break;
    300320            }
    301             case ('l'):            /* 64 bits hexadecimal unsigned */
    302             {
    303                 unsigned long long val = va_arg( *args , unsigned long long );
     321            case ('l'):            /* up to 16 digits hexadecimal */
     322            {
     323                uint64_t val = va_arg( *args , uint64_t );
    304324                dev_txt_sync_write( "0x" , 2 );
    305325                for(i = 0; i < 16; i++)
     
    312332                break;
    313333            }
    314             case ('L'):           /* 64 bits hexadecimal unsigned on 18 char */
    315             {
    316                 unsigned long long val = va_arg( *args , unsigned long long );
     334            case ('L'):           /* exactly 16 digits hexadecimal */
     335            {
     336                uint64_t val = va_arg( *args , uint64_t );
    317337                dev_txt_sync_write( "0x" , 2 );
    318338                for(i = 0; i < 16; i++)
     
    525545}
    526546
     547/////////////////////////////
     548void putb( char     * string,
     549           uint8_t  * buffer,
     550           uint32_t   size )
     551{
     552    uint32_t line;
     553    uint32_t byte = 0;
     554
     555    // get pointers on TXT0 chdev
     556    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     557    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     558    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     559
     560    // get extended pointer on remote TXT0 chdev lock
     561    xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     562
     563    // get TXT0 lock
     564    remote_busylock_acquire( lock_xp );
     565
     566    // display string on TTY0
     567    nolock_printk("\n***** %s *****\n", string );
     568
     569    for ( line = 0 ; line < (size>>4) ; line++ )
     570    {
     571         nolock_printk(" %X | %b %b %b %b | %b %b %b %b | %b %b %b %b | %b %b %b %b \n",
     572         byte,
     573         buffer[byte+ 0],buffer[byte+ 1],buffer[byte+ 2],buffer[byte+ 3],
     574         buffer[byte+ 4],buffer[byte+ 5],buffer[byte+ 6],buffer[byte+ 7],
     575         buffer[byte+ 8],buffer[byte+ 9],buffer[byte+10],buffer[byte+11],
     576         buffer[byte+12],buffer[byte+13],buffer[byte+14],buffer[byte+15] );
     577
     578         byte += 16;
     579    }
     580
     581    // release TXT0 lock
     582    remote_busylock_release( lock_xp );
     583}
     584
     585
    527586
    528587// Local Variables:
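
The putb() function added above provides a quick hexadecimal dump facility for debug. A minimal usage sketch, assuming <printk.h> is included and the buffer is a valid local pointer; the function name debug_dump_page is an illustration, not part of the changeset:

    // display four 16-byte lines on TXT0, each line prefixed by its byte offset;
    // putb() internally uses the %X (offset) and %b (byte) formats shown above
    static void debug_dump_page( uint8_t * page_base )
    {
        putb( "page content" , page_base , 64 );
    }
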
  • trunk/kernel/kern/printk.h

    r583 r623  
    2424///////////////////////////////////////////////////////////////////////////////////
    2525// The printk.c and printk.h files define the functions used by the kernel
    26 // to display messages on a text terminal.
    27 // Two access modes are supported:
    28 // - The printk() function displays kernel messages on the kernel terminal TXT0,
    29 //   using a busy waiting policy: It calls directly the relevant TXT driver,
    30 //   after taking the TXT0 busylock for exclusive access to the TXT0 terminal.
    31 // - The user_printk() function displays messages on the calling thread private
    32 //   terminal, using a descheduling policy: it register the request in the selected
    33 //   TXT chdev waiting queue and deschedule. The calling thread is reactivated by
    34 //   the IRQ signalling completion.
    35 // Both functions use the generic TXT device to call the proper implementation
    36 // dependant TXT driver.
    37 // Finally these files define a set of conditional trace <***_dmsg> for debug.
     26// to display messages on the kernel terminal TXT0, using a busy waiting policy.
      27// It calls the TXT0 driver synchronously, without descheduling.
    3828///////////////////////////////////////////////////////////////////////////////////
    3929
     
    4434#include <stdarg.h>
    4535
    46 #include <hal_special.h> // hal_get_cycles()
     36#include <hal_special.h>
    4737
    4838/**********************************************************************************
     4939 * This function builds a formatted string.
    5040 * The supported formats are defined below :
    51  *   %c : single character
    52  *   %d : signed decimal 32 bits integer
    53  *   %u : unsigned decimal 32 bits integer
    54  *   %x : hexadecimal 32 bits integer
    55  *   %l : hexadecimal 64 bits integer
     41 *   %b : exactly 2 digits hexadecimal integer (8 bits)
      42 *   %c : single ASCII character (8 bits)
      43 *   %d : up to 10 digits signed decimal integer (32 bits)
     44 *   %u : up to 10 digits unsigned decimal (32 bits)
     45 *   %x : up to 8 digits hexadecimal integer (32 bits)
     46 *   %X : exactly 8 digits hexadecimal integer (32 bits)
     47 *   %l : up to 16 digits hexadecimal integer (64 bits)
     48 *   %L : exactly 16 digits hexadecimal integer (64 bits)
    5649 *   %s : NUL terminated character string
    5750 **********************************************************************************
     
    153146void putl( uint64_t val );
    154147
     148/**********************************************************************************
     149 * This debug function displays on the kernel TXT0 terminal the content of an
     150 * array of bytes defined by <buffer> and <size> arguments (16 bytes per line).
     151 * The <string> argument is displayed before the buffer content.
      152 * The line format is an address followed by 16 (hexa) bytes.
     153 **********************************************************************************
     154 * @ string   : buffer name or identifier.
     155 * @ buffer   : local pointer on bytes array.
      156 * @ size     : number of bytes to display.
     157 *********************************************************************************/
     158void putb( char     * string,
     159           uint8_t  * buffer,
     160           uint32_t   size );
     161
     162
    155163
    156164#endif  // _PRINTK_H
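
The format list documented above can be illustrated with a single printk() call. A short sketch, assuming a kernel context where <printk.h> is included; all variable values are hypothetical:

    uint32_t pid   = 0x12;                   // hypothetical process identifier
    int      count = -3;                     // hypothetical signed value
    uint8_t  flags = 0x0F;                   // hypothetical 8 bits value
    uint64_t paddr = 0x200000ULL;            // hypothetical 64 bits physical address

    // %x : up to 8 hexa digits / %d : signed decimal /
    // %b : exactly 2 hexa digits / %L : exactly 16 hexa digits
    printk("\n[%s] pid %x / count %d / flags %b / paddr %L\n",
           __FUNCTION__ , pid , count , flags , paddr );
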
  • trunk/kernel/kern/process.c

    r619 r623  
    2929#include <hal_uspace.h>
    3030#include <hal_irqmask.h>
     31#include <hal_vmm.h>
    3132#include <errno.h>
    3233#include <printk.h>
     
    486487    }
    487488
    488     // FIXME decrement the refcount on file pointer by vfs_bin_xp [AG]
     489    // FIXME decrement the refcount on file pointer for vfs_bin_xp [AG]
     490
    489491    // FIXME close all open files [AG]
     492
    490493    // FIXME synchronize dirty files [AG]
    491494
     
    14871490        printk("\n[ERROR] in %s : cannot initialise VMM for %s\n", __FUNCTION__ , path );
    14881491        vfs_close( file_xp , file_id );
    1489         // FIXME restore old process VMM
     1492        // FIXME restore old process VMM [AG]
    14901493        return -1;
    14911494    }
     
    15051508                printk("\n[ERROR] in %s : failed to access <%s>\n", __FUNCTION__ , path );
    15061509        vfs_close( file_xp , file_id );
    1507         // FIXME restore old process VMM
     1510        // FIXME restore old process VMM [AG]
    15081511        return -1;
    15091512        }
     
    15351538
    15361539
    1537 ///////////////////////////////////////////////
    1538 void process_zero_create( process_t * process )
     1540////////////////////////////////////////////////
     1541void process_zero_create( process_t   * process,
     1542                          boot_info_t * info )
    15391543{
    15401544    error_t error;
     
    15661570    process->parent_xp  = XPTR( local_cxy , process );
    15671571    process->term_state = 0;
     1572
     1573    // initialise kernel GPT and VSL, depending on architecture
     1574    hal_vmm_kernel_init( info );
    15681575
    15691576    // reset th_tbl[] array and associated fields
  • trunk/kernel/kern/process.h

    r618 r623  
    7373 * is always stored in the same cluster as the inode associated to the file.
    7474 * A free entry in this array contains the XPTR_NULL value.
    75  * The array size is defined by a the CONFIG_PROCESS_FILE_MAX_NR parameter.
     75 * The array size is defined by the CONFIG_PROCESS_FILE_MAX_NR parameter.
    7676 *
    7777 * NOTE: - Only the fd_array[] in the reference process contains a complete list of open
     
    7979 *       - the fd_array[] in a process copy is simply a cache containing a subset of the
    8080 *         open files to speed the fdid to xptr translation, but the "lock" and "current
    81  *         fields should not be used.
     81 *         fields are not used.
    8282 *       - all modifications made by the process_fd_remove() are done in reference cluster
    8383 *         and reported in all process_copies.
     
    200200
    201201/*********************************************************************************************
    202  * This function initialize, in each cluster, the kernel "process_zero", that is the owner
    203  * of all kernel threads in a given cluster. It is called by the kernel_init() function.
      202 * This function initializes, in each cluster, the kernel "process_zero", which contains
     203 * all kernel threads in a given cluster. It is called by the kernel_init() function.
    204204 * The process_zero descriptor is allocated as a global variable in file kernel_init.c
    205205 * Both the PID and PPID fields are set to zero, the ref_xp is the local process_zero,
    206206 * and the parent process is set to XPTR_NULL. The th_tbl[] is initialized as empty.
    207  *********************************************************************************************
    208  * @ process      : [in] pointer on local process descriptor to initialize.
    209  ********************************************************************************************/
    210 void process_zero_create( process_t * process );
     207 * The process GPT is initialised as required by the target architecture.
     208 * The "kcode" and "kdata" segments are registered in the process VSL.
     209 *********************************************************************************************
     210 * @ process  : [in] pointer on process descriptor to initialize.
     211 * @ info     : pointer on local boot_info_t (for kernel segments base and size).
     212 ********************************************************************************************/
     213void process_zero_create( process_t   * process,
     214                          boot_info_t * info );
    211215
    212216/*********************************************************************************************
     
    428432 * identified by the <process_xp> argument, register the <file_xp> argument in the
    429433 * allocated slot, and return the slot index in the <fdid> buffer.
    430  * It can be called by any thread in any cluster, because it uses portable remote access
     434 * It can be called by any thread in any cluster, because it uses remote access
    431435 * primitives to access the reference process descriptor.
    432436 * It takes the lock protecting the reference fd_array against concurrent accesses.
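
The new process_zero_create() prototype documented above requires the caller to pass the boot_info_t structure. A minimal sketch of the updated call site, assuming it is made from kernel_init(), that the global descriptor is named process_zero, and that info points to the local boot_info_t (both names are assumptions):

    // create the local kernel process: initialise its GPT as required by the
    // architecture and register the "kcode" and "kdata" vsegs in its VSL
    process_zero_create( &process_zero , info );
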
  • trunk/kernel/kern/rpc.c

    r619 r623  
    22 * rpc.c - RPC operations implementation.
    33 *
    4  * Author    Alain Greiner (2016,2017,2018)
     4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c)  UPMC Sorbonne Universites
     
    5858    &rpc_thread_user_create_server,        // 6
    5959    &rpc_thread_kernel_create_server,      // 7
    60     &rpc_undefined,                        // 8    unused slot       
     60    &rpc_vfs_fs_update_dentry_server,      // 8
    6161    &rpc_process_sigaction_server,         // 9
    6262
     
    6767    &rpc_vfs_file_create_server,           // 14
    6868    &rpc_vfs_file_destroy_server,          // 15
    69     &rpc_vfs_fs_get_dentry_server,         // 16
     69    &rpc_vfs_fs_new_dentry_server,         // 16
    7070    &rpc_vfs_fs_add_dentry_server,         // 17
    7171    &rpc_vfs_fs_remove_dentry_server,      // 18
     
    7676    &rpc_kcm_alloc_server,                 // 22
    7777    &rpc_kcm_free_server,                  // 23
    78     &rpc_undefined,                        // 24   unused slot
     78    &rpc_mapper_sync_server,               // 24
    7979    &rpc_mapper_handle_miss_server,        // 25
    8080    &rpc_vmm_delete_vseg_server,           // 26
     
    9494    "THREAD_USER_CREATE",        // 6
    9595    "THREAD_KERNEL_CREATE",      // 7
    96     "undefined",                 // 8
     96    "VFS_FS_UPDATE_DENTRY",      // 8
    9797    "PROCESS_SIGACTION",         // 9
    9898
     
    112112    "KCM_ALLOC",                 // 22
    113113    "KCM_FREE",                  // 23
    114     "undefined",                 // 24
     114    "MAPPER_SYNC",               // 24
    115115    "MAPPER_HANDLE_MISS",        // 25
    116116    "VMM_DELETE_VSEG",           // 26
     
    921921
    922922/////////////////////////////////////////////////////////////////////////////////////////
    923 // [7]      Marshaling functions attached to RPC_THREAD_KERNEL_CREATE (blocking)
     923// [7]      Marshaling functions attached to RPC_THREAD_KERNEL_CREATE
    924924/////////////////////////////////////////////////////////////////////////////////////////
    925925
     
    10131013
    10141014/////////////////////////////////////////////////////////////////////////////////////////
    1015 // [8]   undefined slot
    1016 /////////////////////////////////////////////////////////////////////////////////////////
    1017 
      1015// [8]   Marshaling functions attached to RPC_VFS_FS_UPDATE_DENTRY
     1016/////////////////////////////////////////////////////////////////////////////////////////
     1017
     1018/////////////////////////////////////////////////////////
     1019void rpc_vfs_fs_update_dentry_client( cxy_t          cxy,
     1020                                      vfs_inode_t  * inode,
     1021                                      vfs_dentry_t * dentry,
     1022                                      uint32_t       size,
     1023                                      error_t      * error )
     1024{
     1025#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1026thread_t * this = CURRENT_THREAD;
     1027uint32_t cycle = (uint32_t)hal_get_cycles();
     1028if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1029printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     1030__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1031#endif
     1032
     1033    uint32_t responses = 1;
     1034
     1035    // initialise RPC descriptor header
     1036    rpc_desc_t  rpc;
     1037    rpc.index    = RPC_VFS_FS_UPDATE_DENTRY;
     1038    rpc.blocking = true;
     1039    rpc.rsp      = &responses;
     1040
     1041    // set input arguments in RPC descriptor
     1042    rpc.args[0] = (uint64_t)(intptr_t)inode;
     1043    rpc.args[1] = (uint64_t)(intptr_t)dentry;
     1044    rpc.args[2] = (uint64_t)size;
     1045
     1046    // register RPC request in remote RPC fifo
     1047    rpc_send( cxy , &rpc );
     1048
     1049    // get output values from RPC descriptor
     1050    *error   = (error_t)rpc.args[3];
     1051
     1052#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1053cycle = (uint32_t)hal_get_cycles();
     1054if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1055printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     1056__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1057#endif
     1058}
     1059
     1060/////////////////////////////////////////////////
     1061void rpc_vfs_fs_update_dentry_server( xptr_t xp )
     1062{
     1063#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1064thread_t * this = CURRENT_THREAD;
     1065uint32_t cycle = (uint32_t)hal_get_cycles();
     1066if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1067printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     1068__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1069#endif
     1070
     1071    error_t        error;
     1072    vfs_inode_t  * inode;
     1073    vfs_dentry_t * dentry;
     1074    uint32_t       size;
     1075
     1076    // get client cluster identifier and pointer on RPC descriptor
     1077    cxy_t        client_cxy  = GET_CXY( xp );
     1078    rpc_desc_t * desc        = GET_PTR( xp );
     1079
     1080    // get input arguments
     1081    inode  = (vfs_inode_t*)(intptr_t) hal_remote_l64(XPTR(client_cxy , &desc->args[0]));
     1082    dentry = (vfs_dentry_t*)(intptr_t)hal_remote_l64(XPTR(client_cxy , &desc->args[1]));
     1083    size   = (uint32_t)               hal_remote_l64(XPTR(client_cxy , &desc->args[2]));
     1084
     1085    // call the kernel function
     1086    error = vfs_fs_update_dentry( inode , dentry , size );
     1087
     1088    // set output argument
     1089    hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
     1090
     1091#if DEBUG_RPC_VFS_FS_UPDATE_DENTRY
     1092cycle = (uint32_t)hal_get_cycles();
     1093if( cycle > DEBUG_RPC_VFS_FS_UPDATE_DENTRY )
     1094printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     1095__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     1096#endif
     1097}
    10181098
    10191099/////////////////////////////////////////////////////////////////////////////////////////
     
    11101190void rpc_vfs_inode_create_client( cxy_t          cxy,     
    11111191                                  uint32_t       fs_type,    // in
    1112                                   uint32_t       inode_type, // in
    11131192                                  uint32_t       attr,       // in
    11141193                                  uint32_t       rights,     // in
     
    11361215    // set input arguments in RPC descriptor
    11371216    rpc.args[0] = (uint64_t)fs_type;
    1138     rpc.args[1] = (uint64_t)inode_type;
    1139     rpc.args[2] = (uint64_t)attr;
    1140     rpc.args[3] = (uint64_t)rights;
    1141     rpc.args[4] = (uint64_t)uid;
    1142     rpc.args[5] = (uint64_t)gid;
     1217    rpc.args[1] = (uint64_t)attr;
     1218    rpc.args[2] = (uint64_t)rights;
     1219    rpc.args[3] = (uint64_t)uid;
     1220    rpc.args[4] = (uint64_t)gid;
    11431221
    11441222    // register RPC request in remote RPC fifo
     
    11461224
    11471225    // get output values from RPC descriptor
    1148     *inode_xp = (xptr_t)rpc.args[6];
    1149     *error    = (error_t)rpc.args[7];
     1226    *inode_xp = (xptr_t)rpc.args[5];
     1227    *error    = (error_t)rpc.args[6];
    11501228
    11511229#if DEBUG_RPC_VFS_INODE_CREATE
     
    11691247
    11701248    uint32_t         fs_type;
    1171     uint32_t         inode_type;
    11721249    uint32_t         attr;
    11731250    uint32_t         rights;
     
    11831260    // get input arguments from client rpc descriptor
    11841261    fs_type    = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    1185     inode_type = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    1186     attr       = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    1187     rights     = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
    1188     uid        = (uid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
    1189     gid        = (gid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[5] ) );
     1262    attr       = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     1263    rights     = (uint32_t)  hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     1264    uid        = (uid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
     1265    gid        = (gid_t)     hal_remote_l64( XPTR( client_cxy , &desc->args[4] ) );
    11901266
    11911267    // call local kernel function
    11921268    error = vfs_inode_create( fs_type,
    1193                               inode_type,
    11941269                              attr,
    11951270                              rights,
     
    11991274
    12001275    // set output arguments
    1201     hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)inode_xp );
    1202     hal_remote_s64( XPTR( client_cxy , &desc->args[7] ) , (uint64_t)error );
     1276    hal_remote_s64( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)inode_xp );
     1277    hal_remote_s64( XPTR( client_cxy , &desc->args[6] ) , (uint64_t)error );
    12031278
    12041279#if DEBUG_RPC_VFS_INODE_CREATE
     
    16011676
    16021677/////////////////////////////////////////////////////////
    1603 void rpc_vfs_fs_get_dentry_client( cxy_t         cxy,
     1678void rpc_vfs_fs_new_dentry_client( cxy_t         cxy,
    16041679                                   vfs_inode_t * parent_inode,    // in
    16051680                                   char        * name,            // in
     
    16431718
    16441719//////////////////////////////////////////////
    1645 void rpc_vfs_fs_get_dentry_server( xptr_t xp )
     1720void rpc_vfs_fs_new_dentry_server( xptr_t xp )
    16461721{
    16471722#if DEBUG_RPC_VFS_FS_GET_DENTRY
     
    16741749
    16751750    // call the kernel function
    1676     error = vfs_fs_get_dentry( parent , name_copy , child_xp );
     1751    error = vfs_fs_new_dentry( parent , name_copy , child_xp );
    16771752
    16781753    // set output argument
     
    22452320
    22462321/////////////////////////////////////////////////////////////////////////////////////////
    2247 // [24]          undefined slot
    2248 /////////////////////////////////////////////////////////////////////////////////////////
      2322// [24]          Marshaling functions attached to RPC_MAPPER_SYNC
     2323/////////////////////////////////////////////////////////////////////////////////////////
     2324
     2325///////////////////////////////////////////////////
     2326void rpc_mapper_sync_client( cxy_t             cxy,
     2327                             struct mapper_s * mapper,
     2328                             error_t         * error )
     2329{
     2330#if DEBUG_RPC_MAPPER_SYNC
     2331thread_t * this = CURRENT_THREAD;
     2332uint32_t cycle = (uint32_t)hal_get_cycles();
     2333if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2334printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2335__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2336#endif
     2337
     2338    uint32_t responses = 1;
     2339
     2340    // initialise RPC descriptor header
     2341    rpc_desc_t  rpc;
     2342    rpc.index    = RPC_MAPPER_SYNC;
     2343    rpc.blocking = true;
     2344    rpc.rsp      = &responses;
     2345
     2346    // set input arguments in RPC descriptor
     2347    rpc.args[0] = (uint64_t)(intptr_t)mapper;
     2348
     2349    // register RPC request in remote RPC fifo
     2350    rpc_send( cxy , &rpc );
     2351
     2352    // get output values from RPC descriptor
     2353    *error   = (error_t)rpc.args[1];
     2354
     2355#if DEBUG_RPC_MAPPER_SYNC
     2356cycle = (uint32_t)hal_get_cycles();
     2357if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2358printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2359__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2360#endif
     2361}
     2362
     2363////////////////////////////////////////
     2364void rpc_mapper_sync_server( xptr_t xp )
     2365{
     2366#if DEBUG_RPC_MAPPER_SYNC
     2367thread_t * this = CURRENT_THREAD;
     2368uint32_t cycle = (uint32_t)hal_get_cycles();
     2369if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2370printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2371__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2372#endif
     2373
     2374    mapper_t * mapper;
     2375    error_t    error;
     2376
     2377    // get client cluster identifier and pointer on RPC descriptor
     2378    cxy_t        client_cxy  = GET_CXY( xp );
     2379    rpc_desc_t * desc        = GET_PTR( xp );
     2380
     2381    // get arguments from client RPC descriptor
     2382    mapper  = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     2383
     2384    // call local kernel function
     2385    error = mapper_sync( mapper );
     2386
     2387    // set output argument to client RPC descriptor
     2388    hal_remote_s64( XPTR( client_cxy , &desc->args[1] ) , (uint64_t)error );
     2389
     2390#if DEBUG_RPC_MAPPER_SYNC
     2391cycle = (uint32_t)hal_get_cycles();
     2392if( cycle > DEBUG_RPC_MAPPER_SYNC )
     2393printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2394__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2395#endif
     2396}
    22492397
    22502398/////////////////////////////////////////////////////////////////////////////////////////
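
The new RPC_VFS_FS_UPDATE_DENTRY and RPC_MAPPER_SYNC services follow the usual client-side dispatch pattern: the kernel function is called directly when the target object is local, and through the RPC stub otherwise. A minimal sketch for the dentry update, assuming <rpc.h> and the VFS headers are included; the wrapper name update_dentry_size is hypothetical, <inode_xp> is the extended pointer on the directory inode, and <dentry> must be a pointer valid in that inode's cluster:

    static error_t update_dentry_size( xptr_t         inode_xp,
                                       vfs_dentry_t * dentry,
                                       uint32_t       size )
    {
        error_t       error;
        cxy_t         inode_cxy = GET_CXY( inode_xp );   // inode cluster
        vfs_inode_t * inode_ptr = GET_PTR( inode_xp );   // local pointer in that cluster

        if( inode_cxy == local_cxy )             // directory inode is local
        {
            error = vfs_fs_update_dentry( inode_ptr , dentry , size );
        }
        else                                     // directory inode is remote
        {
            rpc_vfs_fs_update_dentry_client( inode_cxy , inode_ptr , dentry , size , &error );
        }
        return error;
    }
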
  • trunk/kernel/kern/rpc.h

    r619 r623  
    6868    RPC_THREAD_USER_CREATE        = 6,
    6969    RPC_THREAD_KERNEL_CREATE      = 7,
    70     RPC_UNDEFINED_8               = 8,
     70    RPC_VFS_FS_UPDATE_DENTRY      = 8,
    7171    RPC_PROCESS_SIGACTION         = 9,
    7272
     
    8686    RPC_KCM_ALLOC                 = 22,
    8787    RPC_KCM_FREE                  = 23,
    88     RPC_UNDEFINED_24              = 24,
     88    RPC_MAPPER_SYNC               = 24,
    8989    RPC_MAPPER_HANDLE_MISS        = 25,
    9090    RPC_VMM_DELETE_VSEG           = 26,
     
    305305
    306306/***********************************************************************************
    307  * [8] undefined slot
    308  **********************************************************************************/
    309 
    310 /***********************************************************************************
    311  * [9] The RPC_PROCESS_SIGACTION allows any client thread to request to any cluster
    312  * execute a given sigaction, defined by the <action_type> for a given process,
     307 * [8] The RPC_VFS_FS_UPDATE_DENTRY allows a client thread to request a remote
     308 * cluster to update the <size> field of a directory entry in the mapper of a
     309 * remote directory inode, identified by the <inode> local pointer.
      310 * The target directory entry is identified by the <dentry> local pointer.
     311 ***********************************************************************************
     312 * @ cxy     : server cluster identifier.
     313 * @ inode   : [in] local pointer on remote directory inode.
     314 * @ dentry  : [in] local pointer on remote dentry.
     315 * @ size    : [in] new size value.
     316 * @ error   : [out] error status (0 if success).
     317 **********************************************************************************/
     318void rpc_vfs_fs_update_dentry_client( cxy_t                 cxy,
     319                                      struct vfs_inode_s  * inode,
     320                                      struct vfs_dentry_s * dentry,
     321                                      uint32_t              size,
     322                                      error_t             * error );
     323
     324void rpc_vfs_fs_update_dentry_server( xptr_t xp );
     325
     326/***********************************************************************************
     327 * [9] The RPC_PROCESS_SIGACTION allows a client thread to request a remote cluster
     328 * to execute a given sigaction, defined by the <action_type> for a given process,
    313329 * identified by the <pid> argument.
    314330 ***********************************************************************************
     
    340356void rpc_vfs_inode_create_client( cxy_t      cxy,
    341357                                  uint32_t   fs_type,
    342                                   uint32_t   inode_type,
    343358                                  uint32_t   attr,   
    344359                                  uint32_t   rights, 
     
    423438
    424439/***********************************************************************************
    425  * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_get_dentry()
     440 * [16] The RPC_VFS_FS_GET_DENTRY calls the vfs_fs_new_dentry()
    426441 * function in a remote cluster containing a parent inode directory to scan the
    427442 * associated mapper, find a directory entry identified by its name, and update
     
    434449 * @ error          : [out] error status (0 if success).
    435450 **********************************************************************************/
    436 void rpc_vfs_fs_get_dentry_client( cxy_t                cxy,
     451void rpc_vfs_fs_new_dentry_client( cxy_t                cxy,
    437452                                   struct vfs_inode_s * parent_inode,
    438453                                   char               * name,
     
    440455                                   error_t            * error );
    441456
    442 void rpc_vfs_fs_get_dentry_server( xptr_t xp );
     457void rpc_vfs_fs_new_dentry_server( xptr_t xp );
    443458
    444459/***********************************************************************************
     
    564579
    565580/***********************************************************************************
    566  * [24] undefined slot
    567  **********************************************************************************/
      581 * [24] The RPC_MAPPER_SYNC allows a client thread to write to disk
     582 * all dirty pages of a remote mapper.
     583 ***********************************************************************************
     584 * @ cxy       : server cluster identifier.
     585 * @ mapper    : [in] local pointer on mapper in server cluster.
      586 * @ error     : [out] error status (0 if success).
     587 **********************************************************************************/
     588void rpc_mapper_sync_client( cxy_t             cxy,
     589                             struct mapper_s * mapper,
     590                             error_t         * error );
     591
     592void rpc_mapper_sync_server( xptr_t xp );
    568593
    569594/***********************************************************************************
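
The RPC_MAPPER_SYNC prototypes above are used with the same local / remote dispatch. A short sketch, assuming the mapper is identified by an extended pointer; the wrapper name sync_remote_mapper is hypothetical:

    static error_t sync_remote_mapper( xptr_t mapper_xp )
    {
        error_t    error;
        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
        mapper_t * mapper_ptr = GET_PTR( mapper_xp );

        // flush dirty pages locally when possible, through the RPC otherwise
        if( mapper_cxy == local_cxy ) error = mapper_sync( mapper_ptr );
        else                          rpc_mapper_sync_client( mapper_cxy , mapper_ptr , &error );

        return error;
    }
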
  • trunk/kernel/kern/thread.c

    r620 r623  
    13821382                               const char * string )
    13831383{
     1384
    13841385    cxy_t      thread_cxy = GET_CXY( thread_xp );
    13851386    thread_t * thread_ptr = GET_PTR( thread_xp );
    13861387
    1387 #if( DEBUG_BUSYLOCK )
    1388 
    1389     xptr_t    iter_xp;
    1390 
    1391     // get relevant info from target trhead descriptor
     1388#if DEBUG_BUSYLOCK
     1389
     1390    xptr_t     iter_xp;
     1391
     1392    // get relevant info from target thread descriptor
    13921393    uint32_t    locks   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->busylocks ) );
    13931394    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    14291430    remote_busylock_release( txt0_lock_xp );
    14301431
     1432#else
     1433
     1434printk("\n[ERROR] in %s : set DEBUG_BUSYLOCK in kernel_config.h for %s / thread(%x,%x)\n",
     1435__FUNCTION__, string, thread_cxy, thread_ptr );
     1436
     1437#endif
     1438
    14311439    return;
    14321440
    1433 #endif
    1434 
    1435     // display a warning
    1436     printk("\n[WARNING] set DEBUG_BUSYLOCK in kernel_config.h to display busylocks" );
    1437 
    14381441}  // end thread_display_busylock()
    14391442
  • trunk/kernel/kernel_config.h

    r620 r623  
    8181#define DEBUG_FATFS_FREE_CLUSTERS         0
    8282#define DEBUG_FATFS_GET_CLUSTER           0
    83 #define DEBUG_FATFS_GET_DENTRY            0
    8483#define DEBUG_FATFS_GET_USER_DIR          0
    8584#define DEBUG_FATFS_MOVE_PAGE             0
    86 #define DEBUG_FATFS_RELEASE_INODE         0
     85#define DEBUG_FATFS_NEW_DENTRY            0
     86#define DEBUG_FATFS_RELEASE_INODE         1
    8787#define DEBUG_FATFS_REMOVE_DENTRY         0
    8888#define DEBUG_FATFS_SYNC_FAT              0
    8989#define DEBUG_FATFS_SYNC_FSINFO           0
    9090#define DEBUG_FATFS_SYNC_INODE            0
     91#define DEBUG_FATFS_UPDATE_DENTRY         0
    9192
    9293#define DEBUG_HAL_GPT_SET_PTE             0
     
    112113#define DEBUG_MAPPER_MOVE_USER            0
    113114#define DEBUG_MAPPER_MOVE_KERNEL          0
     115#define DEBUG_MAPPER_SYNC                 0
    114116
    115117#define DEBUG_MUTEX                       0
     
    130132#define DEBUG_PROCESS_ZERO_CREATE         0
    131133
    132 #define DEBUG_QUEUELOCK_TYPE              0    // lock type (0 is undefined)
     134#define DEBUG_QUEUELOCK_TYPE              0    // lock type (0 : undefined / 1000 : all types)
    133135
    134136#define DEBUG_RPC_CLIENT_GENERIC          0
     
    157159#define DEBUG_RPC_VMM_DELETE_VSEG         0
    158160
    159 #define DEBUG_RWLOCK_TYPE                 0    // lock type (0 is undefined)
     161#define DEBUG_RWLOCK_TYPE                 0    // lock type (0 : undefined / 1000 : all types)
    160162
    161163#define DEBUG_SCHED_HANDLE_SIGNALS        2
     
    234236#define DEBUG_VFS_OPENDIR                 0
    235237#define DEBUG_VFS_STAT                    0
    236 #define DEBUG_VFS_UNLINK                  0
     238#define DEBUG_VFS_UNLINK                  1
    237239
    238240#define DEBUG_VMM_CREATE_VSEG             0
     
    247249#define DEBUG_VMM_MMAP_ALLOC              0
    248250#define DEBUG_VMM_PAGE_ALLOCATE           0
     251#define DEBUG_VMM_RESIZE_VSEG             0
    249252#define DEBUG_VMM_SET_COW                 0
    250253#define DEBUG_VMM_UPDATE_PTE              0
  • trunk/kernel/libk/busylock.h

    r563 r623  
    3434 * a shared object located in a given cluster, made by thread(s) running in same cluster.
    3535 * It uses a busy waiting policy when the lock is taken by another thread, and should
    36  * be used to execute very short actions, such as basic allocators, or to protect
    37  * higher level synchronisation objects, such as queuelock or rwlock.
    38  * WARNING: a thread cannot yield when it is owning a busylock (local or remote).
      36 * only be used for very short actions, such as accessing basic allocators, or for
      37 * protecting higher level synchronisation objects (barriers, queuelocks, or rwlocks).
      38 * WARNING: a thread cannot yield while it owns a busylock.
    3939 *
    4040 * - To acquire the lock, we use a ticket policy to avoid starvation: the calling thread
  • trunk/kernel/libk/grdxt.h

    r610 r623  
    132132 * @ start_key  : key starting value for the scan.
    133133 * @ found_key  : [out] buffer for found key value.
    134  * return pointer on first valid item if found / return NULL if not found.
     134 * @ return pointer on first valid item if found / return NULL if not found.
    135135 ******************************************************************************************/
    136136void * grdxt_get_first( grdxt_t  * rt,
  • trunk/kernel/libk/queuelock.c

    r610 r623  
    6666    busylock_acquire( &lock->lock );
    6767
     68#if DEBUG_QUEUELOCK_TYPE
     69uint32_t   lock_type = lock->lock.type;
     70#endif
     71
    6872    // block and deschedule if lock already taken
    6973    while( lock->taken )
     
    7175
    7276#if DEBUG_QUEUELOCK_TYPE
    73 uint32_t   lock_type = lock->lock.type;
    74 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     77if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    7578printk("\n[%s ] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n",
    7679__FUNCTION__, this->process->pid, this->trdid,
     
    97100
    98101#if DEBUG_QUEUELOCK_TYPE
    99 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     102if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    100103printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n",
    101104__FUNCTION__, this->process->pid, this->trdid,
     
    123126uint32_t   lock_type = lock->lock.type;
    124127thread_t * this      = CURRENT_THREAD;
    125 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     128if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    126129printk("\n[%s] thread[%x,%x] RELEASE q_lock %s [%x,%x]\n",
    127130__FUNCTION__, this->process->pid, this->trdid,
     
    139142
    140143#if DEBUG_QUEUELOCK_TYPE
    141 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     144if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    142145printk("\n[%s] thread[%x,%x] UNBLOCK thread [%x,%x] / q_lock %s [%x,%x]\n",
    143146__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
  • trunk/kernel/libk/remote_barrier.c

    r619 r623  
    245245}  // end generic_barrier_wait()
    246246
    247 
     247/////////////////////////////////////////////////////
     248void generic_barrier_display( xptr_t gen_barrier_xp )
     249{
     250    // get cluster and local pointer
     251    generic_barrier_t * gen_barrier_ptr = GET_PTR( gen_barrier_xp );
     252    cxy_t               gen_barrier_cxy = GET_CXY( gen_barrier_xp );
     253
     254    // get barrier type and extend pointer
     255    bool_t  is_dqt = hal_remote_l32( XPTR( gen_barrier_cxy , &gen_barrier_ptr->is_dqt ) );
     256    void  * extend = hal_remote_lpt( XPTR( gen_barrier_cxy , &gen_barrier_ptr->extend ) );
     257
      258    // build extended pointer on the implementation-specific barrier descriptor
     259    xptr_t barrier_xp = XPTR( gen_barrier_cxy , extend );
     260
     261    // display barrier state
     262    if( is_dqt ) dqt_barrier_display( barrier_xp );
     263    else         simple_barrier_display( barrier_xp );
     264}
    248265
    249266
     
    454471
    455472}  // end simple_barrier_wait()
     473
     474/////////////////////////////////////////////////
     475void simple_barrier_display( xptr_t  barrier_xp )
     476{
     477    // get cluster and local pointer on simple barrier
     478    simple_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
     479    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
     480
     481    // get barrier global parameters
     482    uint32_t current  = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->current ) );
     483    uint32_t arity    = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->arity   ) );
     484
     485    printk("\n***** simple barrier : %d arrived threads on %d *****\n",
     486    current, arity );
     487
     488}   // end simple_barrier_display()
     489
     490
    456491
    457492
     
    493528
    494529// check x_size and y_size arguments
    495 assert( (z <= 16) , "DQT dqth larger than (16*16)\n");
     530assert( (z <= 16) , "DQT mesh size larger than (16*16)\n");
    496531
    497532// check RPC descriptor size
     
    9731008}  // end dqt_barrier_wait()
    9741009
    975 
    976 ////////////////////////////////////////////////////////////////////////////////////////////
    977 //          DQT static functions
    978 ////////////////////////////////////////////////////////////////////////////////////////////
    979 
    980 
    981 //////////////////////////////////////////////////////////////////////////////////////////
    982 // This recursive function decrements the distributed "count" variables,
    983 // traversing the DQT from bottom to root.
    984 // The last arrived thread reset the local node before returning.
    985 //////////////////////////////////////////////////////////////////////////////////////////
    986 static void dqt_barrier_increment( xptr_t  node_xp )
    987 {
    988     uint32_t   expected;
    989     uint32_t   sense;
    990     uint32_t   arity;
    991 
    992     thread_t * this = CURRENT_THREAD;
    993 
    994     // get node cluster and local pointer
    995     dqt_node_t * node_ptr = GET_PTR( node_xp );
    996     cxy_t        node_cxy = GET_CXY( node_xp );
    997 
    998     // build relevant extended pointers
    999     xptr_t  arity_xp   = XPTR( node_cxy , &node_ptr->arity );
    1000     xptr_t  sense_xp   = XPTR( node_cxy , &node_ptr->sense );
    1001     xptr_t  current_xp = XPTR( node_cxy , &node_ptr->current );
    1002     xptr_t  lock_xp    = XPTR( node_cxy , &node_ptr->lock );
    1003     xptr_t  root_xp    = XPTR( node_cxy , &node_ptr->root );
    1004 
    1005 #if DEBUG_BARRIER_WAIT
    1006 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1007 uint32_t   level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) );
    1008 if( cycle > DEBUG_BARRIER_WAIT )
    1009 printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n",
    1010 __FUNCTION__ , this->process->pid, this->trdid,
    1011 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
    1012 #endif
    1013 
    1014     // get extended pointer on parent node
    1015     xptr_t  parent_xp  = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) );
    1016 
    1017     // take busylock
    1018     remote_busylock_acquire( lock_xp );
    1019    
    1020     // get sense and arity values from barrier descriptor
    1021     sense = hal_remote_l32( sense_xp );
    1022     arity = hal_remote_l32( arity_xp );
    1023 
    1024     // compute expected value
    1025     expected = (sense == 0) ? 1 : 0;
    1026 
    1027     // increment current number of arrived threads / get value before increment
    1028     uint32_t current = hal_remote_atomic_add( current_xp , 1 );
    1029 
    1030     // last arrived thread reset the local node, makes the recursive call
    1031     // on parent node, and reactivates all waiting thread when returning.
    1032     // other threads block, register in queue, and deschedule.
    1033 
    1034     if ( current == (arity - 1) )                        // last thread 
    1035     {
    1036 
    1037 #if DEBUG_BARRIER_WAIT
    1038 if( cycle > DEBUG_BARRIER_WAIT )
    1039 printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n",
    1040 __FUNCTION__ , this->process->pid, this->trdid,
    1041 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
    1042 #endif
    1043         // reset the current node
    1044         hal_remote_s32( sense_xp   , expected );
    1045         hal_remote_s32( current_xp , 0 );
    1046 
    1047         // release busylock protecting the current node
    1048         remote_busylock_release( lock_xp );
    1049 
    1050         // recursive call on parent node when current node is not the root
    1051         if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp );
    1052 
    1053         // unblock all waiting threads on this node
    1054         while( xlist_is_empty( root_xp ) == false )
    1055         {
    1056             // get pointers on first waiting thread
    1057             xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
    1058             cxy_t      thread_cxy = GET_CXY( thread_xp );
    1059             thread_t * thread_ptr = GET_PTR( thread_xp );
    1060 
    1061 #if (DEBUG_BARRIER_WAIT & 1)
    1062 trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
    1063 process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
    1064 pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
    1065 if( cycle > DEBUG_BARRIER_WAIT )
    1066 printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n",
    1067 __FUNCTION__, this->process->pid, this->trdid, pid, trdid );
    1068 #endif
    1069             // remove waiting thread from queue
    1070             xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );
    1071 
    1072             // unblock waiting thread
    1073             thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
    1074         }
    1075     }
    1076     else                                               // not the last thread
    1077     {
    1078         // get extended pointer on xlist entry from thread
    1079         xptr_t  entry_xp = XPTR( local_cxy , &this->wait_list );
    1080        
    1081         // register calling thread in barrier waiting queue
    1082         xlist_add_last( root_xp , entry_xp );
    1083 
    1084         // block calling thread
    1085         thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );
    1086 
    1087         // release busylock protecting the remote_barrier
    1088         remote_busylock_release( lock_xp );
    1089 
    1090 #if DEBUG_BARRIER_WAIT
    1091 if( cycle > DEBUG_BARRIER_WAIT )
    1092 printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n",
    1093 __FUNCTION__ , this->process->pid, this->trdid,
    1094 HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
    1095 #endif
    1096         // deschedule
    1097         sched_yield("blocked on barrier");
    1098     }
    1099 
    1100     return;
    1101 
    1102 } // end dqt_barrier_decrement()
    1103 
    1104 #if DEBUG_BARRIER_CREATE
    1105 
    1106 ////////////////////////////////////////////////////////////////////////////////////////////
    1107 // This debug function displays all DQT nodes in all clusters.
    1108 ////////////////////////////////////////////////////////////////////////////////////////////
    1109 // @ barrier_xp   : extended pointer on DQT barrier descriptor.
    1110 ////////////////////////////////////////////////////////////////////////////////////////////
    1111 static void dqt_barrier_display( xptr_t  barrier_xp )
     1010//////////////////////////////////////////////
     1011void dqt_barrier_display( xptr_t  barrier_xp )
    11121012{
    11131013    // get cluster and local pointer on DQT barrier
     
    11471047                     uint32_t level = hal_remote_l32( XPTR( node_cxy , &node_ptr->level       ));
    11481048                     uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity       ));
     1049                     uint32_t count = hal_remote_l32( XPTR( node_cxy , &node_ptr->current     ));
    11491050                     xptr_t   pa_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->parent_xp   ));
    11501051                     xptr_t   c0_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[0] ));
     
    11531054                     xptr_t   c3_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[3] ));
    11541055
    1155                      printk("   . level %d : (%x,%x) / arity %d / P(%x,%x) / C0(%x,%x)"
     1056                     printk("   . level %d : (%x,%x) / %d on %d / P(%x,%x) / C0(%x,%x)"
    11561057                            " C1(%x,%x) / C2(%x,%x) / C3(%x,%x)\n",
    1157                      level, node_cxy, node_ptr, arity,
     1058                     level, node_cxy, node_ptr, count, arity,
    11581059                     GET_CXY(pa_xp), GET_PTR(pa_xp),
    11591060                     GET_CXY(c0_xp), GET_PTR(c0_xp),
     
    11671068}   // end dqt_barrier_display()
    11681069
    1169 #endif
     1070
     1071//////////////////////////////////////////////////////////////////////////////////////////
     1072// This static (recursive) function is called by the dqt_barrier_wait() function.
     1073// It traverses the DQT from bottom to root, and decrements the "current" variables.
     1074// For each traversed node, it blocks and deschedules if it is not the last expected
     1075//  thread. The last arrived thread reset the local node before returning.
     1076//////////////////////////////////////////////////////////////////////////////////////////
     1077static void dqt_barrier_increment( xptr_t  node_xp )
     1078{
     1079    uint32_t   expected;
     1080    uint32_t   sense;
     1081    uint32_t   arity;
     1082
     1083    thread_t * this = CURRENT_THREAD;
     1084
     1085    // get node cluster and local pointer
     1086    dqt_node_t * node_ptr = GET_PTR( node_xp );
     1087    cxy_t        node_cxy = GET_CXY( node_xp );
     1088
     1089    // build relevant extended pointers
     1090    xptr_t  arity_xp   = XPTR( node_cxy , &node_ptr->arity );
     1091    xptr_t  sense_xp   = XPTR( node_cxy , &node_ptr->sense );
     1092    xptr_t  current_xp = XPTR( node_cxy , &node_ptr->current );
     1093    xptr_t  lock_xp    = XPTR( node_cxy , &node_ptr->lock );
     1094    xptr_t  root_xp    = XPTR( node_cxy , &node_ptr->root );
     1095
     1096#if DEBUG_BARRIER_WAIT
     1097uint32_t   cycle = (uint32_t)hal_get_cycles();
     1098uint32_t   level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) );
     1099if( cycle > DEBUG_BARRIER_WAIT )
     1100printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n",
     1101__FUNCTION__ , this->process->pid, this->trdid,
     1102HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
     1103#endif
     1104
     1105    // get extended pointer on parent node
     1106    xptr_t  parent_xp  = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) );
     1107
     1108    // take busylock
     1109    remote_busylock_acquire( lock_xp );
     1110   
     1111    // get sense and arity values from barrier descriptor
     1112    sense = hal_remote_l32( sense_xp );
     1113    arity = hal_remote_l32( arity_xp );
     1114
     1115    // compute expected value
     1116    expected = (sense == 0) ? 1 : 0;
     1117
     1118    // increment current number of arrived threads / get value before increment
     1119    uint32_t current = hal_remote_atomic_add( current_xp , 1 );
     1120
     1121    // last arrived thread reset the local node, makes the recursive call
     1122    // on parent node, and reactivates all waiting thread when returning.
     1123    // other threads block, register in queue, and deschedule.
     1124
     1125    if ( current == (arity - 1) )                        // last thread 
     1126    {
     1127
     1128#if DEBUG_BARRIER_WAIT
     1129if( cycle > DEBUG_BARRIER_WAIT )
     1130printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n",
     1131__FUNCTION__ , this->process->pid, this->trdid,
     1132HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
     1133#endif
     1134        // reset the current node
     1135        hal_remote_s32( sense_xp   , expected );
     1136        hal_remote_s32( current_xp , 0 );
     1137
     1138        // release busylock protecting the current node
     1139        remote_busylock_release( lock_xp );
     1140
     1141        // recursive call on parent node when current node is not the root
     1142        if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp );
     1143
     1144        // unblock all waiting threads on this node
     1145        while( xlist_is_empty( root_xp ) == false )
     1146        {
     1147            // get pointers on first waiting thread
     1148            xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
     1149            cxy_t      thread_cxy = GET_CXY( thread_xp );
     1150            thread_t * thread_ptr = GET_PTR( thread_xp );
     1151
     1152#if (DEBUG_BARRIER_WAIT & 1)
     1153trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     1154process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
     1155pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
     1156if( cycle > DEBUG_BARRIER_WAIT )
     1157printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n",
     1158__FUNCTION__, this->process->pid, this->trdid, pid, trdid );
     1159#endif
     1160            // remove waiting thread from queue
     1161            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );
     1162
     1163            // unblock waiting thread
     1164            thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
     1165        }
     1166    }
     1167    else                                               // not the last thread
     1168    {
     1169        // get extended pointer on xlist entry from thread
     1170        xptr_t  entry_xp = XPTR( local_cxy , &this->wait_list );
     1171       
     1172        // register calling thread in barrier waiting queue
     1173        xlist_add_last( root_xp , entry_xp );
     1174
     1175        // block calling thread
     1176        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );
     1177
     1178        // release busylock protecting the remote_barrier
     1179        remote_busylock_release( lock_xp );
     1180
     1181#if DEBUG_BARRIER_WAIT
     1182if( cycle > DEBUG_BARRIER_WAIT )
     1183printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n",
     1184__FUNCTION__ , this->process->pid, this->trdid,
     1185HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
     1186#endif
     1187        // deschedule
     1188        sched_yield("blocked on barrier");
     1189    }
     1190
     1191    return;
     1192
      1193} // end dqt_barrier_increment()
     1194
     1195
  • trunk/kernel/libk/remote_barrier.h

    r619 r623  
    4242 * used by the kernel. ALMOS-MKH uses only the barrier virtual address as an identifier.
    4343 * For each user barrier, ALMOS-MKH creates a kernel structure, dynamically allocated
    44  * by the "generic_barrier_create()" function, destroyed by the "remote_barrier_destroy()"
    45  * function, and used by the "generic_barrier_wait()" function.
     44 * by the generic_barrier_create() function, destroyed by the generic_barrier_destroy()
     45 * function, and used by the generic_barrier_wait() function.
    4646 *
    4747 * Implementation note:
     
    5858 *    (x_size * ysize) mesh, including cluster (0,0), with nthreads per cluster, and called
    5959 *    DQT : Distributed Quad Tree. This DQT implementation supposes a regular architecture,
     60                     uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity       ));
     6061 *    and a strong constraint on the threads placement: exactly "nthreads" threads per
    6162 *    cluster in the (x_size * y_size) mesh.
     
    141142
    142143
    143 
     144/*****************************************************************************************
     145 * This debug function uses remote accesses to display the current state of a generic
     146 * barrier identified by the <gen_barrier_xp> argument.
     147 * It calls the relevant function (simple or DQT) to display relevant information.
     148 * It can be called by a thread running in any cluster.
     149 *****************************************************************************************
      150 * @ gen_barrier_xp : extended pointer on generic barrier descriptor.
     151 ****************************************************************************************/
     152
     153void generic_barrier_display( xptr_t gen_barrier_xp );
    144154
    145155
     
    192202void simple_barrier_wait( xptr_t   barrier_xp );
    193203
     204/*****************************************************************************************
     205 * This debug function uses remote accesses to display the current state of a simple
     206 * barrier identified by the <barrier_xp> argument.
     207 * It can be called by a thread running in any cluster.
     208 *****************************************************************************************
     209 * @ barrier_xp   : extended pointer on simple barrier descriptor.
     210 ****************************************************************************************/
     211void simple_barrier_display( xptr_t barrier_xp );
    194212
    195213
     
    281299void dqt_barrier_wait( xptr_t   barrier_xp );
    282300
    283 
     301/*****************************************************************************************
     302 * This debug function uses remote accesses to display the current state of all
      303 * distributed nodes in a DQT barrier identified by the <barrier_xp> argument.
     304 * It can be called by a thread running in any cluster.
     305 *****************************************************************************************
     306 * @ barrier_xp   : extended pointer on DQT barrier descriptor.
     307 ****************************************************************************************/
     308void dqt_barrier_display( xptr_t barrier_xp );
    284309
    285310#endif  /* _REMOTE_BARRIER_H_ */
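
The display functions declared above are intended for conditional debug code and can be called from any cluster. A minimal sketch, assuming DEBUG_BARRIER_WAIT is defined in kernel_config.h and gen_barrier_xp is the extended pointer on a generic barrier descriptor:

    #if DEBUG_BARRIER_WAIT
    // display the barrier state (simple or DQT, selected by the is_dqt field)
    generic_barrier_display( gen_barrier_xp );
    #endif
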
  • trunk/kernel/libk/remote_queuelock.c

    r610 r623  
    9191
    9292#if DEBUG_QUEUELOCK_TYPE
    93 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     93if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    9494printk("\n[%s] thread[%x,%x] BLOCK on q_lock %s [%x,%x]\n",
    9595__FUNCTION__, this->process->pid, this->trdid,
     
    117117
    118118#if DEBUG_QUEUELOCK_TYPE
    119 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     119if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    120120printk("\n[%s] thread[%x,%x] ACQUIRE q_lock %s [%x,%x]\n",
    121121__FUNCTION__, this->process->pid, this->trdid,
     
    152152thread_t * this      = CURRENT_THREAD;
    153153uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    154 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     154if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    155155printk("\n[%s] thread[%x,%x] RELEASE q_lock %s (%x,%x)\n",
    156156__FUNCTION__, this->process->pid, this->trdid,
     
    171171
    172172#if DEBUG_QUEUELOCK_TYPE
    173 if( DEBUG_QUEUELOCK_TYPE == lock_type )
     173if( (DEBUG_QUEUELOCK_TYPE == lock_type) || (DEBUG_QUEUELOCK_TYPE == 1000) )
    174174{
    175175    trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
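The recurrent change in this file (and in remote_rwlock.c and rwlock.c below) extends the per-type debug filter: when the DEBUG_QUEUELOCK_TYPE (or DEBUG_RWLOCK_TYPE) parameter equals a given lock type, only that type is traced, and the value 1000 now acts as a wildcard selecting all types. A small sketch of the convention, assuming these parameters live in kernel_config.h like the other DEBUG_* switches:

    #include <stdint.h>
    #include <stdbool.h>

    // hedged sketch of the filter used by the lock debug code:
    // <configured> is the DEBUG_*_TYPE value, <lock_type> is the type of
    // the lock being traced; 1000 is the "trace all types" wildcard.
    static inline bool lock_debug_match( uint32_t configured , uint32_t lock_type )
    {
        return (configured == lock_type) || (configured == 1000);
    }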
  • trunk/kernel/libk/remote_rwlock.c

    r610 r623  
    5555#if DEBUG_RWLOCK_TYPE
    5656thread_t * this = CURRENT_THREAD;
    57 if( type == DEBUG_RWLOCK_TYPE )
     57if( DEBUG_RWLOCK_TYPE == type )
    5858printk("\n[%s] thread[%x,%x] initialise lock %s [%x,%x]\n",
    5959__FUNCTION__, this->process->pid, this->trdid,
     
    9393
    9494#if DEBUG_RWLOCK_TYPE
    95 if( lock_type == DEBUG_RWLOCK_TYPE )
     95if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    9696printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    9797__FUNCTION__, this->process->pid, this->trdid,
     
    124124
    125125#if DEBUG_RWLOCK_TYPE
    126 if( lock_type == DEBUG_RWLOCK_TYPE )
     126if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    127127printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken = %d / count = %d\n",
    128128__FUNCTION__, this->process->pid, this->trdid,
     
    166166
    167167#if DEBUG_RWLOCK_TYPE
    168 if( lock_type == DEBUG_RWLOCK_TYPE )
     168if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    169169printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    170170__FUNCTION__, this->process->pid, this->trdid,
     
    196196
    197197#if DEBUG_RWLOCK_TYPE
    198 if( lock_type == DEBUG_RWLOCK_TYPE )
     198if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    199199printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    200200__FUNCTION__, this->process->pid, this->trdid,
     
    235235uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    236236xptr_t     taken_xp  = XPTR( lock_cxy , &lock_ptr->taken );
    237 if( lock_type == DEBUG_RWLOCK_TYPE )
     237if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    238238printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    239239__FUNCTION__, this->process->pid, this->trdid,
     
    258258
    259259#if DEBUG_RWLOCK_TYPE
    260 if( lock_type == DEBUG_RWLOCK_TYPE )
     260if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    261261{
    262262    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    289289
    290290#if DEBUG_RWLOCK_TYPE
    291 if( lock_type == DEBUG_RWLOCK_TYPE )
     291if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    292292{
    293293    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    334334uint32_t   lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->lock.type ) );
    335335xptr_t     count_xp  = XPTR( lock_cxy , &lock_ptr->count );
    336 if( lock_type == DEBUG_RWLOCK_TYPE )
     336if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    337337printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    338338__FUNCTION__, this->process->pid, this->trdid,
     
    356356
    357357#if DEBUG_RWLOCK_TYPE
    358 if( lock_type == DEBUG_RWLOCK_TYPE )
     358if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    359359{
    360360    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
     
    386386
    387387#if DEBUG_RWLOCK_TYPE
    388 if( lock_type == DEBUG_RWLOCK_TYPE )
     388if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    389389{
    390390    trdid_t     trdid     = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
  • trunk/kernel/libk/rwlock.c

    r610 r623  
    7171    busylock_acquire( &lock->lock );
    7272
     73#if DEBUG_RWLOCK_TYPE
     74uint32_t lock_type = lock->lock.type;
     75#endif
     76
    7377    // block and deschedule if lock already taken
    7478    while( lock->taken )
     
    7680
    7781#if DEBUG_RWLOCK_TYPE
    78 uint32_t lock_type = lock->lock.type;
    79 if( DEBUG_RWLOCK_TYPE == lock_type )
     82if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    8083printk("\n[%s] thread[%x,%x] READ BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    8184__FUNCTION__, this->process->pid, this->trdid,
     
    102105
    103106#if DEBUG_RWLOCK_TYPE
    104 if( DEBUG_RWLOCK_TYPE == lock_type )
     107if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    105108printk("\n[%s] thread[%x,%x] READ ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    106109__FUNCTION__, this->process->pid, this->trdid,
     
    124127    busylock_acquire( &lock->lock );
    125128
     129#if DEBUG_RWLOCK_TYPE
     130uint32_t lock_type = lock->lock.type;
     131#endif
     132
    126133    // block and deschedule if lock already taken or existing read access
    127134    while( lock->taken || lock->count )
     
    129136
    130137#if DEBUG_RWLOCK_TYPE
    131 uint32_t lock_type = lock->lock.type;
    132 if( DEBUG_RWLOCK_TYPE == lock_type )
     138if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    133139printk("\n[%s] thread[%x,%x] WRITE BLOCK on rwlock %s [%x,%x] / taken %d / count %d\n",
    134140__FUNCTION__, this->process->pid, this->trdid,
     
    155161
    156162#if DEBUG_RWLOCK_TYPE
    157 if( DEBUG_RWLOCK_TYPE == lock_type )
     163if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    158164printk("\n[%s] thread[%x,%x] WRITE ACQUIRE rwlock %s [%x,%x] / taken %d / count %d\n",
    159165__FUNCTION__, this->process->pid, this->trdid,
     
    181187thread_t * this = CURRENT_THREAD;
    182188uint32_t lock_type = lock->lock.type;
    183 if( DEBUG_RWLOCK_TYPE == lock_type )
     189if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    184190printk("\n[%s] thread[%x,%x] READ RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    185191__FUNCTION__, this->process->pid, this->trdid,
     
    195201
    196202#if DEBUG_RWLOCK_TYPE
    197 if( DEBUG_RWLOCK_TYPE == lock_type )
     203if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    198204printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    199205__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    217223
    218224#if DEBUG_RWLOCK_TYPE
    219 if( DEBUG_RWLOCK_TYPE == lock_type )
     225if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    220226printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    221227__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    251257thread_t * this = CURRENT_THREAD;
    252258uint32_t lock_type = lock->lock.type;
    253 if( DEBUG_RWLOCK_TYPE == lock_type )
     259if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    254260printk("\n[%s] thread[%x,%x] WRITE RELEASE rwlock %s [%x,%x] / taken %d / count %d\n",
    255261__FUNCTION__, this->process->pid, this->trdid,
     
    264270
    265271#if DEBUG_RWLOCK_TYPE
    266 if( DEBUG_RWLOCK_TYPE == lock_type )
     272if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    267273printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    268274__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
     
    285291
    286292#if DEBUG_RWLOCK_TYPE
    287 if( DEBUG_RWLOCK_TYPE == lock_type )
     293if( (DEBUG_RWLOCK_TYPE == lock_type) || (DEBUG_RWLOCK_TYPE == 1000) )
    288294printk("\n[%s] thread[%x,%x] UNBLOCK thread[%x,%x] / rwlock %s [%x,%x]\n",
    289295__FUNCTION__, this->process->pid, this->trdid, thread->process->pid, thread->trdid,
  • trunk/kernel/libk/user_dir.h

    r614 r623  
    8686 * - the allocation of one or several physical pages in reference cluster to store
    8787 *   all directory entries in an array of 64 bytes dirent structures,
    88  * - the initialisation of this array from informations found in the Inode Tree.
      88 * - the initialisation of this array from information found in the directory mapper.
    8989 * - the creation of an ANON vseg containing this dirent array in reference process VMM,
    9090 *   and the mapping of the relevant physical pages in this vseg.
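Since each dirent structure occupies 64 bytes, the number of physical pages required by the array described above is a simple function of the number of directory entries. A small sketch, assuming the CONFIG_PPM_PAGE_SHIFT parameter (used elsewhere in this changeset) defines the small page size:

    #include <stdint.h>

    // hedged sketch: pages needed to store <nb_entries> 64-byte dirent
    // structures, with small pages of (1 << CONFIG_PPM_PAGE_SHIFT) bytes
    static inline uint32_t dirent_array_pages( uint32_t nb_entries )
    {
        uint32_t bytes     = nb_entries * 64;
        uint32_t page_size = 1 << CONFIG_PPM_PAGE_SHIFT;

        return (bytes + page_size - 1) / page_size;   // round up
    }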
  • trunk/kernel/mm/mapper.c

    r614 r623  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018)
     5 *           Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    261261vfs_inode_t * inode = mapper->inode;
    262262vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    263 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    264 // if( (page_id == 1) && (cycle > 10000000) )
     263if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    265264printk("\n[%s] enter for page %d in <%s> / cycle %d",
    266265__FUNCTION__, page_id, name, cycle );
     
    322321#if DEBUG_MAPPER_HANDLE_MISS
    323322cycle = (uint32_t)hal_get_cycles();
    324 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    325 // if( (page_id == 1) && (cycle > 10000000) )
     323if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    326324printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
    327325__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
     
    442440            ppm_page_do_dirty( page_xp );
    443441            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
     442
     443putb(" in mapper_move_user()" , map_ptr , page_count );
     444
    444445        }
    445446
     
    645646
    646647}  // end mapper_remote_set_32()
     648
     649/////////////////////////////////////////
     650error_t mapper_sync( mapper_t *  mapper )
     651{
     652    page_t   * page;                // local pointer on current page descriptor
     653    xptr_t     page_xp;             // extended pointer on current page descriptor
     654    grdxt_t  * rt;                  // pointer on radix_tree descriptor
     655    uint32_t   start_key;           // start page index in mapper
     656    uint32_t   found_key;           // current page index in mapper
     657    error_t    error;
     658
     659#if DEBUG_MAPPER_SYNC
     660thread_t * this  = CURRENT_THREAD;
     661uint32_t   cycle = (uint32_t)hal_get_cycles();
     662char       name[CONFIG_VFS_MAX_NAME_LENGTH];
     663vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
     664#endif
     665
     666    // get pointer on radix tree
     667    rt        = &mapper->rt;
     668
     669    // initialise loop variable
     670    start_key = 0;
     671
     672    // scan radix-tree until last page found
     673    while( 1 )
     674    {
     675        // get page descriptor from radix tree
     676        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
     677         
     678        if( page == NULL ) break;
     679
      680assert( (page->index == found_key ) , "wrong page descriptor index" );
      681assert( (page->order == 0) ,          "mapper page order must be 0" );
     682
     683        // build extended pointer on page descriptor
     684        page_xp = XPTR( local_cxy , page );
     685
     686        // synchronize page if dirty
     687        if( (page->flags & PG_DIRTY) != 0 )
     688        {
     689
     690#if DEBUG_MAPPER_SYNC
     691if( cycle > DEBUG_MAPPER_SYNC )
      692printk("\n[%s] thread[%x,%x] synchronise page %d of <%s> to device\n",
     693__FUNCTION__, this->process->pid, this->trdid, page->index, name );
     694#endif
     695            // copy page to file system
     696            error = vfs_fs_move_page( page_xp , IOC_WRITE );
     697
     698            if( error )
     699            {
      700                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
     701                __FUNCTION__, page->index );
     702                return -1;
     703            }
     704
     705            // remove page from PPM dirty list
     706            ppm_page_undo_dirty( page_xp );
     707        }
     708        else
     709        {
     710
     711#if DEBUG_MAPPER_SYNC
     712if( cycle > DEBUG_MAPPER_SYNC )
     713printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
     714__FUNCTION__, this->process->pid, this->trdid, page->index, name );
     715#endif
     716        }
     717
     718        // update loop variable
     719        start_key = page->index + 1;
     720    }  // end while
     721
     722    return 0;
     723
     724}  // end mapper_sync()
    647725
    648726//////////////////////////////////////////////////
  • trunk/kernel/mm/mapper.h

    r614 r623  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018)
     5 *           Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    4848 *   "readers", and only one "writer".
    4949 * - A "reader" thread, calling the mapper_remote_get_page() function to get a page
    50  *   descriptor pointer from the page index in file, can be remote (running in any cluster).
     50 *   descriptor pointer from the page index in file, can be running in any cluster.
    5151 * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss
    5252 *   must be local (running in the mapper cluster).
    53  * - The vfs_mapper_move_page() function access the file system to handle a mapper miss,
     53 * - The vfs_fs_move_page() function access the file system to handle a mapper miss,
    5454 *   or update a dirty page on device.
    5555 * - The vfs_mapper_load_all() functions is used to load all pages of a directory
     
    6363 *
    6464 * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c
    65  *        and mapper.h file should be trandfered to the vfs directory.
      65 *        and mapper.h files should be transferred to the fs directory.
    6666 ******************************************************************************************/
    6767
     
    230230
    231231/*******************************************************************************************
      232 * This function scans all pages present in the mapper identified by the <mapper> argument,
      233 * and synchronizes all pages marked as dirty on disk.
      234 * These pages are unmarked and removed from the local PPM dirty_list.
      235 * It must be called by a local thread running in the same cluster as the mapper.
     236 * A remote thread must call the RPC_MAPPER_SYNC function.
     237 *******************************************************************************************
     238 * @ mapper     : [in]  local pointer on local mapper.
     239 * @ returns 0 if success / return -1 if error.
     240 ******************************************************************************************/
     241error_t mapper_sync( mapper_t *  mapper );
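As stated above, only a thread running in the mapper cluster may call mapper_sync() directly; a remote thread is expected to go through the RPC_MAPPER_SYNC service. A hedged sketch of the resulting dispatch, where the rpc_mapper_sync_client() stub name is an assumption modelled on the rpc_vmm_display_client() naming used elsewhere in this changeset:

    #include <mapper.h>
    #include <rpc.h>

    // hedged sketch: synchronise a possibly remote mapper identified by
    // the extended pointer <mapper_xp>
    static error_t mapper_sync_dispatch( xptr_t mapper_xp )
    {
        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
        mapper_t * mapper_ptr = GET_PTR( mapper_xp );
        error_t    error;

        if( mapper_cxy == local_cxy )        // mapper is local : direct call
        {
            error = mapper_sync( mapper_ptr );
        }
        else                                 // mapper is remote : assumed RPC stub
        {
            rpc_mapper_sync_client( mapper_cxy , mapper_ptr , &error );
        }
        return error;
    }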
     242
     243/*******************************************************************************************
    232244 * This debug function displays the content of a given page of a given mapper.
    233245 * - the mapper is identified by the <mapper_xp> argument.
  • trunk/kernel/mm/page.h

    r612 r623  
    4141#define PG_INIT             0x0001     // page descriptor has been initialised
    4242#define PG_RESERVED         0x0002     // cannot be allocated by PPM
    43 #define PG_FREE             0x0004     // page can be allocated by PPM
     43#define PG_FREE             0x0004     // page not yet allocated by PPM
    4444#define PG_DIRTY            0x0040     // page has been written
    4545#define PG_COW          0x0080     // page is copy-on-write
  • trunk/kernel/mm/ppm.h

    r611 r623  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017,2018)
     5 *          Alain Greiner    (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    3737 * This structure defines the Physical Pages Manager in a cluster.
    3838 * In each cluster, the physical memory bank starts at local physical address 0 and
    39  * contains an integer number of pages, defined by the <pages_nr> field in the
     39 * contains an integer number of small pages, defined by the <pages_nr> field in the
    4040 * boot_info structure. It is split in three parts:
    4141 *
    4242 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
    43  *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
    44  *   boot_info structure.
    45  * - the "pages_tbl" section contains the physical page descriptors array. It starts
    46  *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
     43 *   It starts at local PPN = 0 and the size is defined by the <pages_offset> field
     44 *   in the boot_info structure.
     45 * - the local "pages_tbl" section contains the physical page descriptors array.
     46 *   It starts at local PPN = pages_offset, and it contains one entry per small page.
    4747 *   It is created and initialized by the hal_ppm_create() function.
     4848 * - The "kernel_heap" section contains all physical pages that are not in the
    49  *   kernel_code and pages_tbl sections, and that have not been reserved by the
    50  *   architecture specific bootloader. The reserved pages are defined in the boot_info
    51  *   structure.
     49 *   "kernel_code" and "pages_tbl" sections, and that have not been reserved.
     50 *   The reserved pages are defined in the boot_info structure.
    5251 *
    5352 * The main service provided by the PMM is the dynamic allocation of physical pages
     
    6059 *
    6160 * Another service is to register the dirty pages in a specific dirty_list, that is
    62  * also rooted in the PPM, in order to be able to save all dirty pages on disk.
     61 * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk.
    6362 * This dirty list is protected by a specific remote_queuelock, because it can be
    6463 * modified by a remote thread, but it contains only local pages.
     
    198197 *   . if page already dirty => do nothing
    199198 *   . it page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
    200  * - it releases the busylock protcting the page flags.
     199 * - it releases the busylock protecting the page flags.
    201200 * - it releases the queuelock protecting the PPM dirty_list.
    202201 *****************************************************************************************
     
    214213 *   . if page not dirty => do nothing
    215214 *   . it page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
    216  * - it releases the busylock protcting the page flags.
     215 * - it releases the busylock protecting the page flags.
    217216 * - it releases the queuelock protecting the PPM dirty_list.
    218217 *****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r621 r623  
    5959{
    6060    error_t   error;
    61     vseg_t  * vseg_kentry;
    6261    vseg_t  * vseg_args;
    6362    vseg_t  * vseg_envs;
     
    9190(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    9291"STACK zone too small\n");
    93 
    94     // register kentry vseg in VSL
    95     base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
    96     size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
    97 
    98     vseg_kentry = vmm_create_vseg( process,
    99                                    VSEG_TYPE_CODE,
    100                                    base,
    101                                    size,
    102                                    0,             // file_offset unused
    103                                    0,             // file_size unused
    104                                    XPTR_NULL,     // mapper_xp unused
    105                                    local_cxy );
    106 
    107     if( vseg_kentry == NULL )
    108     {
    109         printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );
    110         return -1;
    111     }
    112 
    113     vmm->kent_vpn_base = base;
    11492
    11593    // register args vseg in VSL
     
    162140
    163141    if( error )
    164     printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     142    {
     143        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     144        return -1;
     145    }
    165146
    166147    // initialize GPT lock
    167148    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    168149
    169     // architecture specic GPT initialisation
    170     // (For TSAR, identity map the kentry_vseg)
    171     error = hal_vmm_init( vmm );
    172 
    173     if( error )
    174     printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ );
     150    // update process VMM with kernel vsegs
     151    error = hal_vmm_kernel_update( process );
     152
     153    if( error )
     154    {
     155        printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
     156        return -1;
     157    }
    175158
    176159    // initialize STACK allocator
     
    326309    }
    327310
    328     // release physical memory allocated for vseg descriptor if no MMAP type
    329     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
      311    // release physical memory allocated for the vseg descriptor if the type is neither MMAP nor kernel
     312    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
     313        (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    330314    {
    331315        vseg_free( vseg );
     
    606590    child_vmm->vsegs_nr = 0;
    607591
    608     // create child GPT
     592    // create the child GPT
    609593    error = hal_gpt_create( &child_vmm->gpt );
    610594
     
    639623#endif
    640624
    641         // all parent vsegs - but STACK - must be copied in child VSL
    642         if( type != VSEG_TYPE_STACK )
     625        // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
     626        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
     627            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    643628        {
    644629            // allocate memory for a new child vseg
     
    726711    remote_rwlock_rd_release( parent_lock_xp );
    727712
    728     // initialize child GPT (architecture specic)
    729     // => For TSAR, identity map the kentry_vseg
    730     error = hal_vmm_init( child_vmm );
     713    // update child VMM with kernel vsegs
     714    error = hal_vmm_kernel_update( child_process );
    731715
    732716    if( error )
    733717    {
    734         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     718        printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );
    735719        return -1;
    736720    }
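Taken together, the two fork-related changes above mean that only user vsegs other than STACK are replicated in the child VSL; the kernel vsegs (KCODE, KDATA, KDEV) are installed afterwards by hal_vmm_kernel_update(). A one-function sketch of the copy rule only:

    #include <stdbool.h>
    #include <vseg.h>

    // hedged sketch: should a parent vseg of this type be copied in the
    // child VSL during fork ?
    static bool vseg_copied_on_fork( uint32_t type )
    {
        return (type != VSEG_TYPE_STACK) &&
               (type != VSEG_TYPE_KCODE) &&
               (type != VSEG_TYPE_KDATA) &&
               (type != VSEG_TYPE_KDEV );
    }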
     
    10981082        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    10991083    }
    1100     else    // VSEG_TYPE_DATA or VSEG_TYPE_CODE
     1084    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    11011085    {
    11021086        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
     
    11781162    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    11791163    uint32_t    forks;      // actual number of pendinf forks
     1164    uint32_t    type;       // vseg type
    11801165
    11811166#if DEBUG_VMM_DELETE_VSEG
     
    11901175    process = cluster_get_local_process_from_pid( pid );
    11911176
    1192     if( process == NULL ) return;
     1177    if( process == NULL )
     1178    {
      1179        printk("\n[ERROR] in %s : cannot get local process descriptor\n",
     1180        __FUNCTION__ );
     1181        return;
     1182    }
    11931183
    11941184    // get pointers on local process VMM an GPT
     
    11991189    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    12001190
    1201     if( vseg == NULL ) return;
    1202 
    1203     // loop to invalidate all vseg PTEs in GPT
     1191    if( vseg == NULL )
     1192    {
      1193        printk("\n[ERROR] in %s : cannot get vseg descriptor\n",
     1194        __FUNCTION__ );
     1195        return;
     1196    }
     1197
     1198    // get relevant vseg infos
     1199    type    = vseg->type;
    12041200    vpn_min = vseg->vpn_base;
    12051201    vpn_max = vpn_min + vseg->vpn_size;
     1202
     1203    // loop to invalidate all vseg PTEs in GPT
    12061204        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    12071205    {
     
    12161214printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
    12171215#endif
    1218 
    1219 // check small page
    1220 assert( (attr & GPT_SMALL) , "an user vseg must use small pages" );
    1221 
    12221216            // unmap GPT entry in local GPT
    12231217            hal_gpt_reset_pte( gpt , vpn );
    12241218
    1225             // handle pending forks counter if
    1226             // 1) not identity mapped
    1227             // 2) reference cluster
    1228             if( ((vseg->flags & VSEG_IDENT)  == 0) &&
    1229                 (GET_CXY( process->ref_xp ) == local_cxy) )
      1219            // the allocated page is not released to KMEM for kernel vsegs
     1220            if( (type != VSEG_TYPE_KCODE) &&
     1221                (type != VSEG_TYPE_KDATA) &&
     1222                (type != VSEG_TYPE_KDEV ) )
    12301223            {
     1224
     1225// FIXME This code must be completely re-written, as the actual release must depend on
     1226// - the vseg type
     1227// - the reference cluster
     1228// - the page refcount and/or the forks counter
     1229
    12311230                // get extended pointer on physical page descriptor
    12321231                page_xp  = ppm_ppn2page( ppn );
     
    12381237                lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    12391238
     1239                // get the lock protecting the page
    12401240                remote_busylock_acquire( lock_xp );
     1241
    12411242                // get pending forks counter
    12421243                forks = hal_remote_l32( forks_xp );
     1244
    12431245                if( forks )  // decrement pending forks counter
    12441246                {
     
    12631265#endif
    12641266                }
     1267
     1268                // release the lock protecting the page
    12651269                remote_busylock_release( lock_xp );
    12661270            }
     
    13111315    // return failure
    13121316    remote_rwlock_rd_release( lock_xp );
     1317
    13131318    return NULL;
    13141319
     
    13251330    vpn_t     vpn_max;
    13261331
     1332#if DEBUG_VMM_RESIZE_VSEG
     1333uint32_t   cycle = (uint32_t)hal_get_cycles();
     1334thread_t * this  = CURRENT_THREAD;
     1335if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1336printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
     1337__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
     1338#endif
     1339
    13271340    // get pointer on process VMM
    13281341    vmm_t * vmm = &process->vmm;
     
    13341347        vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
    13351348
    1336         if( vseg == NULL)  return EINVAL;
    1337 
    1338     // get extended pointer on VSL lock
    1339     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    1340 
    1341     // get lock protecting VSL
    1342         remote_rwlock_wr_acquire( lock_xp );
    1343 
     1349        if( vseg == NULL)
     1350    {
     1351        printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
     1352        __FUNCTION__, base , size );
     1353        return -1;
     1354    }
     1355
     1356    // resize depends on unmapped region base and size
    13441357        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
    13451358    {
     1359        printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
     1360        __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1361
    13461362        error = -1;
    13471363    }
    13481364        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
    13491365    {
     1366
     1367#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1368if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1369printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
     1370__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1371#endif
    13501372        vmm_delete_vseg( process->pid , vseg->min );
     1373
     1374#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1375if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1376printk("\n[%s] thread[%x,%x] deleted vseg\n",
     1377__FUNCTION__, this->process->pid, this->trdid );
     1378#endif
    13511379        error = 0;
    13521380    }
    13531381        else if( vseg->min == addr_min )                               // vseg must be resized
    13541382    {
    1355         // update vseg base address
     1383
     1384#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1385if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1386printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1387__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1388#endif
     1389        // update vseg min address
    13561390        vseg->min = addr_max;
    13571391
     
    13611395        vseg->vpn_base = vpn_min;
    13621396        vseg->vpn_size = vpn_max - vpn_min + 1;
     1397
     1398#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1399if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1400printk("\n[%s] thread[%x,%x] changed vseg_min\n",
     1401__FUNCTION__, this->process->pid, this->trdid );
     1402#endif
    13631403        error = 0;
    13641404    }
    13651405        else if( vseg->max == addr_max )                              // vseg must be resized
    13661406    {
     1407
     1408#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1409if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1410printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1411__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1412#endif
    13671413        // update vseg max address
    13681414        vseg->max = addr_min;
     
    13731419        vseg->vpn_base = vpn_min;
    13741420        vseg->vpn_size = vpn_max - vpn_min + 1;
     1421
     1422#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1423if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1424printk("\n[%s] thread[%x,%x] changed vseg_max\n",
     1425__FUNCTION__, this->process->pid, this->trdid );
     1426#endif
    13751427        error = 0;
     1428
    13761429    }
    13771430    else                                                          // vseg cut in three regions
    13781431    {
     1432
     1433#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1434if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1435printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1436__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1437#endif
    13791438        // resize existing vseg
    13801439        vseg->max = addr_min;
     
    13961455                               vseg->cxy );
    13971456
    1398         if( new == NULL ) error = EINVAL;
     1457#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1458if( DEBUG_VMM_RESIZE_VSEG < cycle )
      1459printk("\n[%s] thread[%x,%x] replaced vseg by two smaller vsegs\n",
     1460__FUNCTION__, this->process->pid, this->trdid );
     1461#endif
     1462
     1463        if( new == NULL ) error = -1;
    13991464        else              error = 0;
    14001465    }
    14011466
    1402     // release VMM lock
    1403         remote_rwlock_wr_release( lock_xp );
     1467#if DEBUG_VMM_RESIZE_VSEG
     1468if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1469printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
     1470__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
     1471#endif
    14041472
    14051473        return error;
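The resize logic introduced above distinguishes four placements of the unmapped region [addr_min,addr_max[ relative to the vseg [min,max[: not included (error), equal (the vseg is deleted), aligned on min or max (the vseg is shrunk), or strictly inside (the vseg is cut in three regions and replaced by two smaller vsegs). A compact sketch restating only that classification:

    #include <stdint.h>

    typedef enum { RESIZE_ERROR, RESIZE_DELETE, RESIZE_SHRINK_MIN,
                   RESIZE_SHRINK_MAX, RESIZE_SPLIT } resize_case_t;

    // hedged sketch of the decision made by vmm_resize_vseg()
    static resize_case_t resize_case( intptr_t min,      intptr_t max,
                                      intptr_t addr_min, intptr_t addr_max )
    {
        if( (min > addr_min) || (max < addr_max)   ) return RESIZE_ERROR;      // not included
        if( (min == addr_min) && (max == addr_max) ) return RESIZE_DELETE;     // whole vseg
        if( min == addr_min                        ) return RESIZE_SHRINK_MIN; // cut the head
        if( max == addr_max                        ) return RESIZE_SHRINK_MAX; // cut the tail
        return RESIZE_SPLIT;                                                   // cut in three
    }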
  • trunk/kernel/mm/vmm.h

    r614 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/vseg.c

    r595 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2018,2019)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    5555        else if( vseg_type == VSEG_TYPE_FILE   ) return "FILE";
    5656        else if( vseg_type == VSEG_TYPE_REMOTE ) return "REMO";
     57        else if( vseg_type == VSEG_TYPE_KCODE  ) return "KCOD";
     58        else if( vseg_type == VSEG_TYPE_KDATA  ) return "KDAT";
     59        else if( vseg_type == VSEG_TYPE_KDEV   ) return "KDEV";
    5760    else                                     return "undefined";
    5861}
     
    142145                      VSEG_CACHE   ;
    143146    }
     147    else if( type == VSEG_TYPE_KCODE )
     148    {
     149        vseg->flags = VSEG_EXEC    |
     150                      VSEG_CACHE   |
     151                      VSEG_PRIVATE ;
     152    }
     153    else if( type == VSEG_TYPE_KDATA )
     154    {
     155        vseg->flags = VSEG_CACHE   |
     156                      VSEG_WRITE   ;
     157    }
     158    else if( type == VSEG_TYPE_KDEV )
     159    {
     160        vseg->flags = VSEG_WRITE   ;
     161    }
    144162    else
    145163    {
     
    158176
    159177    // initialize vseg with remote_read access
    160     vseg->type        =           hal_remote_l32 ( XPTR( cxy , &ptr->type        ) );
     178    vseg->type        =           hal_remote_l32( XPTR( cxy , &ptr->type        ) );
    161179    vseg->min         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->min         ) );
    162180    vseg->max         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->max         ) );
    163     vseg->vpn_base    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_base    ) );
    164     vseg->vpn_size    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_size    ) );
    165     vseg->flags       =           hal_remote_l32 ( XPTR( cxy , &ptr->flags       ) );
    166     vseg->file_offset =           hal_remote_l32 ( XPTR( cxy , &ptr->file_offset ) );
    167     vseg->file_size   =           hal_remote_l32 ( XPTR( cxy , &ptr->file_size   ) );
     181    vseg->vpn_base    =           hal_remote_l32( XPTR( cxy , &ptr->vpn_base    ) );
     182    vseg->vpn_size    =           hal_remote_l32( XPTR( cxy , &ptr->vpn_size    ) );
     183    vseg->flags       =           hal_remote_l32( XPTR( cxy , &ptr->flags       ) );
     184    vseg->file_offset =           hal_remote_l32( XPTR( cxy , &ptr->file_offset ) );
     185    vseg->file_size   =           hal_remote_l32( XPTR( cxy , &ptr->file_size   ) );
    168186        vseg->mapper_xp   = (xptr_t)  hal_remote_l64( XPTR( cxy , &ptr->mapper_xp   ) );
    169187
    170188    switch (vseg->type)
    171189    {
    172         case VSEG_TYPE_DATA:
     190        case VSEG_TYPE_DATA:      // unused
    173191        {
    174192            vseg->cxy = 0xffff;
    175193            break;
    176194        }
    177         case VSEG_TYPE_CODE:
     195        case VSEG_TYPE_CODE:      // always local
    178196        case VSEG_TYPE_STACK:
     197        case VSEG_TYPE_KCODE:
    179198        {
    180199            vseg->cxy = local_cxy;
    181200            break;
    182201        }
    183         case VSEG_TYPE_ANON:
     202        case VSEG_TYPE_ANON:      // intrinsic
    184203        case VSEG_TYPE_FILE:
    185204        case VSEG_TYPE_REMOTE:
     205        case VSEG_TYPE_KDEV:
     206        case VSEG_TYPE_KDATA:
    186207        {
    187208            vseg->cxy = (cxy_t) hal_remote_l32( XPTR(cxy, &ptr->cxy) );
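The switch above encodes the placement rule attached to each vseg type when a vseg is replicated from its reference cluster: CODE, STACK and KCODE vsegs are always local, DATA vsegs do not use the cxy field, and the other types (including the new KDATA and KDEV) keep the cluster recorded in the reference vseg. A compact sketch of that rule only:

    #include <vseg.h>

    // hedged sketch of the placement rule implemented by the switch above
    static cxy_t vseg_target_cxy( uint32_t type , cxy_t ref_cxy )
    {
        if( (type == VSEG_TYPE_CODE ) ||
            (type == VSEG_TYPE_STACK) ||
            (type == VSEG_TYPE_KCODE) )   return local_cxy;   // always local

        if( type == VSEG_TYPE_DATA )      return 0xffff;      // unused

        return ref_cxy;   // ANON, FILE, REMOTE, KDATA, KDEV : keep reference cluster
    }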
  • trunk/kernel/mm/vseg.h

    r611 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    3535
    3636/*******************************************************************************************
    37  * This enum defines the vseg types for an user process.
     37 * This enum defines the vseg types.
     38 * Note : the KDATA and KDEV types are not used by the TSAR HAL, because the accesses
     39 *        to kernel data or kernel devices are done through the DATA extension address
     40 *        register, but these types are probably required by the I86 HAL [AG].
    3841 ******************************************************************************************/
    3942
    4043typedef enum
    4144{
    42     VSEG_TYPE_CODE   = 0,          /*! executable user code   / private / localized       */
    43     VSEG_TYPE_DATA   = 1,          /*! initialized user data  / public  / distributed     */
    44     VSEG_TYPE_STACK  = 2,          /*! execution user stack   / private / localized       */
    45     VSEG_TYPE_ANON   = 3,          /*! anonymous mmap         / public  / localized       */
    46     VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized       */
    47     VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized       */
     45    VSEG_TYPE_CODE   = 0,          /*! executable user code     / private / localized     */
     46    VSEG_TYPE_DATA   = 1,          /*! initialized user data    / public  / distributed   */
     47    VSEG_TYPE_STACK  = 2,          /*! execution user stack     / private / localized     */
     48    VSEG_TYPE_ANON   = 3,          /*! anonymous mmap           / public  / localized     */
     49    VSEG_TYPE_FILE   = 4,          /*! file mmap                / public  / localized     */
     50    VSEG_TYPE_REMOTE = 5,          /*! remote mmap              / public  / localized     */
     51
     52    VSEG_TYPE_KCODE  = 6,          /*! executable kernel code   / private / localized     */
     53    VSEG_TYPE_KDATA  = 7,          /*! initialized kernel data  / private / localized     */
     54    VSEG_TYPE_KDEV   = 8,          /*! kernel peripheral device / public  / localized     */
    4855}
    4956vseg_type_t;
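With the three kernel types above, a kernel vseg can be registered in a process VSL with the same vmm_create_vseg() call used for user vsegs (its argument list appears in the vmm.c hunk of this changeset). A hedged sketch, where the base and size values are placeholders for those provided by the architecture-specific HAL:

    #include <vmm.h>
    #include <vseg.h>

    // hedged sketch: register the local kernel code segment as a KCODE vseg
    static error_t register_kcode_vseg( process_t * process,
                                        intptr_t    base,     // placeholder
                                        uint32_t    size )    // placeholder
    {
        vseg_t * vseg = vmm_create_vseg( process,
                                         VSEG_TYPE_KCODE,
                                         base,
                                         size,
                                         0,            // file_offset unused
                                         0,            // file_size unused
                                         XPTR_NULL,    // mapper_xp unused
                                         local_cxy );  // KCODE vsegs are local

        return (vseg == NULL) ? -1 : 0;
    }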
     
    6067#define VSEG_PRIVATE  0x0010       /*! should not be accessed from another cluster        */
    6168#define VSEG_DISTRIB  0x0020       /*! physically distributed on all clusters             */
    62 #define VSEG_IDENT    0x0040       /*! identity mapping                                   */
    6369
    6470/*******************************************************************************************
  • trunk/kernel/syscalls/shared_include/shared_almos.h

    r611 r623  
    5353    DISPLAY_BUSYLOCKS         = 8,
    5454    DISPLAY_MAPPER            = 9,
     55    DISPLAY_BARRIER           = 10,
    5556}
    5657display_type_t;
  • trunk/kernel/syscalls/shared_include/shared_mman.h

    r594 r623  
    22 * shred_mman.h - Shared structures & mnemonics used by the <mman.h> user library.
    33 *
    4  * Author  Alain Greiner (2016,2017,2018)
     4 * Author  Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2626
    2727/*******************************************************************************************
    28  * These structure are used by the mmap() syscall().
      28 * This structure is used by the mmap() syscall.
    2929 ******************************************************************************************/
    3030
  • trunk/kernel/syscalls/sys_creat.c

    r457 r623  
    22 * sys_creat.c - create a file
    33 *
    4  * Author    Alain Greiner (2016,2017)
      4 * Author    Alain Greiner (2016,2017,2018,2019)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/syscalls/sys_display.c

    r619 r623  
    3131#include <string.h>
    3232#include <shared_syscalls.h>
     33#include <remote_barrier.h>
    3334#include <vfs.h>
    3435#include <mapper.h>
     
    5354    else if( type == DISPLAY_BUSYLOCKS         ) return "BUSYLOCKS";
    5455    else if( type == DISPLAY_MAPPER            ) return "MAPPER";
     56    else if( type == DISPLAY_BARRIER           ) return "BARRIER";
    5557    else                                         return "undefined";
    5658}
     
    8183#endif
    8284
    83     ////////////////////////////
    84     if( type == DISPLAY_STRING )
     85    switch( type )
    8586    {
    86         char      kbuf[512];
    87         uint32_t  length;
    88 
    89         char    * string = (char *)arg0;
    90 
    91         // check string in user space
    92         error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
    93 
    94         if( error )
    95         {
     87        ////////////////////
     88        case DISPLAY_STRING:
     89        {
     90            char      kbuf[512];
     91            uint32_t  length;
     92
     93            char    * string = (char *)arg0;
     94
     95            // check string in user space
     96            error = vmm_get_vseg( process , (intptr_t)arg0 , &vseg );
     97
     98            if( error )
     99            {
    96100
    97101#if DEBUG_SYSCALLS_ERROR
     
    99103__FUNCTION__ , (intptr_t)arg0 );
    100104#endif
     105                this->errno = EINVAL;
     106                return -1;
     107            }
     108
      109            // check string length
     110            length = hal_strlen_from_uspace( string );
     111
     112            if( length >= 512 )
     113            {
     114
     115#if DEBUG_SYSCALLS_ERROR
     116printk("\n[ERROR] in %s for STRING : string length %d too large\n",
     117__FUNCTION__ , length );
     118#endif
     119                this->errno = EINVAL;
     120                return -1;
     121            }
     122
     123            // copy string to kernel space
     124            hal_strcpy_from_uspace( kbuf , string , 512 );
     125
     126            // print message on TXT0 kernel terminal
     127            printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
     128
     129            break;
     130        }
     131        /////////////////
     132        case DISPLAY_VMM:
     133        {
     134            cxy_t cxy = (cxy_t)arg0;
     135            pid_t pid = (pid_t)arg1;
     136
     137            // check cxy argument
     138                if( cluster_is_undefined( cxy ) )
     139            {
     140
     141#if DEBUG_SYSCALLS_ERROR
     142printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
     143__FUNCTION__ , pid , cxy );
     144#endif
     145                this->errno = EINVAL;
     146                return -1;
     147            }
     148
     149            // get extended pointer on process PID in cluster CXY
     150            xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );
     151
     152                if( process_xp == XPTR_NULL )
     153            {
     154
     155#if DEBUG_SYSCALLS_ERROR
     156printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
     157__FUNCTION__ , pid , cxy );
     158#endif
     159                this->errno = EINVAL;
     160                return -1;
     161            }
     162
     163            // get local pointer on process
     164            process_t * process = (process_t *)GET_PTR( process_xp );
     165
     166            // call kernel function
     167            if( cxy == local_cxy )
     168            {
     169                    vmm_display( process , true );
     170            }
     171            else
     172            {
     173                rpc_vmm_display_client( cxy , process , true );
     174            }
     175
     176            break;
     177        }
     178        ///////////////////
     179        case DISPLAY_SCHED:
     180        {
     181            cxy_t cxy = (cxy_t)arg0;
     182            lid_t lid = (lid_t)arg1;
     183
     184            // check cxy argument
     185                if( cluster_is_undefined( cxy ) )
     186            {
     187
     188#if DEBUG_SYSCALLS_ERROR
     189printk("\n[ERROR] in %s for SCHED : illegal cxy argument %x\n",
     190__FUNCTION__ , cxy );
     191#endif
     192                this->errno = EINVAL;
     193                return -1;
     194            }
     195
     196            // check lid argument
     197            if( lid >= LOCAL_CLUSTER->cores_nr )
     198            {
     199
     200#if DEBUG_SYSCALLS_ERROR
     201printk("\n[ERROR] in %s for SCHED : illegal lid argument %x\n",
     202__FUNCTION__ , lid );
     203#endif
     204                this->errno = EINVAL;
     205                return -1;
     206            }
     207
     208            if( cxy == local_cxy )
     209            {
     210                    sched_display( lid );
     211            }
     212            else
     213            {
     214                sched_remote_display( cxy , lid );
     215            }
     216
     217            break;
     218        }
     219        ///////////////////////////////
     220        case DISPLAY_CLUSTER_PROCESSES:
     221        {
     222            cxy_t  cxy   = (cxy_t)arg0;
     223            bool_t owned = (bool_t)arg1;
     224
     225            // check cxy argument
     226                if( cluster_is_undefined( cxy ) )
     227            {
     228
     229#if DEBUG_SYSCALLS_ERROR
     230printk("\n[ERROR] in %s for CLUSTER_PROCESSES : illegal cxy argument %x\n",
     231__FUNCTION__ , cxy );
     232#endif
     233                this->errno = EINVAL;
     234                return -1;
     235            }
     236
     237            cluster_processes_display( cxy , owned );
     238
     239            break;
     240        }
     241        /////////////////
     242        case DISPLAY_VFS:
     243        {
     244            vfs_display( process->vfs_root_xp );
     245
     246            break;
     247        }
     248        ///////////////////
     249        case DISPLAY_CHDEV:
     250        {
     251            chdev_dir_display();
     252
     253            break;
     254        }
     255        ///////////////////////////
     256        case DISPLAY_TXT_PROCESSES:
     257        {
     258            uint32_t txt_id = (uint32_t)arg0;
     259
     260            // check argument
     261                if( txt_id >= LOCAL_CLUSTER->nb_txt_channels )
     262            {
     263
     264#if DEBUG_SYSCALLS_ERROR
     265printk("\n[ERROR] in %s for TXT_PROCESSES : illegal txt_id argument %d\n",
     266__FUNCTION__ , txt_id );
     267#endif
     268                this->errno = EINVAL;
     269                return -1;
     270            }
     271
     272            process_txt_display( txt_id );
     273
     274            break;
     275        }
     276        //////////////////
     277        case DISPLAY_DQDT:
     278        {
     279            dqdt_display();
     280
     281            break;
     282        }
     283        ///////////////////////
     284        case DISPLAY_BUSYLOCKS:
     285        {
     286            pid_t   pid   = (pid_t)arg0;
     287            trdid_t trdid = (trdid_t)arg1;
     288
     289            // get extended pointer on target thread
     290            xptr_t thread_xp = thread_get_xptr( pid , trdid );
     291
     292            if( thread_xp == XPTR_NULL )
     293            {
     294
     295#if DEBUG_SYSCALLS_ERROR
     296printk("\n[ERROR] in %s for BUSYLOCKS : thread[%x,%x] not found\n",
     297__FUNCTION__ , pid, trdid );
     298#endif
     299                this->errno = EINVAL;
     300                return -1;
     301            }
     302
     303            thread_display_busylocks( thread_xp , __FUNCTION__ );
     304
     305            break;
     306        }
     307        ////////////////////
     308        case DISPLAY_MAPPER:
     309        {
     310            xptr_t        root_inode_xp;
     311            xptr_t        inode_xp;
     312            cxy_t         inode_cxy;
     313            vfs_inode_t * inode_ptr;
     314            xptr_t        mapper_xp;
     315            mapper_t    * mapper_ptr;
     316
     317            char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
     318
     319            char     * path    = (char *)arg0;
     320            uint32_t   page_id = (uint32_t)arg1;
     321            uint32_t   nbytes  = (uint32_t)arg2;
     322
     323            // check pathname length
     324            if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH )
     325            {
     326
     327#if DEBUG_SYSCALLS_ERROR
     328printk("\n[ERROR] in %s for MAPPER : pathname too long\n",
     329 __FUNCTION__ );
     330#endif
     331                this->errno = ENFILE;
     332                return -1;
     333            }
     334
     335            // copy pathname in kernel space
     336            hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH );
     337
     338            // compute root inode for pathname
     339            if( kbuf[0] == '/' )                        // absolute path
     340            {
     341                // use extended pointer on VFS root inode
     342                root_inode_xp = process->vfs_root_xp;
     343            }
     344            else                                        // relative path
     345            {
     346                // get cluster and local pointer on reference process
     347                xptr_t      ref_xp  = process->ref_xp;
     348                process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
     349                cxy_t       ref_cxy = GET_CXY( ref_xp );
     350
     351                // get extended pointer on CWD inode
     352                root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) );
     353            }
     354
     355            // get extended pointer on target inode
     356            error = vfs_lookup( root_inode_xp,
     357                                kbuf,
     358                                0,
     359                                &inode_xp,
     360                                NULL );
     361            if( error )
     362                {
     363
     364#if DEBUG_SYSCALLS_ERROR
      365printk("\n[ERROR] in %s for MAPPER : cannot find inode <%s>\n",
     366__FUNCTION__ , kbuf );
     367#endif
     368                        this->errno = ENFILE;
     369                        return -1;
     370                }
     371   
     372            // get target inode cluster and local pointer
     373            inode_cxy = GET_CXY( inode_xp );
     374            inode_ptr = GET_PTR( inode_xp );
     375
     376            // get extended pointer on target mapper
     377            mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
     378            mapper_xp  = XPTR( inode_cxy , mapper_ptr );
     379
     380            // display mapper
     381            error = mapper_display_page( mapper_xp , page_id , nbytes );
     382
     383            if( error )
     384                {
     385
     386#if DEBUG_SYSCALLS_ERROR
     387printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n",
     388__FUNCTION__ , page_id );
     389#endif
     390                        this->errno = ENFILE;
     391                        return -1;
     392                }
     393
     394            break;
     395        }
     396        /////////////////////
     397        case DISPLAY_BARRIER:
     398        {
     399            // get target process PID
     400            pid_t pid = (pid_t)arg0;
     401
     402            // get pointers on owner process
     403            xptr_t      process_xp  = cluster_get_reference_process_from_pid( pid );
     404            process_t * process_ptr = GET_PTR( process_xp );
     405            cxy_t       process_cxy = GET_CXY( process_xp );
     406
     407            if( process_xp == XPTR_NULL )
     408            {
     409
     410#if DEBUG_SYSCALLS_ERROR
     411printk("\n[ERROR] in %s for BARRIER : process %x not found\n",
     412__FUNCTION__ , pid );
     413#endif
     414                this->errno = EINVAL;
     415                return -1;
     416            }
     417
     418            // get extended pointer on root of list of barriers
     419            xptr_t root_xp = XPTR( process_cxy , &process_ptr->barrier_root );
     420
     421            if( xlist_is_empty( root_xp ) )
     422            {
     423
     424#if DEBUG_SYSCALLS_ERROR
     425printk("\n[ERROR] in %s for BARRIER : no registered barrier in process %x\n",
     426__FUNCTION__ , pid );
     427#endif
     428                this->errno = EINVAL;
     429                return -1;
     430            }
     431
     432            // get extended pointer on first registered generic barrier descriptor
     433            xptr_t gen_barrier_xp  = XLIST_FIRST( root_xp , generic_barrier_t , list );
     434
     435            // display barrier state
     436            generic_barrier_display( gen_barrier_xp );
     437
     438            break;
     439        }
     440        ////////
     441        default:
     442        {
     443
     444#if DEBUG_SYSCALLS_ERROR
     445printk("\n[ERROR] in %s : undefined display type %d\n",
     446        __FUNCTION__ , type );
     447#endif
    101448            this->errno = EINVAL;
    102449            return -1;
    103450        }
    104 
    105         // ckeck string length
    106         length = hal_strlen_from_uspace( string );
    107 
    108         if( length >= 512 )
    109         {
    110 
    111 #if DEBUG_SYSCALLS_ERROR
    112 printk("\n[ERROR] in %s for STRING : string length %d too large\n",
    113 __FUNCTION__ , length );
    114 #endif
    115             this->errno = EINVAL;
    116             return -1;
    117         }
    118 
    119         // copy string to kernel space
    120         hal_strcpy_from_uspace( kbuf , string , 512 );
    121 
    122         // print message on TXT0 kernel terminal
    123         printk("\n%s / cycle %d\n", kbuf, (uint32_t)hal_get_cycles() );
    124     }
    125     //////////////////////////////
    126     else if( type == DISPLAY_VMM )
    127     {
    128         cxy_t cxy = (cxy_t)arg0;
    129         pid_t pid = (pid_t)arg1;
    130 
    131         // check cxy argument
    132             if( cluster_is_undefined( cxy ) )
    133         {
    134 
    135 #if DEBUG_SYSCALLS_ERROR
    136 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
    137 __FUNCTION__ , pid , cxy );
    138 #endif
    139             this->errno = EINVAL;
    140             return -1;
    141         }
    142 
    143         // get extended pointer on process PID in cluster CXY
    144         xptr_t process_xp = cluster_get_process_from_pid_in_cxy( cxy , pid );
    145 
    146             if( process_xp == XPTR_NULL )
    147         {
    148 
    149 #if DEBUG_SYSCALLS_ERROR
    150 printk("\n[ERROR] in %s for VMM : process %x in cluster %x not found\n",
    151 __FUNCTION__ , pid , cxy );
    152 #endif
    153             this->errno = EINVAL;
    154             return -1;
    155         }
    156 
    157         // get local pointer on process
    158         process_t * process = (process_t *)GET_PTR( process_xp );
    159 
    160         // call kernel function
    161         if( cxy == local_cxy )
    162         {
    163                 vmm_display( process , true );
    164         }
    165         else
    166         {
    167             rpc_vmm_display_client( cxy , process , true );
    168         }
    169     }
    170     ////////////////////////////////
    171     else if( type == DISPLAY_SCHED )
    172     {
    173         cxy_t cxy = (cxy_t)arg0;
    174         lid_t lid = (lid_t)arg1;
    175 
    176         // check cxy argument
    177             if( cluster_is_undefined( cxy ) )
    178         {
    179 
    180 #if DEBUG_SYSCALLS_ERROR
    181 printk("\n[ERROR] in %s for SCHED : illegal cxy argument %x\n",
    182 __FUNCTION__ , cxy );
    183 #endif
    184             this->errno = EINVAL;
    185             return -1;
    186         }
    187 
    188         // check lid argument
    189         if( lid >= LOCAL_CLUSTER->cores_nr )
    190         {
    191 
    192 #if DEBUG_SYSCALLS_ERROR
    193 printk("\n[ERROR] in %s for SCHED : illegal lid argument %x\n",
    194 __FUNCTION__ , lid );
    195 #endif
    196             this->errno = EINVAL;
    197             return -1;
    198         }
    199 
    200         if( cxy == local_cxy )
    201         {
    202                 sched_display( lid );
    203         }
    204         else
    205         {
    206             sched_remote_display( cxy , lid );
    207         }
    208     }
    209     ////////////////////////////////////////////
    210     else if( type == DISPLAY_CLUSTER_PROCESSES )
    211     {
    212         cxy_t  cxy   = (cxy_t)arg0;
    213         bool_t owned = (bool_t)arg1;
    214 
    215         // check cxy argument
    216             if( cluster_is_undefined( cxy ) )
    217         {
    218 
    219 #if DEBUG_SYSCALLS_ERROR
    220 printk("\n[ERROR] in %s for CLUSTER_PROCESSES : illegal cxy argument %x\n",
    221 __FUNCTION__ , cxy );
    222 #endif
    223             this->errno = EINVAL;
    224             return -1;
    225         }
    226 
    227         cluster_processes_display( cxy , owned );
    228     }
    229     //////////////////////////////
    230     else if( type == DISPLAY_VFS )
    231     {
    232         vfs_display( process->vfs_root_xp );
    233     }
    234     ////////////////////////////////
    235     else if( type == DISPLAY_CHDEV )
    236     {
    237         chdev_dir_display();
    238     }
    239     ////////////////////////////////////////
    240     else if( type == DISPLAY_TXT_PROCESSES )
    241     {
    242         uint32_t txt_id = (uint32_t)arg0;
    243 
    244         // check argument
    245             if( txt_id >= LOCAL_CLUSTER->nb_txt_channels )
    246         {
    247 
    248 #if DEBUG_SYSCALLS_ERROR
    249 printk("\n[ERROR] in %s for TXT_PROCESSES : illegal txt_id argument %d\n",
    250 __FUNCTION__ , txt_id );
    251 #endif
    252             this->errno = EINVAL;
    253             return -1;
    254         }
    255 
    256         process_txt_display( txt_id );
    257     }
    258     ///////////////////////////////
    259     else if( type == DISPLAY_DQDT )
    260     {
    261         dqdt_display();
    262     }
    263     ////////////////////////////////////
    264     else if( type == DISPLAY_BUSYLOCKS )
    265     {
    266         pid_t   pid   = (pid_t)arg0;
    267         trdid_t trdid = (trdid_t)arg1;
    268 
    269         // get extended pointer on target thread
    270         xptr_t thread_xp = thread_get_xptr( pid , trdid );
    271 
    272         if( thread_xp == XPTR_NULL )
    273         {
    274 
    275 #if DEBUG_SYSCALLS_ERROR
    276 printk("\n[ERROR] in %s for BUSYLOCKS : thread[%x,%x] not found\n",
    277 __FUNCTION__ , pid, trdid );
    278 #endif
    279             this->errno = EINVAL;
    280             return -1;
    281         }
    282 
    283         thread_display_busylocks( thread_xp , __FUNCTION__ );
    284     }
    285     /////////////////////////////////
    286     else if( type == DISPLAY_MAPPER )
    287     {
    288         xptr_t        root_inode_xp;
    289         xptr_t        inode_xp;
    290         cxy_t         inode_cxy;
    291         vfs_inode_t * inode_ptr;
    292         xptr_t        mapper_xp;
    293         mapper_t    * mapper_ptr;
    294 
    295         char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
    296 
    297         char     * path    = (char *)arg0;
    298         uint32_t   page_id = (uint32_t)arg1;
    299         uint32_t   nbytes  = (uint32_t)arg2;
    300 
    301         // check pathname length
    302         if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH )
    303         {
    304 
    305 #if DEBUG_SYSCALLS_ERROR
    306 printk("\n[ERROR] in %s for MAPPER : pathname too long\n",
    307  __FUNCTION__ );
    308 #endif
    309             this->errno = ENFILE;
    310             return -1;
    311         }
    312 
    313         // copy pathname in kernel space
    314         hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH );
    315 
    316         // compute root inode for pathname
    317         if( kbuf[0] == '/' )                        // absolute path
    318         {
    319             // use extended pointer on VFS root inode
    320             root_inode_xp = process->vfs_root_xp;
    321         }
    322         else                                        // relative path
    323         {
    324             // get cluster and local pointer on reference process
    325             xptr_t      ref_xp  = process->ref_xp;
    326             process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
    327             cxy_t       ref_cxy = GET_CXY( ref_xp );
    328 
    329             // use extended pointer on CWD inode
    330             root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) );
    331         }
    332 
    333         // get extended pointer on target inode
    334         error = vfs_lookup( root_inode_xp,
    335                             kbuf,
    336                             0,
    337                             &inode_xp,
    338                             NULL );
    339         if( error )
    340             {
    341 
    342 #if DEBUG_SYSCALLS_ERROR
     343 printk("\n[ERROR] in %s for MAPPER : cannot find inode <%s>\n",
    344 __FUNCTION__ , kbuf );
    345 #endif
    346                     this->errno = ENFILE;
    347                     return -1;
    348             }
    349    
    350         // get target inode cluster and local pointer
    351         inode_cxy = GET_CXY( inode_xp );
    352         inode_ptr = GET_PTR( inode_xp );
    353 
    354         // get extended pointer on target mapper
    355         mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );
    356         mapper_xp  = XPTR( inode_cxy , mapper_ptr );
    357 
    358         // display mapper
    359         error = mapper_display_page( mapper_xp , page_id , nbytes );
    360 
    361         if( error )
    362             {
    363 
    364 #if DEBUG_SYSCALLS_ERROR
    365 printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n",
    366 __FUNCTION__ , page_id );
    367 #endif
    368                     this->errno = ENFILE;
    369                     return -1;
    370             }
    371     }
    372     ////
    373     else
    374     {
    375 
    376 #if DEBUG_SYSCALLS_ERROR
    377 printk("\n[ERROR] in %s : undefined display type %d\n",
    378         __FUNCTION__ , type );
    379 #endif
    380         this->errno = EINVAL;
    381         return -1;
    382     }
     451    }  // end switch on type
    383452
    384453#if (DEBUG_SYS_DISPLAY || CONFIG_INSTRUMENTATION_SYSCALLS)
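
Note on the sys_display.c hunk above: the per-type if/else chain is replaced by a single switch, so every unknown display type now falls through one default branch that sets errno to EINVAL and returns -1. The stand-alone fragment below is only a sketch of that dispatch pattern, not the kernel code: the demo_* names, the reduced enum, printf() instead of printk() and the plain errno variable are simplifications introduced here for illustration.

    #include <errno.h>
    #include <stdio.h>

    /* simplified stand-ins for the kernel's display type codes (placeholders) */
    typedef enum
    {
        DISPLAY_VMM     = 0,
        DISPLAY_SCHED   = 1,
        DISPLAY_BARRIER = 2,
    }
    display_type_t;

    /* stub handlers standing in for vmm_display(), sched_display(), etc. */
    static void demo_display_vmm( void )   { printf("VMM state\n"); }
    static void demo_display_sched( void ) { printf("scheduler state\n"); }

    /* dispatch on the display type : every unsupported value reaches the
     * single default branch, which sets errno and returns -1, mirroring
     * the structure of the switch introduced by this changeset           */
    static int demo_sys_display( display_type_t type )
    {
        switch( type )
        {
            case DISPLAY_VMM:
            {
                demo_display_vmm();
                break;
            }
            case DISPLAY_SCHED:
            {
                demo_display_sched();
                break;
            }
            default:
            {
                errno = EINVAL;
                return -1;
            }
        }  // end switch on type
        return 0;
    }

    int main( void )
    {
        demo_sys_display( DISPLAY_VMM );                    // handled type
        if( demo_sys_display( (display_type_t)99 ) == -1 )  // unknown type
            printf("unknown display type rejected (errno %d)\n", errno );
        return 0;
    }

With this structure, supporting a new display type only requires adding a case, and the error path for bad types stays in one place.
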
  • trunk/kernel/syscalls/sys_mmap.c

    r611 r623  
    5656
    5757#if DEBUG_SYS_MMAP
    58 tm_start = hal_get_cycles();
    59 if ( DEBUG_SYS_MMAP < tm_start )
     58if( DEBUG_SYS_MMAP < tm_start )
    6059printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    6160__FUNCTION__, process->pid, this->trdid, (uint32_t)tm_start );
     
    314313#endif
    315314
     315#if CONFIG_INSTRUMENTATION_SYSCALLS
     316hal_atomic_add( &syscalls_cumul_cost[SYS_MMAP] , tm_end - tm_start );
     317hal_atomic_add( &syscalls_occurences[SYS_MMAP] , 1 );
     318#endif
     319
    316320#if DEBUG_SYS_MMAP
    317 if ( DEBUG_SYS_MMAP < tm_start )
     321if ( DEBUG_SYS_MMAP < tm_end )
    318322printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / base %x / size %d / cycle %d\n",
    319323__FUNCTION__, process->pid, this->trdid,
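
Note on the sys_mmap.c hunk above: it adds the instrumentation block guarded by CONFIG_INSTRUMENTATION_SYSCALLS, which atomically accumulates the elapsed cycles and the call count for SYS_MMAP, and it fixes the exit trace to compare DEBUG_SYS_MMAP against tm_end rather than tm_start. The fragment below is only a minimal user-space sketch of that accounting pattern under stated assumptions: the demo_* names, the C11 atomics and the clock_gettime()-based counter are stand-ins for the kernel's syscalls_cumul_cost[] / syscalls_occurences[] tables, hal_atomic_add() and hal_get_cycles().

    #define _POSIX_C_SOURCE 199309L
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define DEMO_NR_SYSCALLS  64     /* placeholder table size    */
    #define DEMO_SYS_MMAP     10     /* placeholder syscall index */

    /* per-syscall counters, analogous to the kernel's cumulated cost and
     * occurrence tables (names and types simplified for this sketch)     */
    static _Atomic uint64_t demo_cumul_cost[DEMO_NR_SYSCALLS];
    static _Atomic uint64_t demo_occurrences[DEMO_NR_SYSCALLS];

    /* stand-in for a cycle counter : any monotonic clock works here */
    static uint64_t demo_get_cycles( void )
    {
        struct timespec ts;
        clock_gettime( CLOCK_MONOTONIC , &ts );
        return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /* wrap a (dummy) syscall body between two time-stamps and accumulate
     * both the elapsed cost and the number of calls, as the hunk above does */
    static int demo_instrumented_mmap( void )
    {
        uint64_t tm_start = demo_get_cycles();

        /* ... syscall body would run here ... */

        uint64_t tm_end = demo_get_cycles();

        atomic_fetch_add( &demo_cumul_cost[DEMO_SYS_MMAP]  , tm_end - tm_start );
        atomic_fetch_add( &demo_occurrences[DEMO_SYS_MMAP] , 1 );
        return 0;
    }

    int main( void )
    {
        demo_instrumented_mmap();
        printf( "mmap: %llu calls, %llu time units total\n",
                (unsigned long long)atomic_load( &demo_occurrences[DEMO_SYS_MMAP] ),
                (unsigned long long)atomic_load( &demo_cumul_cost[DEMO_SYS_MMAP] ) );
        return 0;
    }

Comparing the debug threshold against tm_end at exit keeps the entry and exit traces consistent once the cumulated cycle count has passed the activation value.
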
  • trunk/kernel/syscalls/sys_munmap.c

    r506 r623  
    2525#include <hal_kernel_types.h>
    2626#include <hal_uspace.h>
     27#include <hal_irqmask.h>
    2728#include <shared_syscalls.h>
    2829#include <errno.h>
     
    4142{
    4243    error_t       error;
     44    vseg_t      * vseg;
     45    reg_t         save_sr;      // required to enable IRQs
    4346
    4447        thread_t    * this    = CURRENT_THREAD;
    4548        process_t   * process = this->process;
    4649
     50#if (DEBUG_SYS_MUNMAP || CONFIG_INSTRUMENTATION_SYSCALLS)
     51uint64_t     tm_start = hal_get_cycles();
     52#endif
     53
    4754#if DEBUG_SYS_MUNMAP
    48 uint64_t tm_start;
    49 uint64_t tm_end;
    50 tm_start = hal_get_cycles();
    5155if( DEBUG_SYS_MUNMAP < tm_start )
    52 printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n"
     56printk("\n[DBG] %s : thread %x enter / process %x / cycle %d\n",
    5357__FUNCTION__ , this, process->pid, (uint32_t)tm_start );
    5458#endif
     59
     60    // check user buffer is mapped
     61    error = vmm_get_vseg( process , (intptr_t)vaddr, &vseg );