Changeset 657 for trunk/kernel/mm


Ignore:
Timestamp:
Mar 18, 2020, 11:16:59 PM (5 years ago)
Author:
alain
Message:

Introduce remote_buf.c/.h & socket.c/.h files.
Update dev_nic.c/.h files.

Location:
trunk/kernel/mm
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kcm.c

    r656 r657  
    4343//////////////////////////////////////////////////////////////////////////////////////
    4444// This static function must be called by a local thread.
    45 // It returns a pointer on a block allocated from a non-full kcm_page.
    46 // It makes a panic if no block is available in selected page.
     45// It returns a pointer on a block allocated from an active kcm_page.
     46// It triggers a kernel panic if no block is available in the selected page.
    4747// It changes the page status as required.
    4848//////////////////////////////////////////////////////////////////////////////////////
    4949// @ kcm      : pointer on KCM allocator.
    50 // @ kcm_page : pointer on a non-full kcm_page.
     50// @ kcm_page : pointer on an active kcm_page.
    5151// @ return pointer on allocated block.
    5252/////////////////////////////////////////////////////////////////////////////////////
     
    6464    uint32_t index  = 1;
    6565    uint64_t mask   = (uint64_t)0x2;
    66     uint32_t found  = 0;
    6766
    6867        // allocate first free block in kcm_page, update status,
     
    7069    while( index <= max )
    7170    {
    72         if( (status & mask) == 0 )   // block non allocated
     71        if( (status & mask) == 0 )   // block found
    7372        {
     73            // update page count and status
    7474            kcm_page->status = status | mask;
    7575            kcm_page->count  = count + 1;
    76             found  = 1;
    77 
    7876            break;     
    7977        }
     
    8381    }
    8482
    85     // change the page list if almost full
     83    // change the page list if found block is the last
    8684    if( count == max-1 )
    8785    {
     
    162160
    163161/////////////////////////////////////////////////////////////////////////////////////
    164 // This private static function must be called by a local thread.
    165 // It returns one non-full kcm_page with te following policy :
     162// This static function must be called by a local thread.
     163// It returns one non-full kcm_page with the following policy :
    166164// - if the "active_list" is non empty, it returns the first "active" page,
    167165//   without modifying the KCM state.
    168 // - if the "active_list" is empty, it allocates a new page fromm PPM, inserts
     166// - if the "active_list" is empty, it allocates a new page from PPM, inserts
    169167//   this page in the active_list, and returns it.
    170168/////////////////////////////////////////////////////////////////////////////////////
     
    275273        // release KCM lock
    276274        remote_busylock_release( lock_xp );
    277 }
     275
     276}  // end kcm_destroy()
    278277
    279278//////////////////////////////////
     
    284283        void       * block_ptr;
    285284
    286     // min block size is 64 bytes
     285   // min block size is 64 bytes
    287286    if( order < 6 ) order = 6;
    288287
     
    301300    kcm_page = kcm_get_page( kcm_ptr );
    302301
     302#if DEBUG_KCM
     303thread_t * this  = CURRENT_THREAD;
     304uint32_t   cycle = (uint32_t)hal_get_cycles();
     305if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
     306{
     307printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n",
     308__FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr,
     309GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
     310kcm_remote_display( local_cxy , kcm_ptr );
     311}
     312#endif
     313
    303314    if( kcm_page == NULL )
    304315        {
     
    314325
    315326#if DEBUG_KCM
    316 thread_t * this  = CURRENT_THREAD;
    317 uint32_t   cycle = (uint32_t)hal_get_cycles();
    318 if( DEBUG_KCM < cycle )
    319 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n",
    320 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr,
    321 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
     327if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
     328{
     329printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n",
     330__FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr,
     331GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
     332kcm_remote_display( local_cxy , kcm_ptr );
     333}
    322334#endif
    323335
     
    344356thread_t * this  = CURRENT_THREAD;
    345357uint32_t   cycle = (uint32_t)hal_get_cycles();
    346 if( DEBUG_KCM < cycle )
    347 printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n",
    348 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr,
    349 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
     358if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
     359{
     360printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n",
     361__FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr,
     362GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
     363kcm_remote_display( local_cxy , kcm_ptr );
     364}
    350365#endif
    351366
     
    361376        // release lock
    362377        remote_busylock_release( lock_xp );
     378
     379#if DEBUG_KCM
     380if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
     381{
     382printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n",
     383__FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr,
     384GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
     385kcm_remote_display( local_cxy , kcm_ptr );
    363386}
     387#endif
     388
     389}  // end kcm_free()
    364390
    365391/////////////////////////////////////////////////////////////////////////////////////
     
    369395/////////////////////////////////////////////////////////////////////////////////////
    370396// This static function can be called by any thread running in any cluster.
    371 // It returns a local pointer on a block allocated from an non-full kcm_page.
    372 // It makes a panic if no block available in selected page.
     397// It returns a local pointer on a block allocated from an active kcm_page.
      398// It triggers a kernel panic if no block is available in the selected kcm_page.
    373399// It changes the page status as required.
    374400/////////////////////////////////////////////////////////////////////////////////////
    375 // @ kcm_cxy  : remote KCM cluster identidfier.
     401// @ kcm_cxy  : remote KCM cluster identifier.
    376402// @ kcm_ptr  : local pointer on remote KCM allocator.
    377 // @ kcm_page : pointer on active kcm page to use.
     403// @ kcm_page : local pointer on remote active kcm_page to use.
    378404// @ return a local pointer on the allocated block.
    379405/////////////////////////////////////////////////////////////////////////////////////
     
    392418    uint32_t index  = 1;
    393419    uint64_t mask   = (uint64_t)0x2;
    394     uint32_t found  = 0;
    395420   
    396421        // allocate first free block in kcm_page, update status,
     
    398423    while( index <= max )
    399424    {
    400         if( (status & mask) == 0 )   // block non allocated
     425        if( (status & mask) == 0 )   // block found
    401426        {
    402427            hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask );
    403             hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count  ) , count + 1 );
    404             found  = 1;
     428            hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count  ) , count + 1 );
    405429            break;     
    406430        }
     
    410434    }
    411435
    412         // change the page list if almost full
     436        // change the page list if found block is the last
    413437        if( count == max-1 )
    414438        {
     
    631655                         kcm_t * kcm_ptr )
    632656{
     657    list_entry_t * iter;
     658    kcm_page_t   * kcm_page;
     659    uint64_t       status;
     660    uint32_t       count;
     661
    633662    uint32_t order           = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) );
    634663    uint32_t full_pages_nr   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
    635664    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );
    636665
    637         printk("*** KCM / cxy %x / order %d / full_pages %d / empty_pages %d / active_pages %d\n",
     666        printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
    638667        kcm_cxy, order, full_pages_nr, active_pages_nr );
    639 }
     668
     669    if( active_pages_nr )
     670    {
     671        LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->active_root , iter )
     672        {
     673            kcm_page = LIST_ELEMENT( iter , kcm_page_t , list );
     674            status   = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
     675            count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
     676
     677            printk("- active page %x / status (%x,%x) / count %d\n",
     678            kcm_page, GET_CXY( status ), GET_PTR( status ), count );
     679        }
     680    }
     681
     682    if( full_pages_nr )
     683    {
     684        LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->full_root , iter )
     685        {
     686            kcm_page = LIST_ELEMENT( iter , kcm_page_t , list );
     687            status   = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
     688            count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
     689
     690            printk("- full page %x / status (%x,%x) / count %d\n",
     691            kcm_page, GET_CXY( status ), GET_PTR( status ), count );
     692        }
     693    }
     694}  // end kcm_remote_display()
  • trunk/kernel/mm/kcm.h

    r635 r657  
    9292 * It initializes a Kernel Cache Manager, depending on block size.
    9393 ****************************************************************************************
    94  * @ kcm      : pointer on KCM manager to initialize.
     94 * @ kcm      : pointer on KCM to be initialized.
    9595 * @ order    : ln(block_size).
    9696 ***************************************************************************************/
     
    122122 ***************************************************************************************/
    123123void kcm_free( void    * block_ptr );
     124
     125
     126
    124127
    125128/****************************************************************************************
  • trunk/kernel/mm/kmem.c

    r656 r657  
    22 * kmem.c - kernel memory allocator implementation.
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/mapper.c

    r656 r657  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018,2019)
     5 *           Alain Greiner (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    4646
    4747
    48 //////////////////////////////////////////////
    49 mapper_t * mapper_create( vfs_fs_type_t type )
    50 {
    51     mapper_t * mapper;
     48/////////////////////////////////////
     49xptr_t  mapper_create( cxy_t     cxy,
     50                       uint32_t  type )
     51{
     52    mapper_t * mapper_ptr;
    5253    kmem_req_t req;
    5354    error_t    error;
    5455
    5556    // allocate memory for mapper descriptor
    56     req.type  = KMEM_KCM;
    57     req.order = bits_log2( sizeof(mapper_t) );
    58     req.flags = AF_KERNEL | AF_ZERO;
    59     mapper    = kmem_alloc( &req );
    60 
    61     if( mapper == NULL )
     57    req.type    = KMEM_KCM;
     58    req.order   = bits_log2( sizeof(mapper_t) );
     59    req.flags   = AF_KERNEL | AF_ZERO;
     60    mapper_ptr  = kmem_remote_alloc( cxy , &req );
     61
     62    if( mapper_ptr == NULL )
    6263    {
    6364        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
    64         return NULL;
    65     }
    66 
    67     // initialize refcount & inode
    68     mapper->refcount = 0;
    69     mapper->inode    = NULL;
     65        return XPTR_NULL;
     66    }
     67
     68    // initialize refcount and type
     69    hal_remote_s32( XPTR( cxy , &mapper_ptr->refcount ) , 0 );
     70    hal_remote_s32( XPTR( cxy , &mapper_ptr->fs_type )  , type );
    7071
    7172    // initialize radix tree
    72     error = grdxt_init( &mapper->rt,
    73                         CONFIG_MAPPER_GRDXT_W1,
    74                         CONFIG_MAPPER_GRDXT_W2,
    75                         CONFIG_MAPPER_GRDXT_W3 );
     73    error = grdxt_remote_init( XPTR( cxy , &mapper_ptr->rt ),
     74                               CONFIG_MAPPER_GRDXT_W1,
     75                               CONFIG_MAPPER_GRDXT_W2,
     76                               CONFIG_MAPPER_GRDXT_W3 );
    7677    if( error )
    7778    {
    7879        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
    7980        req.type  = KMEM_KCM;
    80         req.ptr   = mapper;
    81         kmem_free( &req );
    82         return NULL;
    83     }
    84 
    85     // initialize mapper type
    86     mapper->type = type;
     81        req.ptr   = mapper_ptr;
     82        kmem_remote_free( cxy , &req );
     83        return XPTR_NULL;
     84    }
    8785
    8886    // initialize mapper lock
    89     remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
     87    remote_rwlock_init( XPTR( cxy , &mapper_ptr->lock ) , LOCK_MAPPER_STATE );
    9088
    9189    // initialize waiting threads xlist (empty)
    92     xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );
     90    xlist_root_init( XPTR( cxy , &mapper_ptr->wait_root ) );
    9391
    9492    // initialize vsegs xlist (empty)
    95     xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );
    96 
    97     return mapper;
     93    xlist_root_init( XPTR( cxy , &mapper_ptr->vsegs_root ) );
     94
     95    return XPTR( cxy , mapper_ptr );
    9896
    9997}  // end mapper_create()
    10098
    10199////////////////////////////////////////
    102 void mapper_destroy( mapper_t * mapper )
    103 {
     100void mapper_destroy( xptr_t  mapper_xp )
     101{
     102    xptr_t     page_xp;
    104103    page_t   * page;
    105104    uint32_t   found_index = 0;
     
    107106    kmem_req_t req;
    108107
     108    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
     109    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     110
     111    // build extended pointer on radix tree
     112    xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );
     113
    109114    // scan radix tree
    110115    do
    111116    {
    112117        // get page from radix tree
    113         page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
    114 
     118        page_xp = grdxt_remote_get_first( rt_xp,
     119                                          start_index ,
     120                                          &found_index );
     121        page = GET_PTR( page_xp );
     122       
    115123        // release registered pages to PPM
    116124        if( page != NULL )
    117125        {
    118126            // remove page from mapper and release to PPM
    119             mapper_remote_release_page( XPTR( local_cxy , mapper ) , page );
     127            mapper_remote_release_page( mapper_xp , page );
    120128
    121129            // update start_key value for next page
     
    126134
    127135    // release the memory allocated to radix tree itself
    128     grdxt_destroy( &mapper->rt );
     136    grdxt_remote_destroy( rt_xp );
    129137
    130138    // release memory for mapper descriptor
    131139    req.type = KMEM_KCM;
    132     req.ptr  = mapper;
    133     kmem_free( &req );
     140    req.ptr  = mapper_ptr;
     141    kmem_remote_free( mapper_cxy , &req );
    134142
    135143}  // end mapper_destroy()
    136144
    137 ////////////////////////////////////////////////////////
    138 error_t mapper_remote_handle_miss( xptr_t     mapper_xp,
    139                                    uint32_t   page_id,
    140                                    xptr_t   * page_xp_ptr )
     145/////////////////////////////////////////////////
     146error_t mapper_handle_miss( xptr_t     mapper_xp,
     147                            uint32_t   page_id,
     148                            xptr_t   * page_xp_ptr )
    141149{
    142150    error_t    error;
    143151
    144     uint32_t   inode_size;   
    145     uint32_t   inode_type;
     152    uint32_t   inode_size = 0;   
     153    uint32_t   inode_type = 0;
    146154
    147155    thread_t * this = CURRENT_THREAD;
     
    159167        inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
    160168        inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
    161     }
    162     else
    163     {
    164         inode_size = 0;
    165         inode_type = 0;
    166169    }
    167170
     
    267270    return 0;
    268271
    269 }  // end mapper_remote_handle_miss()
    270 
    271 ////////////////////////////////////////////////////
    272 xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
    273                                 uint32_t  page_id )
     272}  // end mapper_handle_miss()
     273
     274/////////////////////////////////////////////
     275xptr_t  mapper_get_page( xptr_t    mapper_xp,
     276                         uint32_t  page_id )
    274277{
    275278    error_t       error;
     
    281284    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    282285
     286assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ),
     287"should not be used for the FAT mapper");
     288
    283289#if DEBUG_MAPPER_GET_PAGE
    284 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
    285290uint32_t      cycle = (uint32_t)hal_get_cycles();
    286291char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    287 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )  // FAT mapper
    288 {
    289     printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
    290     __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
    291 }
    292 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )  // file mapper
    293 {
     292if( DEBUG_MAPPER_GET_PAGE < cycle ) 
     293{
     294    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
    294295    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    295296    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
     
    330331        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
    331332        {
    332             error = mapper_remote_handle_miss( mapper_xp,
    333                                                page_id,
    334                                                &page_xp );
     333            error = mapper_handle_miss( mapper_xp,
     334                                        page_id,
     335                                        &page_xp );
    335336            if( error )
    336337            {
     
    343344
    344345#if (DEBUG_MAPPER_GET_PAGE & 1)
    345 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
    346 {
    347     printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n",
    348     __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) );
    349 }
    350 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
    351 {
    352     printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n",
    353     __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
    354 }
     346if( DEBUG_MAPPER_GET_PAGE < cycle )
     347printk("\n[%s] thread[%x,%x] introduced missing page %d in <%s> mapper / ppn %x\n",
     348__FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
    355349#endif
    356350       
     
    365359
    366360#if DEBUG_MAPPER_GET_PAGE
    367 cycle = (uint32_t)hal_get_cycles();
    368 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
    369 {
    370     printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
    371     __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
    372 }
    373 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
    374 {
    375     printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x\n",
    376     __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
    377 }
     361if( DEBUG_MAPPER_GET_PAGE < cycle )
     362printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
     363__FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
    378364#endif
    379365
     
    385371    return page_xp;
    386372
    387 }  // end mapper_remote_get_page()
     373}  // end mapper_get_page()
     374
     375/////////////////////////////////////////////////
     376xptr_t  mapper_get_fat_page( xptr_t    mapper_xp,
     377                             uint32_t  page_id )
     378{
     379    error_t       error;
     380
     381    thread_t * this = CURRENT_THREAD;
     382
     383    // get mapper cluster and local pointer
     384    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     385    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
     386
     387assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ),
     388"should be used for the FAT mapper");
     389
     390#if DEBUG_MAPPER_GET_FAT_PAGE
     391uint32_t      cycle = (uint32_t)hal_get_cycles();
     392if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
     393printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
     394__FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
     395#endif
     396
     397#if( DEBUG_MAPPER_GET_FAT_PAGE & 2 )
     398if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
     399ppm_remote_display( local_cxy );
     400#endif
     401
     402    // check thread can yield
     403    thread_assert_can_yield( this , __FUNCTION__ );
     404
     405    // build extended pointer on mapper lock and mapper rt
     406    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
     407    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
     408
     409    // take mapper lock in READ_MODE
     410    remote_rwlock_rd_acquire( lock_xp );
     411
     412    // search page in radix tree
     413    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );
     414
     415    // test mapper miss
     416    if( page_xp == XPTR_NULL )                  // miss => handle it
     417    {
     418        // release the lock in READ_MODE and take it in WRITE_MODE
     419        remote_rwlock_rd_release( lock_xp );
     420        remote_rwlock_wr_acquire( lock_xp );
     421
     422        // second test on missing page because the page status can be modified
     423        // by another thread, when passing from READ_MODE to WRITE_MODE.
     424        // from this point there is no concurrent accesses to mapper.
     425        page_xp = grdxt_remote_lookup( rt_xp , page_id );
     426
     427        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
     428        {
     429            error = mapper_handle_miss( mapper_xp,
     430                                        page_id,
     431                                        &page_xp );
     432            if( error )
     433            {
     434                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
     435                __FUNCTION__ , this->process->pid, this->trdid );
     436                remote_rwlock_wr_release( lock_xp );
     437                return XPTR_NULL;
     438            }
     439        }
     440
     441#if (DEBUG_MAPPER_GET_FAT_PAGE & 1)
     442if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
     443printk("\n[%s] thread[%x,%x] introduced missing page %d in FAT mapper / ppn %x\n",
     444__FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
     445#endif
     446       
     447        // release mapper lock from WRITE_MODE
     448        remote_rwlock_wr_release( lock_xp );
     449    }
     450    else                                              // hit
     451    {
     452        // release mapper lock from READ_MODE
     453        remote_rwlock_rd_release( lock_xp );
     454    }
     455
     456#if DEBUG_MAPPER_GET_FAT_PAGE
     457if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
     458printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x\n",
     459__FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
     460#endif
     461
     462#if( DEBUG_MAPPER_GET_FAT_PAGE & 2)
     463if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
     464ppm_remote_display( local_cxy );
     465#endif
     466
     467    return page_xp;
     468
     469}  // end mapper_get_fat_page()
    388470
    389471////////////////////////////////////////////////////
     
    481563
    482564        // get extended pointer on page descriptor in mapper
    483         page_xp = mapper_remote_get_page( mapper_xp , page_id );
     565        page_xp = mapper_get_page( mapper_xp , page_id );
    484566
    485567        if ( page_xp == XPTR_NULL ) return -1;
     
    519601__FUNCTION__, this->process->pid, this->trdid, page_bytes,
    520602local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
    521 mapper_display_page(  mapper_xp , page_xp , 128 );
     603mapper_display_page(  mapper_xp , page_id , 128 );
    522604#endif
    523605
     
    617699
    618700        // get extended pointer on page descriptor
    619         page_xp = mapper_remote_get_page( mapper_xp , page_id );
     701        page_xp = mapper_get_page( mapper_xp , page_id );
    620702
    621703        if ( page_xp == XPTR_NULL ) return -1;
     
    678760   
    679761    // get page containing the searched word
    680     page_xp  = mapper_remote_get_page( mapper_xp , page_id );
     762    page_xp  = mapper_get_page( mapper_xp , page_id );
    681763
    682764    if( page_xp == XPTR_NULL )  return -1;
     
    702784
    703785    // get page containing the searched word
    704     page_xp  = mapper_remote_get_page( mapper_xp , page_id );
     786    page_xp  = mapper_get_page( mapper_xp , page_id );
    705787
    706788    if( page_xp == XPTR_NULL ) return -1;
     
    719801}  // end mapper_remote_set_32()
    720802
    721 /////////////////////////////////////////
    722 error_t mapper_sync( mapper_t *  mapper )
    723 {
    724     page_t   * page;                // local pointer on current page descriptor
    725     xptr_t     page_xp;             // extended pointer on current page descriptor
    726     grdxt_t  * rt;                  // pointer on radix_tree descriptor
    727     uint32_t   start_key;           // start page index in mapper
    728     uint32_t   found_key;           // current page index in mapper
     803////////////////////////////////////////
     804error_t mapper_sync( xptr_t  mapper_xp )
     805{
     806    uint32_t   found_key;           // unused, required by grdxt_remote_get_first()
    729807    error_t    error;
     808
     809    // get mapper cluster and local pointer
     810    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     811    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    730812
    731813#if DEBUG_MAPPER_SYNC
     
    733815uint32_t   cycle = (uint32_t)hal_get_cycles();
    734816char       name[CONFIG_VFS_MAX_NAME_LENGTH];
    735 vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
    736 #endif
    737 
    738     // get pointer on radix tree
    739     rt = &mapper->rt;
     817vfs_inode_get_name( XPTR( mapper_cxy , &mapper_ptr->inode ) , name );
     818#endif
     819
     820    // build extended pointer on radix tree
     821    xptr_t   rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );
    740822
    741823    // initialise loop variable
    742     start_key = 0;
     824    uint32_t start_key = 0;
    743825
    744826    // scan radix-tree until last page found
     
    746828    {
    747829        // get page descriptor from radix tree
    748         page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
     830        xptr_t page_xp = grdxt_remote_get_first( rt_xp , start_key , &found_key );
    749831         
    750         if( page == NULL ) break;
    751 
    752 assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key );
    753 assert( (page->order == 0), "page_order (%d] != 0", page->order );
    754 
    755         // build extended pointer on page descriptor
    756         page_xp = XPTR( local_cxy , page );
     832        page_t * page_ptr = GET_PTR( page_xp );
     833
     834        // exit loop when last page found
     835        if( page_ptr == NULL ) break;
     836
     837        // get page flags & index fields
     838        uint32_t flags = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->flags ) );
     839        uint32_t index = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->index ) );
    757840
    758841        // synchronize page if dirty
    759         if( (page->flags & PG_DIRTY) != 0 )
     842        if( flags & PG_DIRTY )
    760843        {
    761844
     
    763846if( cycle > DEBUG_MAPPER_SYNC )
    764847printk("\n[%s] thread[%x,%x] synchronise page %d of <%s> to IOC device\n",
    765 __FUNCTION__, this->process->pid, this->trdid, page->index, name );
     848__FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
    766849#endif
    767850            // copy page to file system
     
    771854            {
    772855                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
    773                 __FUNCTION__, page->index );
     856                __FUNCTION__, page_ptr->index );
    774857                return -1;
    775858            }
     
    784867if( cycle > DEBUG_MAPPER_SYNC )
    785868printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
    786 __FUNCTION__, this->process->pid, this->trdid, page->index, name );
     869__FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
    787870#endif
    788871        }
    789872
    790873        // update loop variable
    791         start_key = page->index + 1;
     874        start_key = index + 1;
    792875    }  // end while
    793876
     
    798881///////////////////////////////////////////////
    799882void mapper_display_page( xptr_t     mapper_xp,
    800                           xptr_t     page_xp,
     883                          uint32_t   page_id,
    801884                          uint32_t   nbytes )
    802885{
     
    809892assert( (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
    810893assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
    811 assert( (page_xp   != XPTR_NULL) , "page_xp argument cannot be null");
    812894
    813895    // get mapper cluster and local pointer
     
    815897    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    816898
    817     // get page cluster an local pointer
     899    // get extended pointer on page descriptor
     900    xptr_t page_xp = mapper_get_page( mapper_xp , page_id );
     901
     902    // get page cluster and local pointer
    818903    cxy_t    page_cxy = GET_CXY( page_xp );
    819904    page_t * page_ptr = GET_PTR( page_xp );
    820905
    821906    // get page_id and mapper from page descriptor
    822     uint32_t   page_id = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
     907    uint32_t   index  = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
    823908    mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );
    824909
    825910assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
    826 assert( (mapper_ptr == mapper   ) , "unconsistent mapper_xp & page_xp arguments");
     911assert( (mapper_ptr == mapper   ) , "unconsistent mapper field in page descriptor");
     912assert( (page_id    == index    ) , "unconsistent index  field in page descriptor");
    827913
    828914    // get inode
  • trunk/kernel/mm/mapper.h

    r656 r657  
    6161 * - In the present implementation the cache size for a given file increases on demand,
    6262 *   and the  allocated memory is only released when the mapper/inode is destroyed.
    63  *
    64  * TODO the "type" field in mapper descriptor is redundant and probably unused.
    6563 ******************************************************************************************/
    6664
     
    7371{
    7472        struct vfs_inode_s * inode;           /*! owner inode                                     */
    75     uint32_t             type;        /*! file system type                                */
     73    uint32_t             fs_type;     /*! file system type                                */
    7674        grdxt_t              rt;              /*! embedded pages cache descriptor (radix tree)    */
    7775        remote_rwlock_t      lock;        /*! several readers / only one writer               */
     
    8482
    8583/*******************************************************************************************
    86  * This function allocates physical memory for a mapper descriptor, and initializes it
    87  * (refcount <= 0) / inode <= NULL).
    88  * It must be executed by a thread running in the cluster containing the mapper.
    89  *******************************************************************************************
    90  * @ type   : type of the mapper to create.
    91  * @ return : pointer on created mapper if success / return NULL if no memory
    92  ******************************************************************************************/
    93 mapper_t * mapper_create( vfs_fs_type_t type );
    94 
    95 /*******************************************************************************************
    96  * This function releases all physical memory allocated for a mapper.
    97  * Both the mapper descriptor and the radix tree are released.
     84 * This function allocates physical memory for a mapper descriptor, in cluster
     85 * identified by the <cxy> argument. It initializes it (refcount <= 0) / inode <= NULL).
     86 * It can be executed by any thread running in any cluster.
     87 *******************************************************************************************
     88 * @ cxy    : target cluster identifier.
     89 * @ type   : FS type.
     90 * @ return an extended pointer on created mapper if success / return NULL if no memory
     91 ******************************************************************************************/
     92xptr_t  mapper_create( cxy_t     cxy,
     93                       uint32_t  type );
     94
     95/*******************************************************************************************
     96 * This function releases all physical memory allocated for a mapper, identified
     97 * by the <mapper_xp> argument. Both the mapper descriptor and the radix tree are released.
    9898 * It does NOT synchronize dirty pages. Use the vfs_sync_inode() function if required.
    99  * It must be executed by a thread running in the cluster containing the mapper.
    100  *******************************************************************************************
    101  * @ mapper      : target mapper.
    102  ******************************************************************************************/
    103 void mapper_destroy( mapper_t * mapper );
     99 * It can be executed by any thread running in any cluster.
     100 *******************************************************************************************
     101 * @ mapper_xp   : extended pointer on target mapper.
     102 ******************************************************************************************/
     103void mapper_destroy( xptr_t  mapper_xp );
    104104
    105105/*******************************************************************************************
     
    117117 * @ return 0 if success / return -1 if IOC cannot be accessed.
    118118 ******************************************************************************************/
    119 error_t mapper_remote_handle_miss( xptr_t     mapper_xp,
    120                                    uint32_t   page_id,
    121                                    xptr_t   * page_xp );
     119error_t mapper_handle_miss( xptr_t     mapper_xp,
     120                            uint32_t   page_id,
     121                            xptr_t   * page_xp );
    122122
    123123/*******************************************************************************************
     
    180180
    181181/*******************************************************************************************
    182  * This function returns an extended pointer on a page descriptor.
    183  * The - possibly remote - mapper is identified by the <mapper_xp> argument.
     182 * This function returns an extended pointer on a page descriptor for a regular mapper
     183 * (i.e. this mapper is NOT the FAT mapper), identified by the <mapper_xp> argument.
    184184 * The page is identified by <page_id> argument (page index in the file).
    185185 * It can be executed by a thread running in any cluster, as it uses remote
     
    193193 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error.
    194194 ******************************************************************************************/
    195 xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
    196                                uint32_t  page_id );
     195xptr_t mapper_get_page( xptr_t    mapper_xp,
     196                        uint32_t  page_id );
     197
     198/*******************************************************************************************
     199 * This function returns an extended pointer on a page descriptor for the FAT mapper.
     200 * The page is identified by <page_id> argument (page index in the FAT mapper).
     201 * It can be executed by a thread running in any cluster, as it uses remote
     202 * access primitives to scan the mapper.
     203 * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the
     204 * mapper_handle_miss() to load the missing page from device to mapper, using an RPC
     205 * when the mapper is remote.
     206 *******************************************************************************************
     207 * @ mapper_xp  : extended pointer on the mapper.
     208 * @ page_id    : page index in file
     209 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error.
     210 ******************************************************************************************/
     211xptr_t mapper_get_fat_page( xptr_t    mapper_xp,
     212                            uint32_t  page_id );
    197213
    198214/*******************************************************************************************
     
    234250
    235251/*******************************************************************************************
    236  * This function scan all pages present in the mapper identified by the <mapper> argument,
    237  * and synchronize all pages marked as "dirty" on disk.
     252 * This function scans all pages present in the mapper identified by the <mapper_xp>
     253 * argument, and synchronize all pages marked as "dirty" on disk.
    238254 * These pages are unmarked and removed from the local PPM dirty_list.
    239  * This function must be called by a local thread running in same cluster as the mapper.
    240  * A remote thread must call the RPC_MAPPER_SYNC function.
    241  *******************************************************************************************
    242  * @ mapper     : [in]  local pointer on local mapper.
    243  * @ returns 0 if success / return -1 if error.
    244  ******************************************************************************************/
    245 error_t mapper_sync( mapper_t *  mapper );
     255 * It can be called by any thread running in any cluster.
     256 *******************************************************************************************
     257 * @ mapper_xp  : [in]  extended pointer on local mapper.
     258 * @ returns 0 if success / return -1 if error.
     259 ******************************************************************************************/
     260error_t mapper_sync( xptr_t  mapper_xp );
    246261
    247262/*******************************************************************************************
    248263 * This debug function displays the content of a given page of a given mapper, identified
    249  * by the <mapper_xp> and <page_xp> arguments.
     264 * by the <mapper_xp> and <page_id> arguments.
    250265 * The number of bytes to display in page is defined by the <nbytes> argument.
    251266 * The format is eight (32 bits) words per line in hexadecimal.
     
    253268 *******************************************************************************************
    254269 * @ mapper_xp  : [in]  extended pointer on the mapper.
    255  * @ page_xp    : [in]  extended pointer on page descriptor.
     270 * @ page_id    : [in]  page_index in mapper.
    256271 * @ nbytes     : [in]  number of bytes in page.
    257272 * @ returns 0 if success / return -1 if error.
    258273 ******************************************************************************************/
    259274void mapper_display_page( xptr_t     mapper_xp,
    260                           xptr_t     page_xp,
     275                          uint32_t   page_id,
    261276                          uint32_t   nbytes );
    262277
  • trunk/kernel/mm/ppm.c

    r656 r657  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017,2018,2019)
     5 *          Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/vmm.c

    r656 r657  
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    5  *           Alain Greiner (2016,2017,2018,2019)
     5 *           Alain Greiner (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    19881988    xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );
    19891989
    1990     // loop on PTEs in GPT to unmap PTE if (oldd_vpn_min <= vpn < new_vpn_min)
     1990    // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
    19911991        for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
    19921992    {
     
    22922292       
    22932293        // get extended pointer on page descriptor
    2294         page_xp = mapper_remote_get_page( mapper_xp , page_id );
     2294        page_xp = mapper_get_page( mapper_xp , page_id );
    22952295
    22962296        if ( page_xp == XPTR_NULL ) return EINVAL;
  • trunk/kernel/mm/vmm.h

    r656 r657  
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    5  *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018,2019)
     5 *           Alain Greiner (2016,2017,2018,2019,2020)
    76 *
    87 * Copyright (c) UPMC Sorbonne Universites
     
    3130#include <list.h>
    3231#include <queuelock.h>
     32#include <remote_queuelock.h>
    3333#include <hal_gpt.h>
    3434#include <vseg.h>
     
    208208
    209209/*********************************************************************************************
    210  * This function modifies the size of the vseg identified by <process> and <base> arguments
    211  * in all clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments.
    212  * This function is called by the sys_munmap() function, and can be called by a thread
    213  * running in any cluster, as it uses remote accesses.
     210 * This function modifies the vseg identified by <process> and <base> arguments in all
     211 * clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments.
     212 * The new vseg, defined by the <new_base> and <new_size> arguments must be included
     213 * in the existing vseg. The target VSL size and base fields are modified in the VSL.
     214 * This is done in all clusters containing a VMM copy to maintain VMM coherence.
     215 * It is called by the sys_munmap() and dev_fbf_resize_window() functions.
     216 * It can be called by a thread running in any cluster, as it uses the vmm_resize_vseg() in
     217 * the local cluster, and parallel RPC_VMM_RESIZE_VSEG for remote clusters.
    214218 * It cannot fail, as only vseg registered  in VSL copies are updated.
    215219 *********************************************************************************************
     
    228232 * the VSL and remove all associated PTE entries from the GPT.
    229233 * This is done in all clusters containing a VMM copy to maintain VMM coherence.
    230  * This function can be called by a thread running in any cluster, as it uses the
    231  * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters.
     234 * It is called by the sys_munmap() and dev_fbf_resize_window() functions.
     235 * It can be called by a thread running in any cluster, as it uses the vmm_remove_vseg() in
     236 * the local cluster, and parallel RPC_VMM_REMOVE_VSEG for remote clusters.
    232237 * It cannot fail, as only vseg registered  in VSL copies are deleted.
    233238 *********************************************************************************************
     
    317322 * It must be called by a local thread, running in the cluster containing the modified VMM.
    318323 * Use the RPC_VMM_REMOVE_VSEG if required.
    319  * It makes a kernel panic if the process is not registered in the local cluster,
    320  * or if the vseg is not registered in the process VSL.
     324 * It makes a kernel panic if the process is not registered in the local cluster.
    321325 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
    322326 * unmapped from local GPT. Other actions depend on the vseg type:
     
    340344/*********************************************************************************************
    341345 * This function resizes a local vseg identified by the <process> and <vseg> arguments.
    342  * It is called by the vmm_global_resize() function.
    343  * It must be called by a local thread, running in the cluster containing the modified VMM.
     346 * Both the "size" and "base" fields are modified in the process VSL. When the new vseg
     347 * contains less pages than the target vseg, the relevant pages are removed from the GPT.
     348 * It is called by the vmm_global_resize() and dev_fbf_resize_window() functions.
     349 * It must be called by a local thread, running in the cluster containing the modified VSL.
    344350 * Use the RPC_VMM_RESIZE_VSEG if required.
    345  * It makes a kernel panic if the process is not registered in the local cluster,
    346  * or if the vseg is not registered in the process VSL.
    347  * The new vseg, defined by the <new_base> and <new_size> arguments must be strictly
    348  * included in the target vseg. The target VSL size and base fields are modified in the VSL.
    349  * If the new vseg contains less pages than the target vseg, the relevant pages are
    350  * removed from the GPT.
    351351 * The VSL lock protecting the VSL must be taken by the caller.
    352352 *********************************************************************************************
     
    454454                         ppn_t  * ppn );
    455455
    456 
    457456#endif /* _VMM_H_ */
  • trunk/kernel/mm/vseg.c

    r651 r657  
    22 * vseg.c - virtual segment (vseg) related operations
    33 *
    4  * Authors   Alain Greiner (2016,2017,2018,2019)
     4 * Authors   Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/vseg.h

    r651 r657  
    22 * vseg.h - virtual segment (vseg) related operations
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
Note: See TracChangeset for help on using the changeset viewer.