Changeset 625 for trunk/kernel/mm


Timestamp: Apr 10, 2019, 10:09:39 AM
Author: alain
Message:

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel whenever
the target process descriptor was in the reference cluster.
These physical pages should be released only when the page's
forks counter is zero.
All other modifications are cosmetic.
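
In summary, the new release decision in vmm_remove_vseg() can be condensed as
follows (a sketch distilled from the vmm.c hunk below, using the names of the
changeset; see the full diff for the exact code):

    // decide whether the physical page behind an unmapped PTE
    // must be released to the kernel memory allocator (kmem)
    bool_t ppn_release;
    if( (vseg_type == VSEG_TYPE_FILE)  || (vseg_type == VSEG_TYPE_KCODE) ||
        (vseg_type == VSEG_TYPE_KDATA) || (vseg_type == VSEG_TYPE_KDEV) )
        ppn_release = false;                   // never released: FILE and kernel vsegs
    else if( (vseg_type == VSEG_TYPE_CODE) || (vseg_type == VSEG_TYPE_STACK) )
        ppn_release = true;                    // always released: private vsegs
    else if( (vseg_type == VSEG_TYPE_ANON) || (vseg_type == VSEG_TYPE_REMOTE) )
        ppn_release = is_ref;                  // released by the reference cluster only
    else                                       // DATA vseg
        ppn_release = is_ref && (forks == 0);  // the fix: forks counter must be zero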

Location: trunk/kernel/mm
Files: 9 edited

  • trunk/kernel/mm/mapper.c

--- trunk/kernel/mm/mapper.c (r624)
+++ trunk/kernel/mm/mapper.c (r625)
@@ -153,10 +153,17 @@
 
 #if DEBUG_MAPPER_GET_PAGE
-uint32_t cycle = (uint32_t)hal_get_cycles();
+vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
+uint32_t      cycle = (uint32_t)hal_get_cycles();
 char          name[CONFIG_VFS_MAX_NAME_LENGTH];
-vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
-vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
-if( DEBUG_MAPPER_GET_PAGE < cycle )
-printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
+if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )  // FAT mapper
+{
+    printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
+}
+if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )  // file mapper
+{
+    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
+    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
+}
 #endif
@@ -235,7 +242,15 @@
 #if DEBUG_MAPPER_GET_PAGE
 cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_MAPPER_GET_PAGE < cycle )
-printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid,
-page_id, name, ppm_page2ppn( page_xp ), cycle );
+if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
+{
+    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, page_id,
+    name, ppm_page2ppn(page_xp), cycle );
+}
+if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
+{
+    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, page_id,
+    ppm_page2ppn(page_xp), cycle );
+}
 #endif
@@ -257,12 +272,19 @@
 
 #if DEBUG_MAPPER_HANDLE_MISS
-uint32_t cycle = (uint32_t)hal_get_cycles();
+uint32_t      cycle = (uint32_t)hal_get_cycles();
 char          name[CONFIG_VFS_MAX_NAME_LENGTH];
 vfs_inode_t * inode = mapper->inode;
-vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
-if( DEBUG_MAPPER_HANDLE_MISS < cycle )
-printk("\n[%s] enter for page %d in <%s> / cycle %d",
-__FUNCTION__, page_id, name, cycle );
-if( DEBUG_MAPPER_HANDLE_MISS & 1 )
-grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
+if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
+{
+    vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
+    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d",
+    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
+   if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
+}
+if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
+{
+    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d",
+    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
+   if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
+}
 #endif
@@ -321,8 +343,17 @@
 #if DEBUG_MAPPER_HANDLE_MISS
 cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_MAPPER_HANDLE_MISS < cycle )
-printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
-__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
-if( DEBUG_MAPPER_HANDLE_MISS & 1 )
-grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
+if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
+{
+    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
+    __FUNCTION__, this->process->pid, this->trdid,
+    page_id, name, ppm_page2ppn( *page_xp ), cycle );
+    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
+}
+if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
+{
+    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
+    __FUNCTION__, this->process->pid, this->trdid,
+    page_id, ppm_page2ppn( *page_xp ), cycle );
+    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
+}
 #endif
@@ -482,8 +513,12 @@
 
 #if DEBUG_MAPPER_MOVE_KERNEL
-uint32_t   cycle = (uint32_t)hal_get_cycles();
-thread_t * this  = CURRENT_THREAD;
+char          name[CONFIG_VFS_MAX_NAME_LENGTH];
+uint32_t      cycle  = (uint32_t)hal_get_cycles();
+thread_t    * this   = CURRENT_THREAD;
+mapper_t    * mapper = GET_PTR( mapper_xp );
+vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
+vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
+printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
 #endif
@@ -496,9 +531,4 @@
     uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
     uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
-
-#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
-if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
-#endif
 
     // compute source and destination clusters
@@ -528,10 +558,4 @@
         else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
         else                         page_count = CONFIG_PPM_PAGE_SIZE;
-
-#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
-if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
-__FUNCTION__ , page_id , page_offset , page_count );
-#endif
 
         // get extended pointer on page descriptor
@@ -560,6 +584,12 @@
 #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
-__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
+{
+    if( to_buffer )
+    printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
+    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_count );
+    else
+    printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
+    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_count );
+}
 #endif
 
@@ -571,8 +601,8 @@
 
 #if DEBUG_MAPPER_MOVE_KERNEL
-cycle = (uint32_t)hal_get_cycles();
+cycle  = (uint32_t)hal_get_cycles();
 if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
+printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
 
@@ -662,5 +692,5 @@
 
     // get pointer on radix tree
-    rt        = &mapper->rt;
+    rt = &mapper->rt;
 
     // initialise loop variable
@@ -675,6 +705,6 @@
         if( page == NULL ) break;
 
-assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
-assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );
+assert( (page->index == found_key ), "wrong page descriptor index" );
+assert( (page->order == 0),          "mapper page order must be 0" );
 
         // build extended pointer on page descriptor
@@ -730,8 +760,6 @@
     char          buffer[4096];   // local buffer
     uint32_t    * tabi;           // pointer on uint32_t to scan buffer
-    char        * tabc;           // pointer on char to scan buffer
     uint32_t      line;           // line index
     uint32_t      word;           // word index
-    uint32_t      n;              // char index
     cxy_t         mapper_cxy;     // mapper cluster identifier
     mapper_t    * mapper_ptr;     // mapper local pointer
@@ -776,12 +804,9 @@
     // display 8 words per line
     tabi = (uint32_t *)buffer;
-    tabc = (char *)buffer;
     printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
     for( line = 0 ; line < (nbytes >> 5) ; line++ )
     {
-        printk("%X : ", line );
+        printk("%X : ", line << 5 );
         for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
-        printk(" | ");
-        for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
         printk("\n");
     }
  • trunk/kernel/mm/mapper.h

--- trunk/kernel/mm/mapper.h (r623)
+++ trunk/kernel/mm/mapper.h (r625)
@@ -123,5 +123,5 @@
 
 /*******************************************************************************************
- * This function move data between a remote mapper, dentified by the <mapper_xp> argument,
+ * This function move data between a remote mapper, identified by the <mapper_xp> argument,
  * and a distributed user buffer. It can be called by a thread running in any cluster.
  * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls.
@@ -148,6 +148,6 @@
 
 /********************************************************************************************
- * This function move data between a remote mapper and a remote kernel buffer.
- * It can be called by a thread running any cluster.
+ * This function move data between a remote mapper, identified by the <mapper_xp> argument,
+ * and a localised remote kernel buffer. It can be called by a thread running any cluster.
  * If required, the data transfer is split in "fragments", where one fragment contains
  * contiguous bytes in the same mapper page.
@@ -215,5 +215,5 @@
 /*******************************************************************************************
  * This function allows to write a single word to a mapper seen as and array of uint32_t.
- * It has bee designed to support remote access tho the FAT mapper of the FATFS.
+ * It has been designed to support remote access to the FAT mapper of the FATFS.
  * It can be called by any thread running in any cluster.
  * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
  • trunk/kernel/mm/page.h

--- trunk/kernel/mm/page.h (r623)
+++ trunk/kernel/mm/page.h (r625)
@@ -50,5 +50,5 @@
  *   test/modify the forks counter or the page flags.
  * - The list entry is used to register the page in a free list or in dirty list.
- * NOTE: Size is 48 bytes for a 32 bits core.
- * TODO : the refcount use is not defined [AG]
+ *   The refcount is used for page release to KMEM.
+ * NOTE: the size is 48 bytes for a 32 bits core.
  ************************************************************************************/
@@ -61,5 +61,5 @@
     uint32_t          index;          /*! page index in mapper                 (4)  */
         list_entry_t      list;           /*! for both dirty pages and free pages  (8)  */
-        uint32_t          refcount;       /*! reference counter TODO ??? [AG]      (4)  */
+        int32_t           refcount;       /*! references counter for page release  (4)  */
        uint32_t          forks;          /*! number of pending forks              (4)  */
        remote_busylock_t lock;           /*! protect forks or flags modifs        (16) */
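
For context, the renamed refcount field is what vmm_remove_vseg() now decrements
before computing its release condition. A minimal sketch of that decrement, using
only the calls visible in the vmm.c hunks below:

    // decrement the page refcount in the (possibly remote) page descriptor
    xptr_t   count_xp = XPTR( page_cxy , &page_ptr->refcount );
    uint32_t count    = hal_remote_atomic_add( count_xp , -1 );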
  • trunk/kernel/mm/ppm.c

--- trunk/kernel/mm/ppm.c (r611)
+++ trunk/kernel/mm/ppm.c (r625)
@@ -349,6 +349,6 @@
 }  // end ppm_free_pages()
 
-///////////////////////////////
-void ppm_print( char * string )
+////////////////////////
+void ppm_display( void )
 {
        uint32_t       order;
@@ -361,6 +361,5 @@
        busylock_acquire( &ppm->free_lock );
 
-        printk("\n***  PPM in cluster %x / %s / %d pages ***\n",
-    local_cxy , string, ppm->pages_nr );
+        printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );
 
        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
  • trunk/kernel/mm/ppm.h

--- trunk/kernel/mm/ppm.h (r623)
+++ trunk/kernel/mm/ppm.h (r625)
@@ -176,5 +176,5 @@
  * string   : character string printed in header
  ****************************************************************************************/
-void ppm_print( char * string );
+void ppm_display( void );
 
 /*****************************************************************************************
  • trunk/kernel/mm/vmm.c

--- trunk/kernel/mm/vmm.c (r624)
+++ trunk/kernel/mm/vmm.c (r625)
@@ -4,5 +4,5 @@
  * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
  *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
@@ -55,8 +55,241 @@
 extern  process_t  process_zero;      // allocated in cluster.c
 
-///////////////////////////////////////
-error_t vmm_init( process_t * process )
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function is called by the vmm_create_vseg() function, and implements
+// the VMM STACK specific allocator.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ vmm      : [in]  pointer on VMM.
+// @ ltid     : [in]  requested slot == local user thread identifier.
+// @ vpn_base : [out] first allocated page
+// @ vpn_size : [out] number of allocated pages
+////////////////////////////////////////////////////////////////////////////////////////////
+static void vmm_stack_alloc( vmm_t  * vmm,
+                             ltid_t   ltid,
+                             vpn_t  * vpn_base,
+                             vpn_t  * vpn_size )
 {
-    error_t   error;
+
+// check ltid argument
+assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+"slot index %d too large for an user stack vseg", ltid );
+
+    // get stack allocator pointer
+    stack_mgr_t * mgr = &vmm->stack_mgr;
+
+    // get lock on stack allocator
+    busylock_acquire( &mgr->lock );
+
+// check requested slot is available
+assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
+"slot index %d already allocated", ltid );
+
+    // update bitmap
+    bitmap_set( &mgr->bitmap , ltid );
+
+    // release lock on stack allocator
+    busylock_release( &mgr->lock );
+
+    // returns vpn_base, vpn_size (first page non allocated)
+    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
+    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
+
+} // end vmm_stack_alloc()
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function is called by the vmm_remove_vseg() function, and implements
+// the VMM STACK specific desallocator.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ vmm      : [in] pointer on VMM.
+// @ vseg     : [in] pointer on released vseg.
+////////////////////////////////////////////////////////////////////////////////////////////
+static void vmm_stack_free( vmm_t  * vmm,
+                            vseg_t * vseg )
+{
+    // get stack allocator pointer
+    stack_mgr_t * mgr = &vmm->stack_mgr;
+
+    // compute slot index
+    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
+
+// check index
+assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+"slot index %d too large for an user stack vseg", index );
+
+// check released slot is allocated
+assert( (bitmap_state( &mgr->bitmap , index ) == true),
+"released slot index %d non allocated", index );
+
+    // get lock on stack allocator
+    busylock_acquire( &mgr->lock );
+
+    // update stacks_bitmap
+    bitmap_clear( &mgr->bitmap , index );
+
+    // release lock on stack allocator
+    busylock_release( &mgr->lock );
+
+}  // end vmm_stack_free()
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function is called by the vmm_create_vseg() function, and implements
+// the VMM MMAP specific allocator.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ vmm      : [in] pointer on VMM.
+// @ npages   : [in] requested number of pages.
+// @ vpn_base : [out] first allocated page.
+// @ vpn_size : [out] actual number of allocated pages.
+////////////////////////////////////////////////////////////////////////////////////////////
+static error_t vmm_mmap_alloc( vmm_t * vmm,
+                               vpn_t   npages,
+                               vpn_t * vpn_base,
+                               vpn_t * vpn_size )
+{
+    uint32_t   order;
+    xptr_t     vseg_xp;
+    vseg_t   * vseg;
+    vpn_t      base;
+    vpn_t      size;
+    vpn_t      free;
+
+#if DEBUG_VMM_MMAP_ALLOC
+thread_t * this = CURRENT_THREAD;
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_VMM_MMAP_ALLOC < cycle )
+printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
+#endif
+
+    // number of allocated pages must be power of 2
+    // compute actual size and order
+    size  = POW2_ROUNDUP( npages );
+    order = bits_log2( size );
+
+    // get mmap allocator pointer
+    mmap_mgr_t * mgr = &vmm->mmap_mgr;
+
+    // build extended pointer on root of zombi_list[order]
+    xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
+
+    // take lock protecting zombi_lists
+    busylock_acquire( &mgr->lock );
+
+    // get vseg from zombi_list or from mmap zone
+    if( xlist_is_empty( root_xp ) )                   // from mmap zone
+    {
+        // check overflow
+        free = mgr->first_free_vpn;
+        if( (free + size) > mgr->vpn_size ) return -1;
+
+        // update MMAP allocator
+        mgr->first_free_vpn += size;
+
+        // compute base
+        base = free;
+    }
+    else                                              // from zombi_list
+    {
+        // get pointer on zombi vseg from zombi_list
+        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
+        vseg    = GET_PTR( vseg_xp );
+
+        // remove vseg from free-list
+        xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+        // compute base
+        base = vseg->vpn_base;
+    }
+
+    // release lock
+    busylock_release( &mgr->lock );
+
+#if DEBUG_VMM_MMAP_ALLOC
+cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_VMM_DESTROY < cycle )
+printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
+#endif
+
+    // returns vpn_base, vpn_size
+    *vpn_base = base;
+    *vpn_size = size;
+    return 0;
+
+}  // end vmm_mmap_alloc()
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function is called by the vmm_remove_vseg() function, and implements
+// the VMM MMAP specific desallocator.
+////////////////////////////////////////////////////////////////////////////////////////////
+// @ vmm      : [in] pointer on VMM.
+// @ vseg     : [in] pointer on released vseg.
+////////////////////////////////////////////////////////////////////////////////////////////
+static void vmm_mmap_free( vmm_t  * vmm,
+                           vseg_t * vseg )
+{
+    // get pointer on mmap allocator
+    mmap_mgr_t * mgr = &vmm->mmap_mgr;
+
+    // compute zombi_list order
+    uint32_t order = bits_log2( vseg->vpn_size );
+
+    // take lock protecting zombi lists
+    busylock_acquire( &mgr->lock );
+
+    // update relevant zombi_list
+    xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
+                     XPTR( local_cxy , &vseg->xlist ) );
+
+    // release lock
+    busylock_release( &mgr->lock );
+
+}  // end of vmm_mmap_free()
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function registers one vseg in the VSL of a local process descriptor.
+////////////////////////////////////////////////////////////////////////////////////////////
+// vmm       : [in] pointer on VMM.
+// vseg      : [in] pointer on vseg.
+////////////////////////////////////////////////////////////////////////////////////////////
+void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
+                             vseg_t * vseg )
+{
+    // update vseg descriptor
+    vseg->vmm = vmm;
+
+    // increment vsegs number
+    vmm->vsegs_nr++;
+
+    // add vseg in vmm list
+    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
+                    XPTR( local_cxy , &vseg->xlist ) );
+
+}  // end vmm_attach_vseg_from_vsl()
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// This static function removes one vseg from the VSL of a local process descriptor.
+////////////////////////////////////////////////////////////////////////////////////////////
+// vmm       : [in] pointer on VMM.
+// vseg      : [in] pointer on vseg.
+////////////////////////////////////////////////////////////////////////////////////////////
+void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
+                               vseg_t * vseg )
+{
+    // update vseg descriptor
+    vseg->vmm = NULL;
+
+    // decrement vsegs number
+    vmm->vsegs_nr--;
+
+    // remove vseg from VSL
+    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+}  // end vmm_detach_from_vsl()
+
+
+
+
+////////////////////////////////////////////
+error_t vmm_user_init( process_t * process )
+{
     vseg_t  * vseg_args;
     vseg_t  * vseg_envs;
@@ -65,8 +298,8 @@
     uint32_t  i;
 
-#if DEBUG_VMM_INIT
+#if DEBUG_VMM_USER_INIT
 thread_t * this = CURRENT_THREAD;
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_VMM_INIT )
+if( DEBUG_VMM_USER_INIT )
 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
 __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
@@ -76,18 +309,15 @@
     vmm_t   * vmm = &process->vmm;
 
-    // initialize VSL (empty)
-    vmm->vsegs_nr = 0;
-        xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
-        remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
-
+// check UTILS zone
 assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
          (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
          "UTILS zone too small\n" );
 
+// check STACK zone
 assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
 (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
 "STACK zone too small\n");
 
-    // register args vseg in VSL
+    // register "args" vseg in VSL
     base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
     size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
@@ -101,5 +331,4 @@
                                  XPTR_NULL,     // mapper_xp unused
                                  local_cxy );
-
     if( vseg_args == NULL )
     {
@@ -110,5 +339,5 @@
     vmm->args_vpn_base = base;
 
-    // register the envs vseg in VSL
+    // register "envs" vseg in VSL
     base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
     size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
@@ -122,5 +351,4 @@
                                  XPTR_NULL,     // mapper_xp unused
                                  local_cxy );
-
     if( vseg_envs == NULL )
     {
@@ -130,25 +358,4 @@
 
     vmm->envs_vpn_base = base;
-
-    // create GPT (empty)
-    error = hal_gpt_create( &vmm->gpt );
-
-    if( error )
-    {
-        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
-        return -1;
-    }
-
-    // initialize GPT lock
-    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
-
-    // update process VMM with kernel vsegs as required by the hardware architecture
-    error = hal_vmm_kernel_update( process );
-
-    if( error )
-    {
-        printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
-        return -1;
-    }
 
     // initialize STACK allocator
@@ -162,5 +369,8 @@
     vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
     busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
-    for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
+    for( i = 0 ; i < 32 ; i++ )
+    {
+        xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
+    }
 
     // initialize instrumentation counters
@@ -169,7 +379,7 @@
     hal_fence();
 
-#if DEBUG_VMM_INIT
+#if DEBUG_VMM_USER_INIT
 cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_VMM_INIT )
+if( DEBUG_VMM_USER_INIT )
 printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
@@ -178,91 +388,95 @@
     return 0;
 
-}  // end vmm_init()
-
+}  // end vmm_user_init()
 
 //////////////////////////////////////////
-void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
-                             vseg_t * vseg )
+void vmm_user_reset( process_t * process )
 {
-    // build extended pointer on rwlock protecting VSL
-    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-    // get rwlock in write mode
-    remote_rwlock_wr_acquire( lock_xp );
-
-    // update vseg descriptor
-    vseg->vmm = vmm;
-
-    // increment vsegs number
-    vmm->vsegs_nr++;
-
-    // add vseg in vmm list
-    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
-                    XPTR( local_cxy , &vseg->xlist ) );
-
-    // release rwlock in write mode
-    remote_rwlock_wr_release( lock_xp );
-}
-
-////////////////////////////////////////////
-void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
-                               vseg_t * vseg )
-{
-    // get vseg type
-    uint32_t type = vseg->type;
-
-    // build extended pointer on rwlock protecting VSL
-    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-    // get rwlock in write mode
-    remote_rwlock_wr_acquire( lock_xp );
-
-    // update vseg descriptor
-    vseg->vmm = NULL;
-
-    // remove vseg from VSL
-    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
-
-    // release rwlock in write mode
-    remote_rwlock_wr_release( lock_xp );
-
-    // release the stack slot to VMM stack allocator if STACK type
-    if( type == VSEG_TYPE_STACK )
-    {
-        // get pointer on stack allocator
-        stack_mgr_t * mgr = &vmm->stack_mgr;
-
-        // compute slot index
-        uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
-
-        // update stacks_bitmap
-        busylock_acquire( &mgr->lock );
-        bitmap_clear( &mgr->bitmap , index );
-        busylock_release( &mgr->lock );
-    }
-
-    // release the vseg to VMM mmap allocator if MMAP type
-    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
-    {
-        // get pointer on mmap allocator
-        mmap_mgr_t * mgr = &vmm->mmap_mgr;
-
-        // compute zombi_list index
-        uint32_t index = bits_log2( vseg->vpn_size );
-
-        // update zombi_list
-        busylock_acquire( &mgr->lock );
-        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
-        busylock_release( &mgr->lock );
-    }
-
-    // release physical memory allocated for vseg if no MMAP and no kernel type
-    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
-        (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
-    {
-        vseg_free( vseg );
-    }
-
-}  // end vmm_remove_vseg_from_vsl()
+    xptr_t       vseg_xp;
+        vseg_t     * vseg;
+    vseg_type_t  vseg_type;
+
+#if DEBUG_VMM_USER_RESET
+uint32_t cycle = (uint32_t)hal_get_cycles();
+thread_t * this = CURRENT_THREAD;
+if( DEBUG_VMM_USER_RESET < cycle )
+printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
+#endif
+
+#if (DEBUG_VMM_USER_RESET & 1 )
+if( DEBUG_VMM_USER_RESET < cycle )
+hal_vmm_display( process , true );
+#endif
+
+    // get pointer on local VMM
+    vmm_t * vmm = &process->vmm;
+
+    // build extended pointer on VSL root and VSL lock
+    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+    // take the VSL lock
+        remote_rwlock_wr_acquire( lock_xp );
+
+    // scan the VSL to delete all non kernel vsegs
+    // (we don't use a FOREACH in case of item deletion)
+    xptr_t   iter_xp;
+    xptr_t   next_xp;
+        for( iter_xp = hal_remote_l64( root_xp ) ;
+         iter_xp != root_xp ;
+         iter_xp = next_xp )
+        {
+        // save extended pointer on next item in xlist
+        next_xp = hal_remote_l64( iter_xp );
+
+        // get pointers on current vseg in VSL
+        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
+        vseg      = GET_PTR( vseg_xp );
+        vseg_type = vseg->type;
+
+#if( DEBUG_VMM_USER_RESET & 1 )
+if( DEBUG_VMM_USER_RESET < cycle )
+printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
+__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
+#endif
+        // delete non kernel vseg 
+        if( (vseg_type != VSEG_TYPE_KCODE) &&
+            (vseg_type != VSEG_TYPE_KDATA) &&
+            (vseg_type != VSEG_TYPE_KDEV ) )
+        {
+            // remove vseg from VSL
+            vmm_remove_vseg( process , vseg );
+
+#if( DEBUG_VMM_USER_RESET & 1 )
+if( DEBUG_VMM_USER_RESET < cycle )
+printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
+__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
+#endif
+        }
+        else
+        {
+
+#if( DEBUG_VMM_USER_RESET & 1 )
+if( DEBUG_VMM_USER_RESET < cycle )
+printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
+__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
+#endif
+        }
+        }  // end loop on vsegs in VSL
+
+    // release the VSL lock
+        remote_rwlock_wr_release( lock_xp );
+
+// FIXME il faut gérer les process copies...
+
+#if DEBUG_VMM_USER_RESET
+cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_VMM_USER_RESET < cycle )
+printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
+#endif
+
+}  // end vmm_user_reset()
 
 ////////////////////////////////////////////////
@@ -507,5 +721,4 @@
     cxy_t       page_cxy;
     xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
-    xptr_t      lock_xp;        // extended pointer on lock protecting the forks counter
     xptr_t      parent_root_xp;
     bool_t      mapped;
@@ -528,9 +741,7 @@
     child_vmm  = &child_process->vmm;
 
-    // get extended pointer on lock protecting the parent VSL
-    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );
-
-    // initialize the lock protecting the child VSL
-    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
+    // initialize the locks protecting the child VSL and GPT
+    remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
+        remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );
 
     // initialize the child VSL as empty
@@ -538,7 +749,6 @@
     child_vmm->vsegs_nr = 0;
 
-    // create the child GPT
+    // create an empty child GPT
     error = hal_gpt_create( &child_vmm->gpt );
-
     if( error )
     {
@@ -547,6 +757,7 @@
     }
 
-    // build extended pointer on parent VSL
+    // build extended pointer on parent VSL root and lock
     parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
+    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
 
     // take the lock protecting the parent VSL in read mode
@@ -556,5 +767,5 @@
     XLIST_FOREACH( parent_root_xp , iter_xp )
     {
-        // get local and extended pointers on current parent vseg
+        // get pointers on current parent vseg
         parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
         parent_vseg    = GET_PTR( parent_vseg_xp );
@@ -587,6 +798,15 @@
             vseg_init_from_ref( child_vseg , parent_vseg_xp );
 
+            // build extended pointer on VSL lock
+            xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
+
+            // take the VSL lock in write mode
+            remote_rwlock_wr_acquire( lock_xp );
+
             // register child vseg in child VSL
             vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
+
+            // release the VSL lock
+            remote_rwlock_wr_release( lock_xp );
 
 #if DEBUG_VMM_FORK_COPY
@@ -597,9 +817,8 @@
 hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
 #endif
-
-            // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
+            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
             if( type != VSEG_TYPE_CODE )
             {
-                // activate the COW for DATA, MMAP, REMOTE vsegs only
+                // activate the COW for DATA, ANON, REMOTE vsegs only
                 cow = ( type != VSEG_TYPE_FILE );
 
@@ -611,4 +830,5 @@
                 {
                     error = hal_gpt_pte_copy( &child_vmm->gpt,
+                                              vpn,
                                               XPTR( parent_cxy , &parent_vmm->gpt ),
                                               vpn,
@@ -677,5 +897,8 @@
     child_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
     child_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
-    for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );
+    for( i = 0 ; i < 32 ; i++ )
+    {
+        xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
+    }
 
     // initialize instrumentation counters
@@ -726,18 +949,29 @@
     vmm_t  * vmm = &process->vmm;
 
-    // get extended pointer on VSL root and VSL lock
-    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+    // build extended pointer on VSL root, VSL lock and GPT lock
+    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+    xptr_t   gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
+
+    // take the VSL lock
+    remote_rwlock_wr_acquire( vsl_lock_xp );
 
     // scan the VSL to delete all registered vsegs
-    // (don't use a FOREACH for item deletion in xlist)
-
-        while( !xlist_is_empty( root_xp ) )
+    // (we don't use a FOREACH in case of item deletion)
+    xptr_t  iter_xp;
+    xptr_t  next_xp;
+        for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
+         iter_xp != vsl_root_xp ;
+         iter_xp = next_xp )
        {
-        // get pointer on first vseg in VSL
-                vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
-        vseg    = GET_PTR( vseg_xp );
+        // save extended pointer on next item in xlist
+        next_xp = hal_remote_l64( iter_xp );
+
+        // get pointers on current vseg in VSL
+        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
+        vseg      = GET_PTR( vseg_xp );
 
         // delete vseg and release physical pages
-        vmm_delete_vseg( process->pid , vseg->min );
+        vmm_remove_vseg( process , vseg );
 
 #if( DEBUG_VMM_DESTROY & 1 )
@@ -749,11 +983,20 @@
         }
 
-    // remove all vsegs from zombi_lists in MMAP allocator
+    // release the VSL lock
+    remote_rwlock_wr_release( vsl_lock_xp );
+
+    // remove all registered MMAP vsegs
+    // from zombi_lists in MMAP allocator
     uint32_t i;
     for( i = 0 ; i<32 ; i++ )
     {
-            while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
+        // build extended pointer on zombi_list[i]
+        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
+
+        // scan zombi_list[i]
+            while( !xlist_is_empty( root_xp ) )
            {
-                    vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );
+                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
+            vseg    = GET_PTR( vseg_xp );
 
 #if( DEBUG_VMM_DESTROY & 1 )
@@ -765,5 +1008,5 @@
             vseg->vmm = NULL;
 
-            // remove vseg from  xlist
+            // remove vseg from  zombi_list
             xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
 
@@ -779,6 +1022,12 @@
     }
 
+    // take the GPT lock
+    remote_rwlock_wr_acquire( gpt_lock_xp );
+
     // release memory allocated to the GPT itself
     hal_gpt_destroy( &vmm->gpt );
+
+    // release the GPT lock
+    remote_rwlock_wr_release( gpt_lock_xp );
 
 #if DEBUG_VMM_DESTROY
@@ -816,123 +1065,5 @@
 }  // end vmm_check_conflict()
 
-////////////////////////////////////////////////////////////////////////////////////////////
-// This static function is called by the vmm_create_vseg() function, and implements
-// the VMM stack_vseg specific allocator.
-////////////////////////////////////////////////////////////////////////////////////////////
-// @ vmm      : pointer on VMM.
-// @ vpn_base : (return value) first allocated page
-// @ vpn_size : (return value) number of allocated pages
-////////////////////////////////////////////////////////////////////////////////////////////
-static error_t vmm_stack_alloc( vmm_t * vmm,
-                                vpn_t * vpn_base,
-                                vpn_t * vpn_size )
-{
-    // get stack allocator pointer
-    stack_mgr_t * mgr = &vmm->stack_mgr;
-
-    // get lock on stack allocator
-    busylock_acquire( &mgr->lock );
-
-    // get first free slot index in bitmap
-    int32_t index = bitmap_ffc( &mgr->bitmap , 4 );
-    if( (index < 0) || (index > 31) )
-    {
-        busylock_release( &mgr->lock );
-        return 0xFFFFFFFF;
-    }
-
-    // update bitmap
-    bitmap_set( &mgr->bitmap , index );
-
-    // release lock on stack allocator
-    busylock_release( &mgr->lock );
-
-    // returns vpn_base, vpn_size (one page non allocated)
-    *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
-    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
-    return 0;
-
-} // end vmm_stack_alloc()
-
-////////////////////////////////////////////////////////////////////////////////////////////
-// This static function is called by the vmm_create_vseg() function, and implements
-// the VMM MMAP specific allocator.
-////////////////////////////////////////////////////////////////////////////////////////////
-// @ vmm      : [in] pointer on VMM.
-// @ npages   : [in] requested number of pages.
-// @ vpn_base : [out] first allocated page.
-// @ vpn_size : [out] actual number of allocated pages.
-////////////////////////////////////////////////////////////////////////////////////////////
-static error_t vmm_mmap_alloc( vmm_t * vmm,
-                               vpn_t   npages,
-                               vpn_t * vpn_base,
-                               vpn_t * vpn_size )
-{
-    uint32_t   index;
-    vseg_t   * vseg;
-    vpn_t      base;
-    vpn_t      size;
-    vpn_t      free;
-
-#if DEBUG_VMM_MMAP_ALLOC
-thread_t * this = CURRENT_THREAD;
-uint32_t cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_VMM_MMAP_ALLOC < cycle )
-printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, cycle );
-#endif
-
-    // vseg size must be power of 2
-    // compute actual size and index in zombi_list array
-    size  = POW2_ROUNDUP( npages );
-    index = bits_log2( size );
-
-    // get mmap allocator pointer
-    mmap_mgr_t * mgr = &vmm->mmap_mgr;
-
-    // get lock on mmap allocator
-    busylock_acquire( &mgr->lock );
-
-    // get vseg from zombi_list or from mmap zone
-    if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
-    {
-        // check overflow
-        free = mgr->first_free_vpn;
-        if( (free + size) > mgr->vpn_size ) return -1;
-
-        // update MMAP allocator
-        mgr->first_free_vpn += size;
-
-        // compute base
-        base = free;
-    }
-    else                                             // from zombi_list
-    {
-        // get pointer on zombi vseg from zombi_list
-        vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
-
-        // remove vseg from free-list
-        list_unlink( &vseg->zlist );
-
-        // compute base
-        base = vseg->vpn_base;
-    }
-
-    // release lock on mmap allocator
-    busylock_release( &mgr->lock );
-
-#if DEBUG_VMM_MMAP_ALLOC
-cycle = (uint32_t)hal_get_cycles();
-if( DEBUG_VMM_DESTROY < cycle )
-printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
-#endif
-
-    // returns vpn_base, vpn_size
-    *vpn_base = base;
-    *vpn_size = size;
-    return 0;
-
-}  // end vmm_mmap_alloc()
+
 
 ////////////////////////////////////////////////
@@ -968,11 +1099,5 @@
     {
         // get vpn_base and vpn_size from STACK allocator
-        error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
-        if( error )
-        {
-            printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
-            __FUNCTION__ , process->pid , local_cxy );
-            return NULL;
-        }
+        vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );
 
         // compute vseg base and size from vpn_base and vpn_size
@@ -1072,6 +1197,15 @@
                cxy );
 
+    // build extended pointer on VSL lock
+    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+    // take the VSL lock in write mode
+    remote_rwlock_wr_acquire( lock_xp );
+
     // attach vseg to VSL
        vmm_attach_vseg_to_vsl( vmm , vseg );
+
+    // release the VSL lock
+    remote_rwlock_wr_release( lock_xp );
 
 #if DEBUG_VMM_CREATE_VSEG
@@ -1086,12 +1220,12 @@
 }  // vmm_create_vseg()
 
-///////////////////////////////////
-void vmm_delete_vseg( pid_t    pid,
-                      intptr_t vaddr )
+
+//////////////////////////////////////////
+void vmm_remove_vseg( process_t * process,
+                      vseg_t    * vseg )
 {
-    process_t * process;    // local pointer on local process
-    vmm_t     * vmm;        // local pointer on local process VMM
-    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
-    gpt_t     * gpt;        // local pointer on local process GPT
+    vmm_t     * vmm;        // local pointer on process VMM
+    bool_t      is_ref;     // local process is reference process
+    uint32_t    vseg_type;  // vseg type
     vpn_t       vpn;        // VPN of current PTE
     vpn_t       vpn_min;    // VPN of first PTE
@@ -1103,40 +1237,16 @@
     cxy_t       page_cxy;   // page descriptor cluster
     page_t    * page_ptr;   // page descriptor pointer
-    xptr_t      forks_xp;   // extended pointer on pending forks counter
-    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
-    uint32_t    forks;      // actual number of pendinf forks
-    uint32_t    vseg_type;  // vseg type
-
-#if DEBUG_VMM_DELETE_VSEG
-uint32_t   cycle = (uint32_t)hal_get_cycles();
-thread_t * this  = CURRENT_THREAD;
-if( DEBUG_VMM_DELETE_VSEG < cycle )
-printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
-__FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
-#endif
-
-    // get local pointer on local process descriptor
-    process = cluster_get_local_process_from_pid( pid );
-
-    if( process == NULL )
-    {
-        printk("\n[ERRORR] in %s : cannot get local process descriptor\n",
-        __FUNCTION__ );
-        return;
-    }
-
-    // get pointers on local process VMM an GPT
+    xptr_t      count_xp;   // extended pointer on page refcount
+    uint32_t    count;      // current value of page refcount
+
+// check arguments
+assert( (process != NULL), "process argument is NULL" );
+assert( (vseg    != NULL), "vseg argument is NULL" );
+
+    // compute is_ref
+    is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
+
+    // get pointers on local process VMM
     vmm = &process->vmm;
-    gpt = &process->vmm.gpt;
-
-    // get local pointer on vseg containing vaddr
-    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
-
-    if( vseg == NULL )
-    {
-        printk("\n[ERRORR] in %s : cannot get vseg descriptor\n",
-        __FUNCTION__ );
-        return;
-    }
 
     // get relevant vseg infos
@@ -1145,77 +1255,110 @@
     vpn_max   = vpn_min + vseg->vpn_size;
 
-    // loop to invalidate all vseg PTEs in GPT
+#if DEBUG_VMM_REMOVE_VSEG
+uint32_t   cycle = (uint32_t)hal_get_cycles();
+thread_t * this  = CURRENT_THREAD;
+if( DEBUG_VMM_REMOVE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid,
+process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
+#endif
+
+    // loop on PTEs in GPT
        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
     {
-        // get ppn and attr from GPT entry
-        hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
-
-        if( attr & GPT_MAPPED )  // entry is mapped
+        // get ppn and attr
+        hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
+
+        if( attr & GPT_MAPPED )  // PTE is mapped
         {
 
-#if( DEBUG_VMM_DELETE_VSEG & 1 )
-if( DEBUG_VMM_DELETE_VSEG < cycle )
-printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
+#if( DEBUG_VMM_REMOVE_VSEG & 1 )
+if( DEBUG_VMM_REMOVE_VSEG < cycle )
+printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
 #endif
             // unmap GPT entry in local GPT
-            hal_gpt_reset_pte( gpt , vpn );
-
-            // the allocated page is not released to for kernel vseg
-            if( (vseg_type != VSEG_TYPE_KCODE) &&
-                (vseg_type != VSEG_TYPE_KDATA) &&
-                (vseg_type != VSEG_TYPE_KDEV ) )
+            hal_gpt_reset_pte( &vmm->gpt , vpn );
+
+            // get pointers on physical page descriptor
+            page_xp  = ppm_ppn2page( ppn );
+            page_cxy = GET_CXY( page_xp );
+            page_ptr = GET_PTR( page_xp );
+
+            // decrement page refcount
+            count_xp = XPTR( page_cxy , &page_ptr->refcount );
+            count    = hal_remote_atomic_add( count_xp , -1 );
+
+            // compute the ppn_release condition depending on vseg type
+            bool_t ppn_release;
+            if( (vseg_type == VSEG_TYPE_FILE)  ||
+                (vseg_type == VSEG_TYPE_KCODE) ||
+                (vseg_type == VSEG_TYPE_KDATA) ||
+                (vseg_type == VSEG_TYPE_KDEV) )
             {
-                // get extended pointer on physical page descriptor
-                page_xp  = ppm_ppn2page( ppn );
-                page_cxy = GET_CXY( page_xp );
-                page_ptr = GET_PTR( page_xp );
-
-// FIXME This code must be re-written, as the actual release depends on vseg type,
-// the reference cluster, the page refcount and/or the forks counter...
-
-                // get extended pointers on forks and lock fields
-                forks_xp = XPTR( page_cxy , &page_ptr->forks );
-                lock_xp  = XPTR( page_cxy , &page_ptr->lock );
-
-                // get the lock protecting the page
+                // no physical page release for FILE and KERNEL
+                ppn_release = false;
+            }
+            else if( (vseg_type == VSEG_TYPE_CODE)  ||
+                     (vseg_type == VSEG_TYPE_STACK) )
+            {
+                // always release physical page for private vsegs
+                ppn_release = true;
+            }
+            else if( (vseg_type == VSEG_TYPE_ANON)  ||
+                     (vseg_type == VSEG_TYPE_REMOTE) )
+            {
+                // release physical page if reference cluster
+                ppn_release = is_ref;
+            }
+            else if( is_ref )  // vseg_type == DATA in reference cluster
+            {
+                // get extended pointers on forks and lock field in page descriptor
+                xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
+                xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+                // take lock protecting "forks" counter
                 remote_busylock_acquire( lock_xp );
 
-                // get pending forks counter
-                forks = hal_remote_l32( forks_xp );
-
-                if( forks )  // decrement pending forks counter
+                // get number of pending forks from page descriptor
+                uint32_t forks = hal_remote_l32( forks_xp );
+
+                // decrement pending forks counter if required
+                if( forks )  hal_remote_atomic_add( forks_xp , -1 );
+
+                // release lock protecting "forks" counter
+                remote_busylock_release( lock_xp );
+
+                // release physical page if forks == 0
+                ppn_release = (forks == 0);
+            }
+            else              // vseg_type == DATA not in reference cluster
+            {
+                // no physical page release if not in reference cluster
+                ppn_release = false;
+            }
+
+            // release physical page to relevant kmem when required
+            if( ppn_release )
+            {
+                if( page_cxy == local_cxy )
                 {
-                    // update forks counter
-                    hal_remote_atomic_add( forks_xp , -1 );
-
-                    // release the lock protecting the page
-                    remote_busylock_release( lock_xp );
-                } 
-                else         // release physical page to relevant cluster
+                    req.type = KMEM_PAGE;
+                    req.ptr  = page_ptr;
+                    kmem_free( &req );
+                }
+                else
                 {
-                    // release the lock protecting the page
-                    remote_busylock_release( lock_xp );
-
-                    // release the page to kmem
-                    if( page_cxy == local_cxy )   // local cluster
-                    {
-                        req.type = KMEM_PAGE;
-                        req.ptr  = page_ptr;
-                        kmem_free( &req );
-                    }
-                    else                          // remote cluster
-                    {
-                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
-                    }
-
-#if( DEBUG_VMM_DELETE_VSEG & 1 )
-if( DEBUG_VMM_DELETE_VSEG < cycle )
-printk("- release ppn %x\n", ppn );
-#endif
+                    rpc_pmem_release_pages_client( page_cxy , page_ptr );
                 }
-
             }
+
+#if( DEBUG_VMM_REMOVE_VSEG & 1 )
+if( DEBUG_VMM_REMOVE_VSEG < cycle )
+{
+    if( ppn_release ) printk(" / released to kmem\n" );
+    else              printk("\n");
+}
+#endif
         }
     }
 
-    // remove vseg from VSL and release vseg descriptor (if not MMAP)
     1365    // remove vseg from VSL
    12221366    vmm_detach_vseg_from_vsl( vmm , vseg );
    12231367
    1224 #if DEBUG_VMM_DELETE_VSEG
     1368    // release vseg descriptor depending on vseg type
     1369    if( vseg_type == VSEG_TYPE_STACK )
     1370    {
     1371        // release slot to local stack allocator
     1372        vmm_stack_free( vmm , vseg );
     1373
     1374        // release vseg descriptor to local kmem
     1375        vseg_free( vseg );
     1376    }
     1377    else if( (vseg_type == VSEG_TYPE_ANON) ||
     1378             (vseg_type == VSEG_TYPE_FILE) ||
     1379             (vseg_type == VSEG_TYPE_REMOTE) ) 
     1380    {
     1381        // release vseg to local mmap allocator
     1382        vmm_mmap_free( vmm , vseg );
     1383    }
     1384    else
     1385    {
     1386        // release vseg descriptor to local kmem
     1387        vseg_free( vseg );
     1388    }
     1389
     1390#if DEBUG_VMM_REMOVE_VSEG
    12251391cycle = (uint32_t)hal_get_cycles();
    1226 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1227 printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
    1228 __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
    1229 #endif
    1230 
    1231 }  // end vmm_delete_vseg()
     1392if( DEBUG_VMM_REMOVE_VSEG < cycle )
      1393printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
      1394__FUNCTION__, this->process->pid, this->trdid,
      1395process->pid, vseg_type_str(vseg_type), vseg->min, cycle );
     1396#endif
     1397
     1398}  // end vmm_remove_vseg()
     1399
     1400
     1401///////////////////////////////////
     1402void vmm_delete_vseg( pid_t    pid,
     1403                      intptr_t vaddr )
     1404{
     1405    process_t * process;    // local pointer on local process
     1406    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
     1407
     1408    // get local pointer on local process descriptor
     1409    process = cluster_get_local_process_from_pid( pid );
     1410
     1411    if( process == NULL )
     1412    {
     1413        printk("\n[WARNING] in %s : cannot get local process descriptor\n",
     1414        __FUNCTION__ );
     1415        return;
     1416    }
     1417
     1418    // get local pointer on local vseg containing vaddr
     1419    vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
     1420
     1421    if( vseg == NULL )
     1422    {
     1423        printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
     1424        __FUNCTION__ );
     1425        return;
     1426    }
     1427
     1428    // call relevant function
     1429    vmm_remove_vseg( process , vseg );
     1430
      1431}  // end vmm_delete_vseg()
     1432
    12321433
    12331434/////////////////////////////////////////////
     12341435vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
    12351436                              intptr_t   vaddr )
    12361437{
    1237     xptr_t   iter_xp;
    12381438    xptr_t   vseg_xp;
    12391439    vseg_t * vseg;
     1440    xptr_t   iter_xp;
    12401441
    12411442    // get extended pointers on VSL lock and root
    1242     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     1443    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    12431444    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    12441445
     
    12491450    XLIST_FOREACH( root_xp , iter_xp )
    12501451    {
     1452        // get pointers on vseg
    12511453        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    12521454        vseg    = GET_PTR( vseg_xp );
    12531455
     1456        // return success when match
    12541457        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    12551458        {
     
    12621465    // return failure
    12631466    remote_rwlock_rd_release( lock_xp );
    1264 
    12651467    return NULL;
    12661468
     
    14621664        vseg_init_from_ref( vseg , vseg_xp );
    14631665
     1666        // build extended pointer on VSL lock
     1667        xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     1668 
     1669        // take the VSL lock in write mode
     1670        remote_rwlock_wr_acquire( lock_xp );
     1671
    14641672        // register local vseg in local VSL
    14651673        vmm_attach_vseg_to_vsl( vmm , vseg );
     1674 
     1675        // release the VSL lock
     1676        remote_rwlock_wr_release( lock_xp );
    14661677    }   
    14671678
     
    14861697uint32_t   cycle   = (uint32_t)hal_get_cycles();
    14871698thread_t * this    = CURRENT_THREAD;
    1488 xptr_t     this_xp = XPTR( local_cxy , this );
    14891699if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    14901700printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     
    17171927    error_t          error;           // value returned by called functions
    17181928
     1929#if DEBUG_VMM_HANDLE_PAGE_FAULT
     1930uint32_t   cycle = (uint32_t)hal_get_cycles();
     1931thread_t * this  = CURRENT_THREAD;
     1932if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     1933printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     1934__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
     1935hal_vmm_display( process , true );
     1936#endif
     1937
    17191938    // get local vseg (access to reference VSL can be required)
    17201939    error = vmm_get_vseg( process,
     
    17231942    if( error )
    17241943    {
    1725         printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
    1726         __FUNCTION__ , vpn , process->pid );
      1944        printk("\n[%s] in %s : vpn %x in process %x not in a registered vseg / cycle %d\n",
     1945        __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );
    17271946       
    17281947        return EXCP_USER_ERROR;
    17291948    }
    17301949
    1731  #if DEBUG_VMM_HANDLE_PAGE_FAULT
    1732 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1733 thread_t * this  = CURRENT_THREAD;
     1950#if DEBUG_VMM_HANDLE_PAGE_FAULT
     1951cycle = (uint32_t)hal_get_cycles();
    17341952if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    1735 printk("\n[%s] threadr[%x,%x] enter for vpn %x / %s / cycle %d\n",
    1736 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
      1953printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
     1954__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
    17371955#endif
    17381956
     
    19712189    error_t          error;
    19722190
     2191    thread_t * this = CURRENT_THREAD;
     2192
    19732193#if DEBUG_VMM_HANDLE_COW
    19742194uint32_t   cycle   = (uint32_t)hal_get_cycles();
    1975 thread_t * this    = CURRENT_THREAD;
    1976 xptr_t     this_xp = XPTR( local_cxy , this );
    19772195if( DEBUG_VMM_HANDLE_COW < cycle )
    19782196printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    19792197__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     2198hal_vmm_display( process , true );
    19802199#endif
    19812200
     
    19912210    if( error )
    19922211    {
    1993         printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
    1994         __FUNCTION__, vpn, process->pid );
      2212        printk("\n[PANIC] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
     2213        __FUNCTION__, vpn, process->pid, this->trdid );
    19952214
    19962215        return EXCP_KERNEL_PANIC;
  • trunk/kernel/mm/vmm.h

    r624 r625  
    4848 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not
    4949 * mapped to detect stack overflow.
    50  * The slot index can be computed form the slot base address, and reversely.
    51  * All allocation / release operations are registered in the stack_bitmap, that completely
    52  * define the STACK zone status.
      50 * In this implementation, the slot index is defined by the user thread LTID.
      51 * All allocated stacks are registered in a bitmap defining the STACK zone state:
      52 * - The allocator checks that the requested slot has not already been allocated, and sets
      53 *   the corresponding bit in the bitmap.
      54 * - The de-allocator function resets the corresponding bit in the bitmap (sketch below).
    5355 ********************************************************************************************/
    5456
     
    5759    busylock_t     lock;               /*! lock protecting STACK allocator                  */
    5860    vpn_t          vpn_base;           /*! first page of STACK zone                         */
    59     bitmap_t       bitmap;             /*! bit bector of allocated stacks                   */
     61    bitmap_t       bitmap;             /*! bit vector of allocated stacks                   */
    6062}
    6163stack_mgr_t;
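
A minimal sketch of the LTID-indexed bitmap policy described above. The helper names and the one-word bitmap are illustrative assumptions (the kernel's actual bitmap_t API and the busylock handling are not shown in this changeset):

    // illustrative sketch: one bit per stack slot, indexed by the user
    // thread LTID; a set bit means the slot is allocated
    typedef uint32_t stack_bitmap_t;

    // returns 0 on success / -1 if the requested slot is already allocated
    static int stack_slot_alloc( stack_bitmap_t * bitmap , uint32_t ltid )
    {
        if( *bitmap & (1U << ltid) ) return -1;   // slot already in use
        *bitmap |= (1U << ltid);                  // set the corresponding bit
        return 0;
    }

    // the de-allocator simply resets the corresponding bit
    static void stack_slot_free( stack_bitmap_t * bitmap , uint32_t ltid )
    {
        *bitmap &= ~(1U << ltid);                 // reset the bit
    }
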
     
    8486    vpn_t          vpn_size;           /*! number of pages in MMAP zone                     */
    8587    vpn_t          first_free_vpn;     /*! first free page in MMAP zone                     */
    86     list_entry_t   zombi_list[32];     /*! array of roots of released vsegs lists           */
     88    xlist_entry_t  zombi_list[32];     /*! array of roots of released vsegs lists           */
    8789}
    8890mmap_mgr_t;
     
    109111typedef struct vmm_s
    110112{
    111         remote_rwlock_t  vsegs_lock;         /*! lock protecting the local VSL                  */
     113        remote_rwlock_t  vsl_lock;           /*! lock protecting the local VSL                  */
    112114        xlist_entry_t    vsegs_root;         /*! Virtual Segment List (complete in reference)   */
    113115        uint32_t         vsegs_nr;           /*! total number of local vsegs                    */
     
    132134
    133135/*********************************************************************************************
    134  * This function initialises the virtual memory manager attached to an user process.
      136 * This function makes a partial initialisation of the VMM attached to a user process.
     137 * The GPT must have been previously created, with the hal_gpt_create() function.
      138 * - It registers the "args" and "envs" vsegs in the VSL.
    135139 * - It initializes the STACK and MMAP allocators.
    136  * - It registers the "kentry", "args", "envs" vsegs in the VSL.
    137  * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
    138  * - For TSAR it map all pages for the "kentry" vseg, that must be identity mapping.
    139  ******************************************************a**************************************
    140  * Implementation notes:
     140 * Note:
    141141 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
    142  * - The "stack" vsegs are dynamically created by the thread_user_create() function.
    143  * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscall.
     142 * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
     143 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall.
    144144 *********************************************************************************************
    145145 * @ process   : pointer on process descriptor
    146146 * @ return 0 if success / return -1 if failure.
    147147 ********************************************************************************************/
    148 error_t vmm_init( struct process_s * process );
    149 
    150 /*********************************************************************************************
    151  * This function displays on TXY0 the list or registered vsegs for a given <process>.
    152  * It must be executed by a thread running in reference cluster.
    153  * If the <mapping> argument is true, it displays for each vseg all mapped PTEs in GPT.
     148error_t vmm_user_init( struct process_s * process );
     149
     150/*********************************************************************************************
      151 * This function re-initialises the VMM attached to a user process to prepare a new
      152 * call to the vmm_user_init() function after an exec() syscall.
      153 * It removes from the VMM of the process identified by the <process> argument all
      154 * non-kernel vsegs (i.e. all user vsegs), by calling the vmm_remove_vseg() function.
      155 * - the vsegs are removed from the VSL.
      156 * - the corresponding GPT entries are removed from the GPT.
      157 * - the physical pages are released to the relevant kmem when they are not shared.
      158 * The VSL and the GPT are not modified for the kernel vsegs (see the sketch below).
    154159 *********************************************************************************************
    155160 * @ process   : pointer on process descriptor.
    156  * @ mapping   : detailed mapping if true.
    157  ********************************************************************************************/
    158 void hal_vmm_display( struct process_s * process,
    159                   bool_t             mapping );
     161 ********************************************************************************************/
     162void vmm_user_reset( struct process_s * process );
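
The removal loop can be pictured as follows. This is only a sketch of the behaviour described above, with hypothetical vsl_first_vseg() / vsl_next_vseg() iterators standing in for the kernel's actual xlist traversal and VSL locking:

    // sketch of vmm_user_reset(): remove all user vsegs, keep kernel vsegs
    static void user_vsegs_reset_sketch( process_t * process )
    {
        vseg_t * vseg = vsl_first_vseg( &process->vmm );           // hypothetical
        while( vseg != NULL )
        {
            // fetch the successor before removal, since vmm_remove_vseg()
            // detaches the current vseg from the VSL
            vseg_t * next = vsl_next_vseg( &process->vmm , vseg ); // hypothetical

            // only non-kernel (user) vsegs are removed
            if( (vseg->type != VSEG_TYPE_KCODE) &&
                (vseg->type != VSEG_TYPE_KDATA) &&
                (vseg->type != VSEG_TYPE_KDEV ) )
            {
                vmm_remove_vseg( process , vseg );
            }
            vseg = next;
        }
    }
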
    160163
    161164/*********************************************************************************************
    162165 * This function is called by the process_make_fork() function. It partially copies
    163166 * the content of a remote parent process VMM to the local child process VMM:
    164  * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
    165  *   VSL, and all valid GPT entries in parent GPT are copied to the child GPT.
    166  *   The WRITABLE flag is reset and the COW flag is set in child GPT.
    167  * - all CODE vsegs registered in the parent VSL are registered in the child VSL, but the
    168  *   GPT entries are not copied in the chilf GPT, that will be dynamically updated from
     167 * - All DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the
     168 *   child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the
      169 *   WRITABLE flag is reset and the COW flag is set (see the sketch after this block).
     170 * - All CODE vsegs registered in the parent VSL are registered in the child VSL, but the
     171 *   GPT entries are not copied in the child GPT, and will be dynamically updated from
    169172 *   the .elf file when a page fault is reported.
    170  * - all FILE vsegs registered in the parent VSL are registered in the child VSL, and all
     173 * - All FILE vsegs registered in the parent VSL are registered in the child VSL, and all
    171174 *   valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
    172  * - no STACK vseg is copied from  parent VMM to child VMM, because the child STACK vseg
     175 * - No STACK vseg is copied from  parent VMM to child VMM, because the child stack vseg
    173176 *   must be copied later from the cluster containing the user thread requesting the fork().
     177 * - The KERNEL vsegs required by the target architecture are re-created in the child
     178 *   VMM, from the local kernel process VMM, using the hal_vmm_kernel_update() function.
    174179 *********************************************************************************************
    175180 * @ child_process     : local pointer on local child process descriptor.
     
    196201
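
The COW transformation applied to the copied PTEs can be sketched as below. The GPT_WRITABLE / GPT_COW flag names and the hal_gpt_set_pte() signature are simplified assumptions, and lock handling is omitted; the forks counter update mirrors the pattern used by the vmm_remove_vseg() code above:

    // sketch: turn one parent PTE into a copy-on-write child PTE
    static void fork_copy_one_pte_sketch( xptr_t   child_gpt_xp,
                                          vpn_t    vpn,
                                          uint32_t attr,  // attributes read from parent GPT
                                          ppn_t    ppn )  // physical page shared with parent
    {
        // reset WRITABLE and set COW: the first write by parent or child
        // will raise a COW exception and allocate a private copy
        attr = (attr & ~GPT_WRITABLE) | GPT_COW;             // assumed flag names

        // map the shared page in the child GPT (simplified signature)
        hal_gpt_set_pte( child_gpt_xp , vpn , attr , ppn );

        // record one more pending fork in the page descriptor, so the
        // page is not released while a forked copy still references it
        xptr_t   page_xp  = ppm_ppn2page( ppn );
        cxy_t    page_cxy = GET_CXY( page_xp );
        page_t * page_ptr = GET_PTR( page_xp );
        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
    }
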
    197202/*********************************************************************************************
    198  * This global function modifies a GPT entry identified by the <process> and <vpn>
    199  * arguments in all clusters containing a process copy.
     203 * This function modifies a GPT entry identified by the <process> and <vpn> arguments
     204 * in all clusters containing a process copy.
    200205 * It must be called by a thread running in the reference cluster.
    201206 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
     
    240245/*********************************************************************************************
    241246 * This function allocates memory for a vseg descriptor, initialises it, and register it
    242  * in the VMM of the local process descriptor, that must be the reference process.
    243  * For the 'stack", "file", "anon", & "remote" types, it does not use the <base> argument,
    244  * but uses the STACK and MMAP virtual memory allocators.
     247 * in the VSL of the local process descriptor, that must be the reference process.
     248 * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments,
     249 *   but uses the specific MMAP virtual memory allocator.
     250 * - For the STACK type, it does not use the <size> argument, and the <base> argument
     251 *   defines the user thread LTID used by the specific STACK virtual memory allocator.
    245252 * It checks collision with all pre-existing vsegs.
    246  * To comply with the "on-demand" paging policy, this function does NOT modify the page table,
     253 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT,
    247254 * and does not allocate physical memory for vseg data.
    248255 * It should be called by a local thread (could be a RPC thread if the client thread is not
    249  * running in the regerence cluster).
     256 * running in the reference cluster).
    250257 *********************************************************************************************
    251258 * @ process     : pointer on local processor descriptor.
    252259 * @ type        : vseg type.
    253  * @ base        : vseg base address (not used for dynamically allocated vsegs).
      260 * @ base        : vseg base address (or user thread LTID for a user stack vseg).
    254261 * @ size        : vseg size (bytes).
    255262 * @ file_offset : offset in file for CODE, DATA, FILE types.
     
    269276
    270277/*********************************************************************************************
    271  * This function removes from the local VMM of a process descriptor identified by the <pid>
    272  * argument a local vseg identified by its base address <vaddr> in user space.
    273  * It can be used for any type of vseg, but must be called by a local thread.
    274  * Use the RPC_VMM_DELETE_VSEG if the client thread is not local.
    275  * It does nothing if the process is not registered in the local cluster.
    276  * It does nothing if the vseg is not registered in the local process VSL.
    277  * - It removes from the local GPT all registered PTEs. If it is executed in the reference
    278  *   cluster, it releases the referenced physical pages, to the relevant kmem allocator,
    279  *   depending on vseg type and the pending forks counter.
    280  * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP.
    281  *********************************************************************************************
    282  * @ process  : process identifier.
    283  * @ vaddr    : vseg base address in user space.
     278 * This function removes from the VMM of a process descriptor identified by the <process>
     279 * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg.
     280 * As it uses local pointers, it must be called by a local thread.
     281 * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions.
      282 * It triggers a kernel panic if the process is not registered in the local cluster,
     283 * or if the vseg is not registered in the process VSL.
     284 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
     285 * unmapped from local GPT. Other actions depend on the vseg type:
     286 * - Regarding the vseg descriptor release:
     287 *   . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
     288 *   . for STACK the vseg is released to the local stack allocator.
     289 *   . for all other types, the vseg is released to the local kmem.
     290 * - Regarding the physical pages release:
     291 *   . for KERNEL and FILE, the pages are not released to kmem.
     292 *   . for CODE and STACK, the pages are released to local kmem when they are not COW.
     293 *   . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
     294 *     the local cluster is the reference cluster.
     295 * The lock protecting the VSL must be taken by the caller.
     296 *********************************************************************************************
     297 * @ process  : local pointer on process.
     298 * @ vseg     : local pointer on vseg.
     299 ********************************************************************************************/
     300void vmm_remove_vseg( struct process_s * process,
     301                      struct vseg_s    * vseg );
     302
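
The physical page release rules listed above can be condensed into a single predicate. This is only a restatement of the vmm.c logic shown earlier in this changeset, not an actual kernel function:

    // condensed restatement of the release policy in vmm_remove_vseg()
    static bool_t ppn_must_release( uint32_t vseg_type,
                                    bool_t   is_ref,   // local cluster is reference
                                    uint32_t forks )   // pending forks counter
    {
        // FILE and KERNEL pages are never released to kmem
        if( (vseg_type == VSEG_TYPE_FILE)  ||
            (vseg_type == VSEG_TYPE_KCODE) ||
            (vseg_type == VSEG_TYPE_KDATA) ||
            (vseg_type == VSEG_TYPE_KDEV ) )  return false;

        // private CODE and STACK pages are always released
        if( (vseg_type == VSEG_TYPE_CODE)  ||
            (vseg_type == VSEG_TYPE_STACK) )  return true;

        // ANON and REMOTE pages are released only in the reference cluster
        if( (vseg_type == VSEG_TYPE_ANON)  ||
            (vseg_type == VSEG_TYPE_REMOTE) ) return is_ref;

        // DATA pages are released only in the reference cluster, and only
        // when the pending forks counter is zero (the bug fixed by r625)
        return is_ref && (forks == 0);
    }
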
     303/*********************************************************************************************
      304 * This function calls the vmm_remove_vseg() function to remove from the VMM of a local
      305 * process descriptor, identified by the <pid> argument, the vseg containing the <vaddr>
      306 * virtual address in user space.
     307 * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor.
     308 *********************************************************************************************
     309 * @ pid      : process identifier.
     310 * @ vaddr    : virtual address in user space.
    284311 ********************************************************************************************/
    285312void vmm_delete_vseg( pid_t    pid,
    286313                      intptr_t vaddr );
    287 
    288 /*********************************************************************************************
    289  * This function insert a new <vseg> descriptor in the VSL identifed by the <vmm> argument.
    290  * and updates the vmm field in the vseg descriptor.
    291  * It takes the lock protecting VSL.
    292  *********************************************************************************************
    293  * @ vmm       : local pointer on local VMM.
    294  * @ vseg      : local pointer on local vseg descriptor.
    295  ********************************************************************************************/
    296 void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
    297                              vseg_t * vseg );
    298 
    299 /*********************************************************************************************
    300  * This function removes a vseg identified by the <vseg> argument from the local VSL
    301  * identified by the <vmm> argument and release the memory allocated to vseg descriptor,
    302  * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE).
    303  * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL,
    304  *   and vseg descriptor is released.
    305  * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released,
    306  *   and the stack slot is returned to the local VMM_STACK allocator.
    307  * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list
    308  *   of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released.
    309  *********************************************************************************************
    310  * @ vmm       : local pointer on local VMM.
    311  * @ vseg      : local pointer on local vseg to be removed.
    312  ********************************************************************************************/
    313 void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
    314                                vseg_t * vseg );
    315314
    316315/*********************************************************************************************
  • trunk/kernel/mm/vseg.c

    r623 r625  
    6161}
    6262
    63 /////////////////////
     63///////////////////////////
    6464vseg_t * vseg_alloc( void )
    6565{
  • trunk/kernel/mm/vseg.h

    r623 r625  
    7070/*******************************************************************************************
    7171 * This structure defines a virtual segment descriptor.
    72  * - The VSL contains only local vsegs, but is implemented as an xlist, because it can be
    73  *   accessed by thread running in a remote cluster.
    74  * - The zombi list is used by the local MMAP allocator. It is implemented as a local list.
     72 * The VSL contains only local vsegs, but is implemented as an xlist, because it can be
     73 * accessed by a thread running in a remote cluster.
     74 * The xlist field is also used to implement the zombi lists used by the MMAP allocator.
    7575 ******************************************************************************************/
    7676
     
    7878{
    7979    xlist_entry_t     xlist;        /*! all vsegs in same VSL                             */
    80     list_entry_t      zlist;        /*! all vsegs in same zombi list                      */
    8180    struct vmm_s    * vmm;          /*! pointer on associated VM manager                  */
    8281    uint32_t          type;         /*! vseg type                                         */