Changeset 625 for trunk/kernel/mm/vmm.c


Timestamp: Apr 10, 2019, 10:09:39 AM
Author: alain
Message:

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel whenever
the target process descriptor was in the reference cluster.
These physical pages must be released only when the page
forks counter is zero.
All other modifications are cosmetic.
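
The fixed policy, condensed from the new vmm_remove_vseg() code in the
diff below (is_ref is true when the local process descriptor is the
reference one, and forks is the pending forks counter read from the
page descriptor):

    bool_t ppn_release;
    if( (vseg_type == VSEG_TYPE_FILE)  || (vseg_type == VSEG_TYPE_KCODE) ||
        (vseg_type == VSEG_TYPE_KDATA) || (vseg_type == VSEG_TYPE_KDEV) )
        ppn_release = false;                   // FILE and kernel pages are never released here
    else if( (vseg_type == VSEG_TYPE_CODE) || (vseg_type == VSEG_TYPE_STACK) )
        ppn_release = true;                    // private pages are always released
    else if( (vseg_type == VSEG_TYPE_ANON) || (vseg_type == VSEG_TYPE_REMOTE) )
        ppn_release = is_ref;                  // released only by the reference cluster
    else                                       // VSEG_TYPE_DATA
        ppn_release = is_ref && (forks == 0);  // the fix: also requires forks == 0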

File: 1 edited

  • trunk/kernel/mm/vmm.c

    r624 r625  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    5555extern  process_t  process_zero;      // allocated in cluster.c
    5656
    57 ///////////////////////////////////////
    58 error_t vmm_init( process_t * process )
     57////////////////////////////////////////////////////////////////////////////////////////////
     58// This static function is called by the vmm_create_vseg() function, and implements
     59// the VMM STACK specific allocator.
     60////////////////////////////////////////////////////////////////////////////////////////////
     61// @ vmm      : [in]  pointer on VMM.
     62// @ ltid     : [in]  requested slot == local user thread identifier.
     63// @ vpn_base : [out] first allocated page
     64// @ vpn_size : [out] number of allocated pages
     65////////////////////////////////////////////////////////////////////////////////////////////
     66static void vmm_stack_alloc( vmm_t  * vmm,
     67                             ltid_t   ltid,
     68                             vpn_t  * vpn_base,
     69                             vpn_t  * vpn_size )
    5970{
    60     error_t   error;
     71
     72// check ltid argument
     73assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
     74"slot index %d too large for an user stack vseg", ltid );
     75
     76    // get stack allocator pointer
     77    stack_mgr_t * mgr = &vmm->stack_mgr;
     78
     79    // get lock on stack allocator
     80    busylock_acquire( &mgr->lock );
     81
     82// check requested slot is available
     83assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
     84"slot index %d already allocated", ltid );
     85
     86    // update bitmap
     87    bitmap_set( &mgr->bitmap , ltid );
     88
     89    // release lock on stack allocator
     90    busylock_release( &mgr->lock );
     91
     92    // returns vpn_base, vpn_size (the first page is not allocated)
     93    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
     94    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
     95
     96} // end vmm_stack_alloc()
     97
     98////////////////////////////////////////////////////////////////////////////////////////////
     99// This static function is called by the vmm_remove_vseg() function, and implements
     100// the VMM STACK specific deallocator.
     101////////////////////////////////////////////////////////////////////////////////////////////
     102// @ vmm      : [in] pointer on VMM.
     103// @ vseg     : [in] pointer on released vseg.
     104////////////////////////////////////////////////////////////////////////////////////////////
     105static void vmm_stack_free( vmm_t  * vmm,
     106                            vseg_t * vseg )
     107{
     108    // get stack allocator pointer
     109    stack_mgr_t * mgr = &vmm->stack_mgr;
     110
     111    // compute slot index
     112    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
     113
     114// check index
     115assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
     116"slot index %d too large for an user stack vseg", index );
     117
     118// check released slot is allocated
     119assert( (bitmap_state( &mgr->bitmap , index ) == true),
     120"released slot index %d non allocated", index );
     121
     122    // get lock on stack allocator
     123    busylock_acquire( &mgr->lock );
     124
     125    // update stacks_bitmap
     126    bitmap_clear( &mgr->bitmap , index );
     127
     128    // release lock on stack allocator
     129    busylock_release( &mgr->lock );
     130
     131}  // end vmm_stack_free()
     132
     133////////////////////////////////////////////////////////////////////////////////////////////
     134// This static function is called by the vmm_create_vseg() function, and implements
     135// the VMM MMAP specific allocator.
     136////////////////////////////////////////////////////////////////////////////////////////////
     137// @ vmm      : [in] pointer on VMM.
     138// @ npages   : [in] requested number of pages.
     139// @ vpn_base : [out] first allocated page.
     140// @ vpn_size : [out] actual number of allocated pages.
     141////////////////////////////////////////////////////////////////////////////////////////////
     142static error_t vmm_mmap_alloc( vmm_t * vmm,
     143                               vpn_t   npages,
     144                               vpn_t * vpn_base,
     145                               vpn_t * vpn_size )
     146{
     147    uint32_t   order;
     148    xptr_t     vseg_xp;
     149    vseg_t   * vseg;
     150    vpn_t      base;
     151    vpn_t      size;
     152    vpn_t      free;
     153
     154#if DEBUG_VMM_MMAP_ALLOC
     155thread_t * this = CURRENT_THREAD;
     156uint32_t cycle = (uint32_t)hal_get_cycles();
     157if( DEBUG_VMM_MMAP_ALLOC < cycle )
     158printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
     159__FUNCTION__, this->process->pid, this->trdid, cycle );
     160#endif
     161
     162    // the number of allocated pages must be a power of 2
     163    // compute actual size and order
     164    size  = POW2_ROUNDUP( npages );
     165    order = bits_log2( size );
     166
     167    // get mmap allocator pointer
     168    mmap_mgr_t * mgr = &vmm->mmap_mgr;
     169
     170    // build extended pointer on root of zombi_list[order]
     171    xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
     172
     173    // take lock protecting zombi_lists
     174    busylock_acquire( &mgr->lock );
     175
     176    // get vseg from zombi_list or from mmap zone
     177    if( xlist_is_empty( root_xp ) )                   // from mmap zone
     178    {
     179        // check overflow
     180        free = mgr->first_free_vpn;
     181        if( (free + size) > mgr->vpn_size ) { busylock_release( &mgr->lock ); return -1; }
     182
     183        // update MMAP allocator
     184        mgr->first_free_vpn += size;
     185
     186        // compute base
     187        base = free;
     188    }
     189    else                                              // from zombi_list
     190    {
     191        // get pointer on zombi vseg from zombi_list
     192        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
     193        vseg    = GET_PTR( vseg_xp );
     194
     195        // remove vseg from free-list
     196        xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     197
     198        // compute base
     199        base = vseg->vpn_base;
     200    }
     201
     202    // release lock
     203    busylock_release( &mgr->lock );
     204
     205#if DEBUG_VMM_MMAP_ALLOC
     206cycle = (uint32_t)hal_get_cycles();
     207if( DEBUG_VMM_MMAP_ALLOC < cycle )
     208printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
     209__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
     210#endif
     211
     212    // returns vpn_base, vpn_size
     213    *vpn_base = base;
     214    *vpn_size = size;
     215    return 0;
     216
     217}  // end vmm_mmap_alloc()
     218
     219////////////////////////////////////////////////////////////////////////////////////////////
     220// This static function is called by the vmm_remove_vseg() function, and implements
     221// the VMM MMAP specific deallocator.
     222////////////////////////////////////////////////////////////////////////////////////////////
     223// @ vmm      : [in] pointer on VMM.
     224// @ vseg     : [in] pointer on released vseg.
     225////////////////////////////////////////////////////////////////////////////////////////////
     226static void vmm_mmap_free( vmm_t  * vmm,
     227                           vseg_t * vseg )
     228{
     229    // get pointer on mmap allocator
     230    mmap_mgr_t * mgr = &vmm->mmap_mgr;
     231
     232    // compute zombi_list order
     233    uint32_t order = bits_log2( vseg->vpn_size );
     234
     235    // take lock protecting zombi lists
     236    busylock_acquire( &mgr->lock );
     237
     238    // update relevant zombi_list
     239    xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
     240                     XPTR( local_cxy , &vseg->xlist ) );
     241
     242    // release lock
     243    busylock_release( &mgr->lock );
     244
     245}  // end of vmm_mmap_free()
     246
     247////////////////////////////////////////////////////////////////////////////////////////////
     248// This static function registers one vseg in the VSL of a local process descriptor.
     249////////////////////////////////////////////////////////////////////////////////////////////
     250// vmm       : [in] pointer on VMM.
     251// vseg      : [in] pointer on vseg.
     252////////////////////////////////////////////////////////////////////////////////////////////
     253void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
     254                             vseg_t * vseg )
     255{
     256    // update vseg descriptor
     257    vseg->vmm = vmm;
     258
     259    // increment vsegs number
     260    vmm->vsegs_nr++;
     261
     262    // add vseg in vmm list
     263    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
     264                    XPTR( local_cxy , &vseg->xlist ) );
     265
     266}  // end vmm_attach_vseg_to_vsl()
     267
     268////////////////////////////////////////////////////////////////////////////////////////////
     269// This static function removes one vseg from the VSL of a local process descriptor.
     270////////////////////////////////////////////////////////////////////////////////////////////
     271// vmm       : [in] pointer on VMM.
     272// vseg      : [in] pointer on vseg.
     273////////////////////////////////////////////////////////////////////////////////////////////
     274void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
     275                               vseg_t * vseg )
     276{
     277    // update vseg descriptor
     278    vseg->vmm = NULL;
     279
     280    // decrement vsegs number
     281    vmm->vsegs_nr--;
     282
     283    // remove vseg from VSL
     284    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
     285
     286}  // end vmm_detach_vseg_from_vsl()
     287
     288
     289
     290
     291////////////////////////////////////////////
     292error_t vmm_user_init( process_t * process )
     293{
    61294    vseg_t  * vseg_args;
    62295    vseg_t  * vseg_envs;
     
    65298    uint32_t  i;
    66299
    67 #if DEBUG_VMM_INIT
     300#if DEBUG_VMM_USER_INIT
    68301thread_t * this = CURRENT_THREAD;
    69302uint32_t cycle = (uint32_t)hal_get_cycles();
    70 if( DEBUG_VMM_INIT )
     303if( DEBUG_VMM_USER_INIT )
    71304printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
    72305__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
     
    76309    vmm_t   * vmm = &process->vmm;
    77310
    78     // initialize VSL (empty)
    79     vmm->vsegs_nr = 0;
    80         xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    81         remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
    82 
     311// check UTILS zone
    83312assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
    84313         (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
    85314         "UTILS zone too small\n" );
    86315
     316// check STACK zone
    87317assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
    88318(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    89319"STACK zone too small\n");
    90320
    91     // register args vseg in VSL
     321    // register "args" vseg in VSL
    92322    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
    93323    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     
    101331                                 XPTR_NULL,     // mapper_xp unused
    102332                                 local_cxy );
    103 
    104333    if( vseg_args == NULL )
    105334    {
     
    110339    vmm->args_vpn_base = base;
    111340
    112     // register the envs vseg in VSL
     341    // register "envs" vseg in VSL
    113342    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
    114343    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     
    122351                                 XPTR_NULL,     // mapper_xp unused
    123352                                 local_cxy );
    124 
    125353    if( vseg_envs == NULL )
    126354    {
     
    130358
    131359    vmm->envs_vpn_base = base;
    132 
    133     // create GPT (empty)
    134     error = hal_gpt_create( &vmm->gpt );
    135 
    136     if( error )
    137     {
    138         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
    139         return -1;
    140     }
    141 
    142     // initialize GPT lock
    143     remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    144 
    145     // update process VMM with kernel vsegs as required by the hardware architecture
    146     error = hal_vmm_kernel_update( process );
    147 
    148     if( error )
    149     {
    150         printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
    151         return -1;
    152     }
    153360
    154361    // initialize STACK allocator
     
    162369    vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    163370    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
    164     for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
     371    for( i = 0 ; i < 32 ; i++ )
     372    {
     373        xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
     374    }
    165375
    166376    // initialize instrumentation counters
     
    169379    hal_fence();
    170380
    171 #if DEBUG_VMM_INIT
     381#if DEBUG_VMM_USER_INIT
    172382cycle = (uint32_t)hal_get_cycles();
    173 if( DEBUG_VMM_INIT )
     383if( DEBUG_VMM_USER_INIT )
    174384printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
    175385__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
     
    178388    return 0;
    179389
    180 }  // end vmm_init()
    181 
     390}  // end vmm_user_init()
    182391
    183392//////////////////////////////////////////
    184 void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
    185                              vseg_t * vseg )
     393void vmm_user_reset( process_t * process )
    186394{
    187     // build extended pointer on rwlock protecting VSL
    188     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    189 
    190     // get rwlock in write mode
    191     remote_rwlock_wr_acquire( lock_xp );
    192 
    193     // update vseg descriptor
    194     vseg->vmm = vmm;
    195 
    196     // increment vsegs number
    197     vmm->vsegs_nr++;
    198 
    199     // add vseg in vmm list
    200     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
    201                     XPTR( local_cxy , &vseg->xlist ) );
    202 
    203     // release rwlock in write mode
    204     remote_rwlock_wr_release( lock_xp );
    205 }
    206 
    207 ////////////////////////////////////////////
    208 void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
    209                                vseg_t * vseg )
    210 {
    211     // get vseg type
    212     uint32_t type = vseg->type;
    213 
    214     // build extended pointer on rwlock protecting VSL
    215     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    216 
    217     // get rwlock in write mode
    218     remote_rwlock_wr_acquire( lock_xp );
    219 
    220     // update vseg descriptor
    221     vseg->vmm = NULL;
    222 
    223     // remove vseg from VSL
    224     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    225 
    226     // release rwlock in write mode
    227     remote_rwlock_wr_release( lock_xp );
    228 
    229     // release the stack slot to VMM stack allocator if STACK type
    230     if( type == VSEG_TYPE_STACK )
    231     {
    232         // get pointer on stack allocator
    233         stack_mgr_t * mgr = &vmm->stack_mgr;
    234 
    235         // compute slot index
    236         uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
    237 
    238         // update stacks_bitmap
    239         busylock_acquire( &mgr->lock );
    240         bitmap_clear( &mgr->bitmap , index );
    241         busylock_release( &mgr->lock );
    242     }
    243 
    244     // release the vseg to VMM mmap allocator if MMAP type
    245     if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
    246     {
    247         // get pointer on mmap allocator
    248         mmap_mgr_t * mgr = &vmm->mmap_mgr;
    249 
    250         // compute zombi_list index
    251         uint32_t index = bits_log2( vseg->vpn_size );
    252 
    253         // update zombi_list
    254         busylock_acquire( &mgr->lock );
    255         list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
    256         busylock_release( &mgr->lock );
    257     }
    258 
    259     // release physical memory allocated for vseg if no MMAP and no kernel type
    260     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
    261         (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    262     {
    263         vseg_free( vseg );
    264     }
    265 
    266 }  // end vmm_remove_vseg_from_vsl()
     395    xptr_t       vseg_xp;
     396        vseg_t     * vseg;
     397    vseg_type_t  vseg_type;
     398
     399#if DEBUG_VMM_USER_RESET
     400uint32_t cycle = (uint32_t)hal_get_cycles();
     401thread_t * this = CURRENT_THREAD;
     402if( DEBUG_VMM_USER_RESET < cycle )
     403printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
     404__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
     405#endif
     406
     407#if (DEBUG_VMM_USER_RESET & 1 )
     408if( DEBUG_VMM_USER_RESET < cycle )
     409hal_vmm_display( process , true );
     410#endif
     411
     412    // get pointer on local VMM
     413    vmm_t * vmm = &process->vmm;
     414
     415    // build extended pointer on VSL root and VSL lock
     416    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     417    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     418
     419    // take the VSL lock
     420        remote_rwlock_wr_acquire( lock_xp );
     421
     422    // scan the VSL to delete all non-kernel vsegs
     423    // (we don't use a FOREACH because vsegs are deleted during the scan)
     424    xptr_t   iter_xp;
     425    xptr_t   next_xp;
     426        for( iter_xp = hal_remote_l64( root_xp ) ;
     427         iter_xp != root_xp ;
     428         iter_xp = next_xp )
     429        {
     430        // save extended pointer on next item in xlist
     431        next_xp = hal_remote_l64( iter_xp );
     432
     433        // get pointers on current vseg in VSL
     434        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     435        vseg      = GET_PTR( vseg_xp );
     436        vseg_type = vseg->type;
     437
     438#if( DEBUG_VMM_USER_RESET & 1 )
     439if( DEBUG_VMM_USER_RESET < cycle )
     440printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
     441__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     442#endif
     443        // delete non-kernel vseg
     444        if( (vseg_type != VSEG_TYPE_KCODE) &&
     445            (vseg_type != VSEG_TYPE_KDATA) &&
     446            (vseg_type != VSEG_TYPE_KDEV ) )
     447        {
     448            // remove vseg from VSL
     449            vmm_remove_vseg( process , vseg );
     450
     451#if( DEBUG_VMM_USER_RESET & 1 )
     452if( DEBUG_VMM_USER_RESET < cycle )
     453printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
     454__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     455#endif
     456        }
     457        else
     458        {
     459
     460#if( DEBUG_VMM_USER_RESET & 1 )
     461if( DEBUG_VMM_USER_RESET < cycle )
     462printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
     463__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
     464#endif
     465        }
     466        }  // end loop on vsegs in VSL
     467
     468    // release the VSL lock
     469        remote_rwlock_wr_release( lock_xp );
     470
     471// FIXME: the process copies must also be handled...
     472
     473#if DEBUG_VMM_USER_RESET
     474cycle = (uint32_t)hal_get_cycles();
     475if( DEBUG_VMM_USER_RESET < cycle )
     476printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
     477__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
     478#endif
     479
     480}  // end vmm_user_reset()
    267481
    268482////////////////////////////////////////////////
     
    507721    cxy_t       page_cxy;
    508722    xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
    509     xptr_t      lock_xp;        // extended pointer on lock protecting the forks counter
    510723    xptr_t      parent_root_xp;
    511724    bool_t      mapped;
     
    528741    child_vmm  = &child_process->vmm;
    529742
    530     // get extended pointer on lock protecting the parent VSL
    531     parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );
    532 
    533     // initialize the lock protecting the child VSL
    534     remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
     743    // initialize the locks protecting the child VSL and GPT
     744    remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
     745        remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );
    535746
    536747    // initialize the child VSL as empty
     
    538749    child_vmm->vsegs_nr = 0;
    539750
    540     // create the child GPT
     751    // create an empty child GPT
    541752    error = hal_gpt_create( &child_vmm->gpt );
    542 
    543753    if( error )
    544754    {
     
    547757    }
    548758
    549     // build extended pointer on parent VSL
     759    // build extended pointer on parent VSL root and lock
    550760    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
     761    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
    551762
    552763    // take the lock protecting the parent VSL in read mode
     
    556767    XLIST_FOREACH( parent_root_xp , iter_xp )
    557768    {
    558         // get local and extended pointers on current parent vseg
     769        // get pointers on current parent vseg
    559770        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    560771        parent_vseg    = GET_PTR( parent_vseg_xp );
     
    587798            vseg_init_from_ref( child_vseg , parent_vseg_xp );
    588799
     800            // build extended pointer on VSL lock
     801            xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
     802 
     803            // take the VSL lock in write mode
     804            remote_rwlock_wr_acquire( lock_xp );
     805
    589806            // register child vseg in child VSL
    590807            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
     808
     809            // release the VSL lock
     810            remote_rwlock_wr_release( lock_xp );
    591811
    592812#if DEBUG_VMM_FORK_COPY
     
    597817hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
    598818#endif
    599 
    600             // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
     819            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
    601820            if( type != VSEG_TYPE_CODE )
    602821            {
    603                 // activate the COW for DATA, MMAP, REMOTE vsegs only
     822                // activate the COW for DATA, ANON, REMOTE vsegs only
    604823                cow = ( type != VSEG_TYPE_FILE );
    605824
     
    611830                {
    612831                    error = hal_gpt_pte_copy( &child_vmm->gpt,
     832                                              vpn,
    613833                                              XPTR( parent_cxy , &parent_vmm->gpt ),
    614834                                              vpn,
     
    677897    child_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    678898    child_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
    679     for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );
     899    for( i = 0 ; i < 32 ; i++ )
     900    {
     901        xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
     902    }
    680903
    681904    // initialize instrumentation counters
     
    726949    vmm_t  * vmm = &process->vmm;
    727950
    728     // get extended pointer on VSL root and VSL lock
    729     xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     951    // build extended pointer on VSL root, VSL lock and GPT lock
     952    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
     953    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     954    xptr_t   gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
     955
     956    // take the VSL lock
     957    remote_rwlock_wr_acquire( vsl_lock_xp );
    730958
    731959    // scan the VSL to delete all registered vsegs
    732     // (don't use a FOREACH for item deletion in xlist)
    733 
    734         while( !xlist_is_empty( root_xp ) )
     960    // (we don't use a FOREACH because vsegs are deleted during the scan)
     961    xptr_t  iter_xp;
     962    xptr_t  next_xp;
     963        for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
     964         iter_xp != vsl_root_xp ;
     965         iter_xp = next_xp )
    735966        {
    736         // get pointer on first vseg in VSL
    737                 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
    738         vseg    = GET_PTR( vseg_xp );
     967        // save extended pointer on next item in xlist
     968        next_xp = hal_remote_l64( iter_xp );
     969
     970        // get pointers on current vseg in VSL
     971        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     972        vseg      = GET_PTR( vseg_xp );
    739973
    740974        // delete vseg and release physical pages
    741         vmm_delete_vseg( process->pid , vseg->min );
     975        vmm_remove_vseg( process , vseg );
    742976
    743977#if( DEBUG_VMM_DESTROY & 1 )
     
    749983        }
    750984
    751     // remove all vsegs from zombi_lists in MMAP allocator
     985    // release the VSL lock
     986    remote_rwlock_wr_release( vsl_lock_xp );
     987
     988    // remove all registered MMAP vsegs
     989    // from zombi_lists in MMAP allocator
    752990    uint32_t i;
    753991    for( i = 0 ; i<32 ; i++ )
    754992    {
    755             while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
     993        // build extended pointer on zombi_list[i]
     994        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
     995 
     996        // scan zombi_list[i]
     997            while( !xlist_is_empty( root_xp ) )
    756998            {
    757                     vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );
     999                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
     1000            vseg    = GET_PTR( vseg_xp );
    7581001
    7591002#if( DEBUG_VMM_DESTROY & 1 )
     
    7651008            vseg->vmm = NULL;
    7661009
    767             // remove vseg from  xlist
     1010            // remove vseg from  zombi_list
    7681011            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    7691012
     
    7791022    }
    7801023
     1024    // take the GPT lock
     1025    remote_rwlock_wr_acquire( gpt_lock_xp );
     1026
    7811027    // release memory allocated to the GPT itself
    7821028    hal_gpt_destroy( &vmm->gpt );
     1029
     1030    // release the GPT lock
     1031    remote_rwlock_wr_release( gpt_lock_xp );
    7831032
    7841033#if DEBUG_VMM_DESTROY
     
    8161065}  // end vmm_check_conflict()
    8171066
    818 ////////////////////////////////////////////////////////////////////////////////////////////
    819 // This static function is called by the vmm_create_vseg() function, and implements
    820 // the VMM stack_vseg specific allocator.
    821 ////////////////////////////////////////////////////////////////////////////////////////////
    822 // @ vmm      : pointer on VMM.
    823 // @ vpn_base : (return value) first allocated page
    824 // @ vpn_size : (return value) number of allocated pages
    825 ////////////////////////////////////////////////////////////////////////////////////////////
    826 static error_t vmm_stack_alloc( vmm_t * vmm,
    827                                 vpn_t * vpn_base,
    828                                 vpn_t * vpn_size )
    829 {
    830     // get stack allocator pointer
    831     stack_mgr_t * mgr = &vmm->stack_mgr;
    832 
    833     // get lock on stack allocator
    834     busylock_acquire( &mgr->lock );
    835 
    836     // get first free slot index in bitmap
    837     int32_t index = bitmap_ffc( &mgr->bitmap , 4 );
    838     if( (index < 0) || (index > 31) )
    839     {
    840         busylock_release( &mgr->lock );
    841         return 0xFFFFFFFF;
    842     }
    843 
    844     // update bitmap
    845     bitmap_set( &mgr->bitmap , index );
    846 
    847     // release lock on stack allocator
    848     busylock_release( &mgr->lock );
    849 
    850     // returns vpn_base, vpn_size (one page is not allocated)
    851     *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
    852     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
    853     return 0;
    854 
    855 } // end vmm_stack_alloc()
    856 
    857 ////////////////////////////////////////////////////////////////////////////////////////////
    858 // This static function is called by the vmm_create_vseg() function, and implements
    859 // the VMM MMAP specific allocator.
    860 ////////////////////////////////////////////////////////////////////////////////////////////
    861 // @ vmm      : [in] pointer on VMM.
    862 // @ npages   : [in] requested number of pages.
    863 // @ vpn_base : [out] first allocated page.
    864 // @ vpn_size : [out] actual number of allocated pages.
    865 ////////////////////////////////////////////////////////////////////////////////////////////
    866 static error_t vmm_mmap_alloc( vmm_t * vmm,
    867                                vpn_t   npages,
    868                                vpn_t * vpn_base,
    869                                vpn_t * vpn_size )
    870 {
    871     uint32_t   index;
    872     vseg_t   * vseg;
    873     vpn_t      base;
    874     vpn_t      size;
    875     vpn_t      free;
    876 
    877 #if DEBUG_VMM_MMAP_ALLOC
    878 thread_t * this = CURRENT_THREAD;
    879 uint32_t cycle = (uint32_t)hal_get_cycles();
    880 if( DEBUG_VMM_MMAP_ALLOC < cycle )
    881 printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
    882 __FUNCTION__, this->process->pid, this->trdid, cycle );
    883 #endif
    884 
    885     // vseg size must be power of 2
    886     // compute actual size and index in zombi_list array
    887     size  = POW2_ROUNDUP( npages );
    888     index = bits_log2( size );
    889 
    890     // get mmap allocator pointer
    891     mmap_mgr_t * mgr = &vmm->mmap_mgr;
    892 
    893     // get lock on mmap allocator
    894     busylock_acquire( &mgr->lock );
    895 
    896     // get vseg from zombi_list or from mmap zone
    897     if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
    898     {
    899         // check overflow
    900         free = mgr->first_free_vpn;
    901         if( (free + size) > mgr->vpn_size ) return -1;
    902 
    903         // update MMAP allocator
    904         mgr->first_free_vpn += size;
    905 
    906         // compute base
    907         base = free;
    908     }
    909     else                                             // from zombi_list
    910     {
    911         // get pointer on zombi vseg from zombi_list
    912         vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
    913 
    914         // remove vseg from free-list
    915         list_unlink( &vseg->zlist );
    916 
    917         // compute base
    918         base = vseg->vpn_base;
    919     }
    920 
    921     // release lock on mmap allocator
    922     busylock_release( &mgr->lock );
    923 
    924 #if DEBUG_VMM_MMAP_ALLOC
    925 cycle = (uint32_t)hal_get_cycles();
    926 if( DEBUG_VMM_DESTROY < cycle )
    927 printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
    928 __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
    929 #endif
    930 
    931     // returns vpn_base, vpn_size
    932     *vpn_base = base;
    933     *vpn_size = size;
    934     return 0;
    935 
    936 }  // end vmm_mmap_alloc()
     1067
    9371068
    9381069////////////////////////////////////////////////
     
    9681099    {
    9691100        // get vpn_base and vpn_size from STACK allocator
    970         error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
    971         if( error )
    972         {
    973             printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
    974             __FUNCTION__ , process->pid , local_cxy );
    975             return NULL;
    976         }
     1101        vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );
    9771102
    9781103        // compute vseg base and size from vpn_base and vpn_size
     
    10721197               cxy );
    10731198
     1199    // build extended pointer on VSL lock
     1200    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     1201 
     1202    // take the VSL lock in write mode
     1203    remote_rwlock_wr_acquire( lock_xp );
     1204
    10741205    // attach vseg to VSL
    10751206        vmm_attach_vseg_to_vsl( vmm , vseg );
     1207
     1208    // release the VSL lock
     1209    remote_rwlock_wr_release( lock_xp );
    10761210
    10771211#if DEBUG_VMM_CREATE_VSEG
     
    10861220}  // vmm_create_vseg()
    10871221
    1088 ///////////////////////////////////
    1089 void vmm_delete_vseg( pid_t    pid,
    1090                       intptr_t vaddr )
     1222
     1223//////////////////////////////////////////
     1224void vmm_remove_vseg( process_t * process,
     1225                      vseg_t    * vseg )
    10911226{
    1092     process_t * process;    // local pointer on local process
    1093     vmm_t     * vmm;        // local pointer on local process VMM
    1094     vseg_t    * vseg;       // local pointer on local vseg containing vaddr
    1095     gpt_t     * gpt;        // local pointer on local process GPT
     1227    vmm_t     * vmm;        // local pointer on process VMM
     1228    bool_t      is_ref;     // local process is reference process
     1229    uint32_t    vseg_type;  // vseg type
    10961230    vpn_t       vpn;        // VPN of current PTE
    10971231    vpn_t       vpn_min;    // VPN of first PTE
     
    11031237    cxy_t       page_cxy;   // page descriptor cluster
    11041238    page_t    * page_ptr;   // page descriptor pointer
    1105     xptr_t      forks_xp;   // extended pointer on pending forks counter
    1106     xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    1107     uint32_t    forks;      // actual number of pending forks
    1108     uint32_t    vseg_type;  // vseg type
    1109 
    1110 #if DEBUG_VMM_DELETE_VSEG
    1111 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1112 thread_t * this  = CURRENT_THREAD;
    1113 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1114 printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
    1115 __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
    1116 #endif
    1117 
    1118     // get local pointer on local process descriptor
    1119     process = cluster_get_local_process_from_pid( pid );
    1120 
    1121     if( process == NULL )
    1122     {
    1123         printk("\n[ERROR] in %s : cannot get local process descriptor\n",
    1124         __FUNCTION__ );
    1125         return;
    1126     }
    1127 
    1128     // get pointers on local process VMM an GPT
     1239    xptr_t      count_xp;   // extended pointer on page refcount
     1240    uint32_t    count;      // current value of page refcount
     1241
     1242// check arguments
     1243assert( (process != NULL), "process argument is NULL" );
     1244assert( (vseg    != NULL), "vseg argument is NULL" );
     1245
     1246    // compute is_ref
     1247    is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
     1248
     1249    // get pointers on local process VMM
    11291250    vmm = &process->vmm;
    1130     gpt = &process->vmm.gpt;
    1131 
    1132     // get local pointer on vseg containing vaddr
    1133     vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    1134 
    1135     if( vseg == NULL )
    1136     {
    1137         printk("\n[ERROR] in %s : cannot get vseg descriptor\n",
    1138         __FUNCTION__ );
    1139         return;
    1140     }
    11411251
    11421252    // get relevant vseg infos
     
    11451255    vpn_max   = vpn_min + vseg->vpn_size;
    11461256
    1147     // loop to invalidate all vseg PTEs in GPT
     1257#if DEBUG_VMM_REMOVE_VSEG
     1258uint32_t   cycle = (uint32_t)hal_get_cycles();
     1259thread_t * this  = CURRENT_THREAD;
     1260if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1261printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
     1262__FUNCTION__, this->process->pid, this->trdid,
     1263process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1264#endif
     1265
     1266    // loop on PTEs in GPT
    11481267        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    11491268    {
    1150         // get ppn and attr from GPT entry
    1151         hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
    1152 
    1153         if( attr & GPT_MAPPED )  // entry is mapped
     1269        // get ppn and attr
     1270        hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
     1271
     1272        if( attr & GPT_MAPPED )  // PTE is mapped
    11541273        {
    11551274
    1156 #if( DEBUG_VMM_DELETE_VSEG & 1 )
    1157 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1158 printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
     1275#if( DEBUG_VMM_REMOVE_VSEG & 1 )
     1276if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1277printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
    11591278#endif
    11601279            // unmap GPT entry in local GPT
    1161             hal_gpt_reset_pte( gpt , vpn );
    1162 
    1163             // the allocated page is not released for a kernel vseg
    1164             if( (vseg_type != VSEG_TYPE_KCODE) &&
    1165                 (vseg_type != VSEG_TYPE_KDATA) &&
    1166                 (vseg_type != VSEG_TYPE_KDEV ) )
     1280            hal_gpt_reset_pte( &vmm->gpt , vpn );
     1281
     1282            // get pointers on physical page descriptor
     1283            page_xp  = ppm_ppn2page( ppn );
     1284            page_cxy = GET_CXY( page_xp );
     1285            page_ptr = GET_PTR( page_xp );
     1286
     1287            // decrement page refcount
     1288            count_xp = XPTR( page_cxy , &page_ptr->refcount );
     1289            count    = hal_remote_atomic_add( count_xp , -1 );
     1290
     1291            // compute the ppn_release condition depending on vseg type
     1292            bool_t ppn_release;
     1293            if( (vseg_type == VSEG_TYPE_FILE)  ||
     1294                (vseg_type == VSEG_TYPE_KCODE) ||
     1295                (vseg_type == VSEG_TYPE_KDATA) ||
     1296                (vseg_type == VSEG_TYPE_KDEV) )           
    11671297            {
    1168                 // get extended pointer on physical page descriptor
    1169                 page_xp  = ppm_ppn2page( ppn );
    1170                 page_cxy = GET_CXY( page_xp );
    1171                 page_ptr = GET_PTR( page_xp );
    1172 
    1173 // FIXME This code must be re-written, as the actual release depends on vseg type,
    1174 // the reference cluster, the page refcount and/or the forks counter...
    1175 
    1176                 // get extended pointers on forks and lock fields
    1177                 forks_xp = XPTR( page_cxy , &page_ptr->forks );
    1178                 lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    1179 
    1180                 // get the lock protecting the page
     1298                // no physical page release for FILE and KERNEL
     1299                ppn_release = false;
     1300            }
     1301            else if( (vseg_type == VSEG_TYPE_CODE)  ||
     1302                     (vseg_type == VSEG_TYPE_STACK) )
     1303            {
     1304                // always release physical page for private vsegs
     1305                ppn_release = true;
     1306            }
     1307            else if( (vseg_type == VSEG_TYPE_ANON)  ||
     1308                     (vseg_type == VSEG_TYPE_REMOTE) )
     1309            {
     1310                // release physical page if reference cluster
     1311                ppn_release = is_ref;
     1312            }
     1313            else if( is_ref )  // vseg_type == DATA in reference cluster
     1314            {
     1315                // get extended pointers on forks and lock field in page descriptor
     1316                xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1317                xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
     1318
     1319                // take lock protecting "forks" counter
    11811320                remote_busylock_acquire( lock_xp );
    11821321
    1183                 // get pending forks counter
    1184                 forks = hal_remote_l32( forks_xp );
    1185 
    1186                 if( forks )  // decrement pending forks counter
     1322                // get number of pending forks from page descriptor
     1323                uint32_t forks = hal_remote_l32( forks_xp );
     1324
     1325                // decrement pending forks counter if required
     1326                if( forks )  hal_remote_atomic_add( forks_xp , -1 );
     1327
     1328                // release lock protecting "forks" counter
     1329                remote_busylock_release( lock_xp );
     1330
     1331                // release physical page if forks == 0
     1332                ppn_release = (forks == 0);
     1333            }
     1334            else              // vseg_type == DATA not in reference cluster
     1335            {
     1336                // no physical page release if not in reference cluster
     1337                ppn_release = false;
     1338            }
     1339
     1340            // release physical page to relevant kmem when required
     1341            if( ppn_release )
     1342            {
     1343                if( page_cxy == local_cxy )
    11871344                {
    1188                     // update forks counter
    1189                     hal_remote_atomic_add( forks_xp , -1 );
    1190 
    1191                     // release the lock protecting the page
    1192                     remote_busylock_release( lock_xp );
    1193                 } 
    1194                 else         // release physical page to relevant cluster
     1345                    req.type = KMEM_PAGE;
     1346                    req.ptr  = page_ptr;
     1347                    kmem_free( &req );
     1348                }
     1349                else
    11951350                {
    1196                     // release the lock protecting the page
    1197                     remote_busylock_release( lock_xp );
    1198 
    1199                     // release the page to kmem
    1200                     if( page_cxy == local_cxy )   // local cluster
    1201                     {
    1202                         req.type = KMEM_PAGE;
    1203                         req.ptr  = page_ptr;
    1204                         kmem_free( &req );
    1205                     }
    1206                     else                          // remote cluster
    1207                     {
    1208                         rpc_pmem_release_pages_client( page_cxy , page_ptr );
    1209                     }
    1210 
    1211 #if( DEBUG_VMM_DELETE_VSEG & 1 )
    1212 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1213 printk("- release ppn %x\n", ppn );
    1214 #endif
     1351                    rpc_pmem_release_pages_client( page_cxy , page_ptr );
    12151352                }
    1216 
    12171353            }
     1354
     1355#if( DEBUG_VMM_REMOVE_VSEG & 1 )
     1356if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1357{
     1358    if( ppn_release ) printk(" / released to kmem\n" );
     1359    else              printk("\n");
     1360}
     1361#endif
    12181362        }
    12191363    }
    12201364
    1221     // remove vseg from VSL and release vseg descriptor (if not MMAP)
     1365    // remove vseg from VSL
    12221366    vmm_detach_vseg_from_vsl( vmm , vseg );
    12231367
    1224 #if DEBUG_VMM_DELETE_VSEG
     1368    // release vseg descriptor depending on vseg type
     1369    if( vseg_type == VSEG_TYPE_STACK )
     1370    {
     1371        // release slot to local stack allocator
     1372        vmm_stack_free( vmm , vseg );
     1373
     1374        // release vseg descriptor to local kmem
     1375        vseg_free( vseg );
     1376    }
     1377    else if( (vseg_type == VSEG_TYPE_ANON) ||
     1378             (vseg_type == VSEG_TYPE_FILE) ||
     1379             (vseg_type == VSEG_TYPE_REMOTE) ) 
     1380    {
     1381        // release vseg to local mmap allocator
     1382        vmm_mmap_free( vmm , vseg );
     1383    }
     1384    else
     1385    {
     1386        // release vseg descriptor to local kmem
     1387        vseg_free( vseg );
     1388    }
     1389
     1390#if DEBUG_VMM_REMOVE_VSEG
    12251391cycle = (uint32_t)hal_get_cycles();
    1226 if( DEBUG_VMM_DELETE_VSEG < cycle )
    1227 printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
    1228 __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
    1229 #endif
    1230 
    1231 }  // end vmm_delete_vseg()
     1392if( DEBUG_VMM_REMOVE_VSEG < cycle )
     1393printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
     1394__FUNCTION__, this->process->pid, this->trdid,
     1395process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1396#endif
     1397
     1398}  // end vmm_remove_vseg()
     1399
     1400
     1401///////////////////////////////////
     1402void vmm_delete_vseg( pid_t    pid,
     1403                      intptr_t vaddr )
     1404{
     1405    process_t * process;    // local pointer on local process
     1406    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
     1407
     1408    // get local pointer on local process descriptor
     1409    process = cluster_get_local_process_from_pid( pid );
     1410
     1411    if( process == NULL )
     1412    {
     1413        printk("\n[WARNING] in %s : cannot get local process descriptor\n",
     1414        __FUNCTION__ );
     1415        return;
     1416    }
     1417
     1418    // get local pointer on local vseg containing vaddr
     1419    vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
     1420
     1421    if( vseg == NULL )
     1422    {
     1423        printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
     1424        __FUNCTION__ );
     1425        return;
     1426    }
     1427
     1428    // call relevant function
     1429    vmm_remove_vseg( process , vseg );
     1430
     1431}  // end vmm_delete_vseg
     1432
    12321433
    12331434/////////////////////////////////////////////
     
    12351436                              intptr_t   vaddr )
    12361437{
    1237     xptr_t   iter_xp;
    12381438    xptr_t   vseg_xp;
    12391439    vseg_t * vseg;
     1440    xptr_t   iter_xp;
    12401441
    12411442    // get extended pointers on VSL lock and root
    1242     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
     1443    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    12431444    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    12441445
     
    12491450    XLIST_FOREACH( root_xp , iter_xp )
    12501451    {
     1452        // get pointers on vseg
    12511453        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    12521454        vseg    = GET_PTR( vseg_xp );
    12531455
     1456        // return success when match
    12541457        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    12551458        {
     
    12621465    // return failure
    12631466    remote_rwlock_rd_release( lock_xp );
    1264 
    12651467    return NULL;
    12661468
     
    14621664        vseg_init_from_ref( vseg , vseg_xp );
    14631665
     1666        // build extended pointer on VSL lock
     1667        xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
     1668 
     1669        // take the VSL lock in write mode
     1670        remote_rwlock_wr_acquire( lock_xp );
     1671
    14641672        // register local vseg in local VSL
    14651673        vmm_attach_vseg_to_vsl( vmm , vseg );
     1674 
     1675        // release the VSL lock
     1676        remote_rwlock_wr_release( lock_xp );
    14661677    }   
    14671678
     
    14861697uint32_t   cycle   = (uint32_t)hal_get_cycles();
    14871698thread_t * this    = CURRENT_THREAD;
    1488 xptr_t     this_xp = XPTR( local_cxy , this );
    14891699if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
    14901700printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     
    17171927    error_t          error;           // value returned by called functions
    17181928
     1929#if DEBUG_VMM_HANDLE_PAGE_FAULT
     1930uint32_t   cycle = (uint32_t)hal_get_cycles();
     1931thread_t * this  = CURRENT_THREAD;
     1932if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     1933printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
     1934__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
     1935hal_vmm_display( process , true );
     1936#endif
     1937
    17191938    // get local vseg (access to reference VSL can be required)
    17201939    error = vmm_get_vseg( process,
     
    17231942    if( error )
    17241943    {
    1725         printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
    1726         __FUNCTION__ , vpn , process->pid );
     1944        printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n",
     1945        __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );
    17271946       
    17281947        return EXCP_USER_ERROR;
    17291948    }
    17301949
    1731  #if DEBUG_VMM_HANDLE_PAGE_FAULT
    1732 uint32_t   cycle = (uint32_t)hal_get_cycles();
    1733 thread_t * this  = CURRENT_THREAD;
     1950#if DEBUG_VMM_HANDLE_PAGE_FAULT
     1951cycle = (uint32_t)hal_get_cycles();
    17341952if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    1735 printk("\n[%s] threadr[%x,%x] enter for vpn %x / %s / cycle %d\n",
    1736 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
     1953printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
     1954__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
    17371955#endif
    17381956
     
    19712189    error_t          error;
    19722190
     2191    thread_t * this = CURRENT_THREAD;
     2192
    19732193#if DEBUG_VMM_HANDLE_COW
    19742194uint32_t   cycle   = (uint32_t)hal_get_cycles();
    1975 thread_t * this    = CURRENT_THREAD;
    1976 xptr_t     this_xp = XPTR( local_cxy , this );
    19772195if( DEBUG_VMM_HANDLE_COW < cycle )
    19782196printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    19792197__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     2198hal_vmm_display( process , true );
    19802199#endif
    19812200
     
    19912210    if( error )
    19922211    {
    1993         printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
    1994         __FUNCTION__, vpn, process->pid );
     2212        printk("\n[PANIC] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
     2213        __FUNCTION__, vpn, process->pid, this->trdid );
    19952214
    19962215        return EXCP_KERNEL_PANIC;