source: trunk/kernel/mm/vmm.c @ 628

Last change on this file since 628 was 625, checked in by alain, 6 years ago

Fix a bug in the vmm_remove_vseg() function: the physical pages
associated with a user DATA vseg were released to the kernel when
the target process descriptor was in the reference cluster.
These physical pages should be released only when the page
forks counter is zero.
All other modifications are cosmetic.

File size: 82.0 KB
RevLine 
[1]1/*
[611]2 * vmm.c - virtual memory manager related operations definition.
[1]3 *
4 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
5 *           Mohamed Lamine Karaoui (2015)
[625]6 *           Alain Greiner (2016,2017,2018,2019)
[21]7 *
[1]8 * Copyright (c) UPMC Sorbonne Universites
9 *
10 * This file is part of ALMOS-MKH.
11 *
12 * ALMOS-MKH is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2.0 of the License.
15 *
16 * ALMOS-MKH is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
23 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
[14]26#include <kernel_config.h>
[457]27#include <hal_kernel_types.h>
[1]28#include <hal_special.h>
29#include <hal_gpt.h>
[409]30#include <hal_vmm.h>
[577]31#include <hal_macros.h>
[1]32#include <printk.h>
[23]33#include <memcpy.h>
[567]34#include <remote_rwlock.h>
35#include <remote_queuelock.h>
[1]36#include <list.h>
[408]37#include <xlist.h>
[1]38#include <bits.h>
39#include <process.h>
40#include <thread.h>
41#include <vseg.h>
42#include <cluster.h>
43#include <scheduler.h>
44#include <vfs.h>
45#include <mapper.h>
46#include <page.h>
47#include <kmem.h>
48#include <vmm.h>
[585]49#include <hal_exception.h>
[1]50
51//////////////////////////////////////////////////////////////////////////////////
52//   Extern global variables
53//////////////////////////////////////////////////////////////////////////////////
54
[567]55extern  process_t  process_zero;      // allocated in cluster.c
[1]56
[625]57////////////////////////////////////////////////////////////////////////////////////////////
58// This static function is called by the vmm_create_vseg() function, and implements
59// the VMM STACK specific allocator.
60////////////////////////////////////////////////////////////////////////////////////////////
61// @ vmm      : [in]  pointer on VMM.
62// @ ltid     : [in]  requested slot == local user thread identifier.
63// @ vpn_base : [out] first allocated page
64// @ vpn_size : [out] number of allocated pages
65////////////////////////////////////////////////////////////////////////////////////////////
66static void vmm_stack_alloc( vmm_t  * vmm,
67                             ltid_t   ltid,
68                             vpn_t  * vpn_base,
69                             vpn_t  * vpn_size )
[21]70{
[625]71
72// check ltid argument
73assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
74"slot index %d too large for an user stack vseg", ltid );
75
76    // get stack allocator pointer
77    stack_mgr_t * mgr = &vmm->stack_mgr;
78
79    // get lock on stack allocator
80    busylock_acquire( &mgr->lock );
81
82// check requested slot is available
83assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
84"slot index %d already allocated", ltid );
85
86    // update bitmap
87    bitmap_set( &mgr->bitmap , ltid );
88
89    // release lock on stack allocator
90    busylock_release( &mgr->lock );
91
 92    // returns vpn_base and vpn_size (the first page of the slot is not allocated)
93    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
94    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
95
96} // end vmm_stack_alloc()
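////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative example (editor's sketch, not part of the build): assuming
// CONFIG_VMM_STACK_SIZE == 256 pages, the slot ltid == 2 covers the VPN interval
// [ mgr->vpn_base + 512 , mgr->vpn_base + 768 [. The first page of the slot is skipped,
// so the caller receives *vpn_base == mgr->vpn_base + 513 and *vpn_size == 255, and the
// skipped page stays unmapped between two consecutive stacks.
////////////////////////////////////////////////////////////////////////////////////////////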
97
98////////////////////////////////////////////////////////////////////////////////////////////
99// This static function is called by the vmm_remove_vseg() function, and implements
100// the VMM STACK specific deallocator.
101////////////////////////////////////////////////////////////////////////////////////////////
102// @ vmm      : [in] pointer on VMM.
103// @ vseg     : [in] pointer on released vseg.
104////////////////////////////////////////////////////////////////////////////////////////////
105static void vmm_stack_free( vmm_t  * vmm,
106                            vseg_t * vseg )
107{
108    // get stack allocator pointer
109    stack_mgr_t * mgr = &vmm->stack_mgr;
110
111    // compute slot index
112    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
113
114// check index
115assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
116"slot index %d too large for an user stack vseg", index );
117
118// check released slot is allocated
119assert( (bitmap_state( &mgr->bitmap , index ) == true),
120"released slot index %d non allocated", index );
121
122    // get lock on stack allocator
123    busylock_acquire( &mgr->lock );
124
125    // update stacks_bitmap
126    bitmap_clear( &mgr->bitmap , index );
127
128    // release lock on stack allocator
129    busylock_release( &mgr->lock );
130
131}  // end vmm_stack_free()
132
133////////////////////////////////////////////////////////////////////////////////////////////
134// This static function is called by the vmm_create_vseg() function, and implements
135// the VMM MMAP specific allocator.
136////////////////////////////////////////////////////////////////////////////////////////////
137// @ vmm      : [in] pointer on VMM.
138// @ npages   : [in] requested number of pages.
139// @ vpn_base : [out] first allocated page.
140// @ vpn_size : [out] actual number of allocated pages.
141////////////////////////////////////////////////////////////////////////////////////////////
142static error_t vmm_mmap_alloc( vmm_t * vmm,
143                               vpn_t   npages,
144                               vpn_t * vpn_base,
145                               vpn_t * vpn_size )
146{
147    uint32_t   order;
148    xptr_t     vseg_xp;
149    vseg_t   * vseg;
150    vpn_t      base;
151    vpn_t      size;
152    vpn_t      free;
153
154#if DEBUG_VMM_MMAP_ALLOC
155thread_t * this = CURRENT_THREAD;
156uint32_t cycle = (uint32_t)hal_get_cycles();
157if( DEBUG_VMM_MMAP_ALLOC < cycle )
158printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
159__FUNCTION__, this->process->pid, this->trdid, cycle );
160#endif
161
162    // the number of allocated pages must be a power of 2
163    // compute actual size and order
164    size  = POW2_ROUNDUP( npages );
165    order = bits_log2( size );
166
167    // get mmap allocator pointer
168    mmap_mgr_t * mgr = &vmm->mmap_mgr;
169
170    // build extended pointer on root of zombi_list[order]
171    xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
172
173    // take lock protecting zombi_lists
174    busylock_acquire( &mgr->lock );
175
176    // get vseg from zombi_list or from mmap zone
177    if( xlist_is_empty( root_xp ) )                   // from mmap zone
178    {
179        // check overflow
180        free = mgr->first_free_vpn;
181        if( (free + size) > mgr->vpn_size ) { busylock_release( &mgr->lock ); return -1; }
182
183        // update MMAP allocator
184        mgr->first_free_vpn += size;
185
186        // compute base
187        base = free;
188    }
189    else                                              // from zombi_list
190    {
191        // get pointer on zombi vseg from zombi_list
192        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
193        vseg    = GET_PTR( vseg_xp );
194
195        // remove vseg from free-list
196        xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
197
198        // compute base
199        base = vseg->vpn_base;
200    }
201
202    // release lock
203    busylock_release( &mgr->lock );
204
205#if DEBUG_VMM_MMAP_ALLOC
206cycle = (uint32_t)hal_get_cycles();
207if( DEBUG_VMM_MMAP_ALLOC < cycle )
208printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
209__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
210#endif
211
212    // returns vpn_base, vpn_size
213    *vpn_base = base;
214    *vpn_size = size;
215    return 0;
216
217}  // end vmm_mmap_alloc()
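////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative example (editor's sketch, not part of the build): a request for
// npages == 5 is rounded up to size == 8 (order == 3). If zombi_list[3] contains a vseg
// previously released by vmm_mmap_free(), its vpn_base is recycled; otherwise 8 new pages
// are taken at first_free_vpn, which is then incremented by 8.
////////////////////////////////////////////////////////////////////////////////////////////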
218
219////////////////////////////////////////////////////////////////////////////////////////////
220// This static function is called by the vmm_remove_vseg() function, and implements
221// the VMM MMAP specific deallocator.
222////////////////////////////////////////////////////////////////////////////////////////////
223// @ vmm      : [in] pointer on VMM.
224// @ vseg     : [in] pointer on released vseg.
225////////////////////////////////////////////////////////////////////////////////////////////
226static void vmm_mmap_free( vmm_t  * vmm,
227                           vseg_t * vseg )
228{
229    // get pointer on mmap allocator
230    mmap_mgr_t * mgr = &vmm->mmap_mgr;
231
232    // compute zombi_list order
233    uint32_t order = bits_log2( vseg->vpn_size );
234
235    // take lock protecting zombi lists
236    busylock_acquire( &mgr->lock );
237
238    // update relevant zombi_list
239    xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
240                     XPTR( local_cxy , &vseg->xlist ) );
241
242    // release lock
243    busylock_release( &mgr->lock );
244
245}  // end of vmm_mmap_free()
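////////////////////////////////////////////////////////////////////////////////////////////
// Note (editor's comment): a vseg released here is not freed; it stays in
// zombi_list[order] until vmm_mmap_alloc() recycles it for a request of the same order,
// or until vmm_destroy() finally releases the vseg descriptor.
////////////////////////////////////////////////////////////////////////////////////////////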
246
247////////////////////////////////////////////////////////////////////////////////////////////
248// This static function registers one vseg in the VSL of a local process descriptor.
249////////////////////////////////////////////////////////////////////////////////////////////
250// vmm       : [in] pointer on VMM.
251// vseg      : [in] pointer on vseg.
252////////////////////////////////////////////////////////////////////////////////////////////
253void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
254                             vseg_t * vseg )
255{
256    // update vseg descriptor
257    vseg->vmm = vmm;
258
259    // increment vsegs number
260    vmm->vsegs_nr++;
261
262    // add vseg in vmm list
263    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
264                    XPTR( local_cxy , &vseg->xlist ) );
265
266}  // end vmm_attach_vseg_to_vsl()
267
268////////////////////////////////////////////////////////////////////////////////////////////
269// This static function removes one vseg from the VSL of a local process descriptor.
270////////////////////////////////////////////////////////////////////////////////////////////
271// vmm       : [in] pointer on VMM.
272// vseg      : [in] pointer on vseg.
273////////////////////////////////////////////////////////////////////////////////////////////
274void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
275                               vseg_t * vseg )
276{
277    // update vseg descriptor
278    vseg->vmm = NULL;
279
280    // decrement vsegs number
281    vmm->vsegs_nr--;
282
283    // remove vseg from VSL
284    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
285
286}  // end vmm_detach_vseg_from_vsl()
287
288
289
290
291////////////////////////////////////////////
292error_t vmm_user_init( process_t * process )
293{
[1]294    vseg_t  * vseg_args;
295    vseg_t  * vseg_envs;
296    intptr_t  base;
297    intptr_t  size;
[614]298    uint32_t  i;
[1]299
[625]300#if DEBUG_VMM_USER_INIT
[567]301thread_t * this = CURRENT_THREAD;
[433]302uint32_t cycle = (uint32_t)hal_get_cycles();
[625]303if( DEBUG_VMM_USER_INIT )
[614]304printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", 
305__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]306#endif
[204]307
[1]308    // get pointer on VMM
309    vmm_t   * vmm = &process->vmm;
310
[625]311// check UTILS zone
[624]312assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <= 
313         (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
314         "UTILS zone too small\n" );
[21]315
[625]316// check STACK zone
[567]317assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
318(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
319"STACK zone too small\n");
[1]320
[625]321    // register "args" vseg in VSL
[624]322    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
[1]323    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
[406]324
[407]325    vseg_args = vmm_create_vseg( process,
326                                 VSEG_TYPE_DATA,
327                                 base,
328                                 size,
329                                 0,             // file_offset unused
330                                 0,             // file_size unused
331                                 XPTR_NULL,     // mapper_xp unused
332                                 local_cxy );
[415]333    if( vseg_args == NULL )
334    {
335        printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ );
336        return -1;
337    }
[204]338
[406]339    vmm->args_vpn_base = base;
[1]340
[625]341    // register "envs" vseg in VSL
[624]342    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
[1]343    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
[406]344
[407]345    vseg_envs = vmm_create_vseg( process,
346                                 VSEG_TYPE_DATA,
347                                 base,
348                                 size,
349                                 0,             // file_offset unused
350                                 0,             // file_size unused
351                                 XPTR_NULL,     // mapper_xp unused
352                                 local_cxy );
[415]353    if( vseg_envs == NULL )
354    {
355        printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ );
356        return -1;
357    }
[204]358
[406]359    vmm->envs_vpn_base = base;
[1]360
361    // initialize STACK allocator
362    vmm->stack_mgr.bitmap   = 0;
363    vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
[567]364    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );
[1]365
366    // initialize MMAP allocator
[407]367    vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
368    vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
369    vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
[567]370    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
[625]371    for( i = 0 ; i < 32 ; i++ )
372    {
373        xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
374    }
[1]375
[21]376    // initialize instrumentation counters
[409]377        vmm->pgfault_nr = 0;
[1]378
[124]379    hal_fence();
[1]380
[625]381#if DEBUG_VMM_USER_INIT
[433]382cycle = (uint32_t)hal_get_cycles();
[625]383if( DEBUG_VMM_USER_INIT )
[614]384printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", 
385__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]386#endif
[204]387
[415]388    return 0;
389
[625]390}  // end vmm_user_init()
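////////////////////////////////////////////////////////////////////////////////////////////
// Resulting user address space layout (editor's sketch, derived from the CONFIG_VMM_*
// constants used above; actual values are defined in kernel_config.h):
//
//   CONFIG_VMM_UTILS_BASE : "args" vseg (CONFIG_VMM_ARGS_SIZE pages),
//                           then "envs" vseg (CONFIG_VMM_ENVS_SIZE pages)
//   CONFIG_VMM_ELF_BASE   : CODE and DATA vsegs (registered when the user .elf is loaded)
//   CONFIG_VMM_HEAP_BASE  : MMAP zone (FILE / ANON / REMOTE vsegs), handled by mmap_mgr
//   CONFIG_VMM_STACK_BASE : STACK zone (one slot per thread), handled by stack_mgr,
//                           up to CONFIG_VMM_VSPACE_SIZE
////////////////////////////////////////////////////////////////////////////////////////////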
[204]391
[611]392//////////////////////////////////////////
[625]393void vmm_user_reset( process_t * process )
[567]394{
[625]395    xptr_t       vseg_xp;
396        vseg_t     * vseg;
397    vseg_type_t  vseg_type;
[567]398
[625]399#if DEBUG_VMM_USER_RESET
400uint32_t cycle = (uint32_t)hal_get_cycles();
401thread_t * this = CURRENT_THREAD;
402if( DEBUG_VMM_USER_RESET < cycle )
403printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
404__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
405#endif
[567]406
[625]407#if (DEBUG_VMM_USER_RESET & 1 )
408if( DEBUG_VMM_USER_RESET < cycle )
409hal_vmm_display( process , true );
410#endif
[567]411
[625]412    // get pointer on local VMM
413    vmm_t * vmm = &process->vmm;
[624]414
[625]415    // build extended pointer on VSL root and VSL lock
416    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
417    xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
[567]418
[625]419    // take the VSL lock
420        remote_rwlock_wr_acquire( lock_xp );
[567]421
[625]422    // scan the VSL to delete all non kernel vsegs
423    // (we don't use a FOREACH because vsegs are deleted during the scan)
424    xptr_t   iter_xp;
425    xptr_t   next_xp;
426        for( iter_xp = hal_remote_l64( root_xp ) ; 
427         iter_xp != root_xp ;
428         iter_xp = next_xp )
429        {
430        // save extended pointer on next item in xlist
431        next_xp = hal_remote_l64( iter_xp );
[611]432
[625]433        // get pointers on current vseg in VSL
434        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
435        vseg      = GET_PTR( vseg_xp );
436        vseg_type = vseg->type;
[567]437
[625]438#if( DEBUG_VMM_USER_RESET & 1 )
439if( DEBUG_VMM_USER_RESET < cycle )
440printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
441__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
442#endif
443        // delete non kernel vseg 
444        if( (vseg_type != VSEG_TYPE_KCODE) && 
445            (vseg_type != VSEG_TYPE_KDATA) && 
446            (vseg_type != VSEG_TYPE_KDEV ) )
447        {
448            // remove vseg from VSL
449            vmm_remove_vseg( process , vseg );
[567]450
[625]451#if( DEBUG_VMM_USER_RESET & 1 )
452if( DEBUG_VMM_USER_RESET < cycle )
453printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
454__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
455#endif
456        }
457        else
458        {
[567]459
[625]460#if( DEBUG_VMM_USER_RESET & 1 )
461if( DEBUG_VMM_USER_RESET < cycle )
462printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
463__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
464#endif
465        }
466        }  // end loop on vsegs in VSL
[567]467
[625]468    // release the VSL lock
469        remote_rwlock_wr_release( lock_xp );
[567]470
[625]471// FIXME: the process copies must also be handled here...
[611]472
[625]473#if DEBUG_VMM_USER_RESET
474cycle = (uint32_t)hal_get_cycles();
475if( DEBUG_VMM_USER_RESET < cycle )
476printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
477__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
478#endif
[611]479
[625]480}  // end vmm_user_reset()
[611]481
[595]482////////////////////////////////////////////////
[433]483void vmm_global_update_pte( process_t * process,
484                            vpn_t       vpn,
485                            uint32_t    attr,
486                            ppn_t       ppn )
[23]487{
[408]488    xlist_entry_t * process_root_ptr;
489    xptr_t          process_root_xp;
490    xptr_t          process_iter_xp;
[23]491
[408]492    xptr_t          remote_process_xp;
493    cxy_t           remote_process_cxy;
494    process_t     * remote_process_ptr;
495    xptr_t          remote_gpt_xp;
[23]496
[408]497    pid_t           pid;
498    cxy_t           owner_cxy;
499    lpid_t          owner_lpid;
[23]500
[438]501#if DEBUG_VMM_UPDATE_PTE
[433]502uint32_t cycle = (uint32_t)hal_get_cycles();
[595]503thread_t * this = CURRENT_THREAD;
[438]504if( DEBUG_VMM_UPDATE_PTE < cycle )
[595]505printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n",
506__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
[433]507#endif
508
[567]509// check cluster is reference
[585]510assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n");
[433]511
[408]512    // get extended pointer on root of process copies xlist in owner cluster
513    pid              = process->pid;
514    owner_cxy        = CXY_FROM_PID( pid );
515    owner_lpid       = LPID_FROM_PID( pid );
516    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
517    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
[23]518
[408]519    // loop on destination process copies
520    XLIST_FOREACH( process_root_xp , process_iter_xp )
521    {
522        // get cluster and local pointer on remote process
523        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
[433]524        remote_process_ptr = GET_PTR( remote_process_xp );
[408]525        remote_process_cxy = GET_CXY( remote_process_xp );
[407]526
[438]527#if (DEBUG_VMM_UPDATE_PTE & 0x1)
528if( DEBUG_VMM_UPDATE_PTE < cycle )
[595]529printk("\n[%s] threadr[%x,%x] handling vpn %x for process %x in cluster %x\n",
530__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
[433]531#endif
532
[408]533        // get extended pointer on remote gpt
534        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
535
[433]536        // update remote GPT
537        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
[408]538    } 
539
[438]540#if DEBUG_VMM_UPDATE_PTE
[433]541cycle = (uint32_t)hal_get_cycles();
[438]542if( DEBUG_VMM_UPDATE_PTE < cycle )
[595]543printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
544__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
[433]545#endif
546
547}  // end vmm_global_update_pte()
548
[408]549///////////////////////////////////////
550void vmm_set_cow( process_t * process )
551{
552    vmm_t         * vmm;
553
554    xlist_entry_t * process_root_ptr;
555    xptr_t          process_root_xp;
556    xptr_t          process_iter_xp;
557
558    xptr_t          remote_process_xp;
559    cxy_t           remote_process_cxy;
560    process_t     * remote_process_ptr;
561    xptr_t          remote_gpt_xp;
562
563    xptr_t          vseg_root_xp;
564    xptr_t          vseg_iter_xp;
565
566    xptr_t          vseg_xp;
567    vseg_t        * vseg;
568
569    pid_t           pid;
570    cxy_t           owner_cxy;
571    lpid_t          owner_lpid;
572
[438]573#if DEBUG_VMM_SET_COW
[595]574uint32_t   cycle = (uint32_t)hal_get_cycles();
575thread_t * this  = CURRENT_THREAD;
[438]576if( DEBUG_VMM_SET_COW < cycle )
[595]577printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
578__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
[433]579#endif
[408]580
[567]581// check cluster is reference
582assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
583"local cluster is not process reference cluster\n");
[408]584
585    // get pointer on reference VMM
586    vmm = &process->vmm;
587
588    // get extended pointer on root of process copies xlist in owner cluster
589    pid              = process->pid;
590    owner_cxy        = CXY_FROM_PID( pid );
591    owner_lpid       = LPID_FROM_PID( pid );
592    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
593    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
594
595    // get extended pointer on root of vsegs xlist from reference VMM
596    vseg_root_xp  = XPTR( local_cxy , &vmm->vsegs_root ); 
597
598    // loop on destination process copies
599    XLIST_FOREACH( process_root_xp , process_iter_xp )
600    {
601        // get cluster and local pointer on remote process
602        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
[433]603        remote_process_ptr = GET_PTR( remote_process_xp );
[408]604        remote_process_cxy = GET_CXY( remote_process_xp );
605
[595]606#if (DEBUG_VMM_SET_COW & 1)
[438]607if( DEBUG_VMM_SET_COW < cycle )
[595]608printk("\n[%s] thread[%x,%x] handling process %x in cluster %x\n",
609__FUNCTION__, this->process->pid, this->trdid, process->pid , remote_process_cxy );
[433]610#endif
[408]611
612        // get extended pointer on remote gpt
613        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );
614
615        // loop on vsegs in (local) reference process VSL
616        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
617        {
618            // get pointer on vseg
619            vseg_xp  = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
[433]620            vseg     = GET_PTR( vseg_xp );
[408]621
[567]622assert( (GET_CXY( vseg_xp ) == local_cxy) ,
623"all vsegs in reference VSL must be local\n" );
[408]624
625            // get vseg type, base and size
626            uint32_t type     = vseg->type;
627            vpn_t    vpn_base = vseg->vpn_base;
628            vpn_t    vpn_size = vseg->vpn_size;
629
[595]630#if (DEBUG_VMM_SET_COW & 1)
[438]631if( DEBUG_VMM_SET_COW < cycle )
[595]632printk("\n[%s] thread[%x,%x] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
633__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
[433]634#endif
635            // only DATA, ANON and REMOTE vsegs
[408]636            if( (type == VSEG_TYPE_DATA)  ||
637                (type == VSEG_TYPE_ANON)  ||
638                (type == VSEG_TYPE_REMOTE) )
639            {
[433]640                vpn_t      vpn;
641                uint32_t   attr;
642                ppn_t      ppn;
643                xptr_t     page_xp;
644                cxy_t      page_cxy;
645                page_t   * page_ptr;
646                xptr_t     forks_xp;
[469]647                xptr_t     lock_xp;
[433]648
649                // update flags in remote GPT
650                hal_gpt_set_cow( remote_gpt_xp,
651                                 vpn_base,
652                                 vpn_size ); 
653
654                // atomically increment pending forks counter in physical pages,
655                // for all vseg pages that are mapped in reference cluster
656                if( remote_process_cxy == local_cxy )
657                {
658                    // scan all pages in vseg
659                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
660                    {
661                        // get page attributes and PPN from reference GPT
[585]662                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn ); 
[433]663
664                        // atomically update pending forks counter if page is mapped
665                        if( attr & GPT_MAPPED )
666                        {
[469]667                            // get pointers and cluster on page descriptor
[433]668                            page_xp  = ppm_ppn2page( ppn );
669                            page_cxy = GET_CXY( page_xp );
670                            page_ptr = GET_PTR( page_xp );
[469]671
672                            // get extended pointers on "forks" and "lock"
[433]673                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
[469]674                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );
675
[567]676                            // take lock protecting "forks" counter
677                            remote_busylock_acquire( lock_xp );
678
[469]679                            // increment "forks"
[433]680                            hal_remote_atomic_add( forks_xp , 1 );
[567]681
682                            // release lock protecting "forks" counter
683                            remote_busylock_release( lock_xp );
[433]684                        }
685                    }   // end loop on vpn
686                }   // end if local
687            }   // end if vseg type
688        }   // end loop on vsegs
[408]689    }   // end loop on process copies
690 
[438]691#if DEBUG_VMM_SET_COW
[433]692cycle = (uint32_t)hal_get_cycles();
[438]693if( DEBUG_VMM_SET_COW < cycle )
[595]694printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
695__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
[433]696#endif
[408]697
698}  // end vmm_set_cow()
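////////////////////////////////////////////////////////////////////////////////////////////
// Note (editor's comment) on the "forks" counter lifecycle: vmm_set_cow() and
// vmm_fork_copy() increment page->forks for each mapped page of a DATA / ANON / REMOTE
// vseg that becomes copy-on-write. vmm_remove_vseg() decrements it for a DATA vseg in the
// reference cluster, and releases the physical page only when the counter is zero.
////////////////////////////////////////////////////////////////////////////////////////////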
699
700/////////////////////////////////////////////////
701error_t vmm_fork_copy( process_t * child_process,
702                       xptr_t      parent_process_xp )
703{
704    error_t     error;
705    cxy_t       parent_cxy;
706    process_t * parent_process;
707    vmm_t     * parent_vmm;
708    xptr_t      parent_lock_xp;
709    vmm_t     * child_vmm;
710    xptr_t      iter_xp;
711    xptr_t      parent_vseg_xp;
712    vseg_t    * parent_vseg;
713    vseg_t    * child_vseg;
714    uint32_t    type;
715    bool_t      cow;
716    vpn_t       vpn;           
717    vpn_t       vpn_base;
718    vpn_t       vpn_size;
[469]719    xptr_t      page_xp;        // extended pointer on page descriptor
[408]720    page_t    * page_ptr;
721    cxy_t       page_cxy;
[469]722    xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
[408]723    xptr_t      parent_root_xp;
724    bool_t      mapped; 
725    ppn_t       ppn;
726
[438]727#if DEBUG_VMM_FORK_COPY
[433]728uint32_t cycle = (uint32_t)hal_get_cycles();
[595]729thread_t * this = CURRENT_THREAD;
[438]730if( DEBUG_VMM_FORK_COPY < cycle )
[595]731printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
732__FUNCTION__ , this->process->pid, this->trdid, cycle );
[433]733#endif
[408]734
735    // get parent process cluster and local pointer
736    parent_cxy     = GET_CXY( parent_process_xp );
[433]737    parent_process = GET_PTR( parent_process_xp );
[408]738
739    // get local pointers on parent and child VMM
740    parent_vmm = &parent_process->vmm; 
741    child_vmm  = &child_process->vmm;
742
[625]743    // initialize the locks protecting the child VSL and GPT
744    remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
745        remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );
[408]746
747    // initialize the child VSL as empty
748    xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) );
749    child_vmm->vsegs_nr = 0;
750
[625]751    // create an empty child GPT
[408]752    error = hal_gpt_create( &child_vmm->gpt );
[407]753    if( error )
754    {
[408]755        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
756        return -1;
[407]757    }
758
[625]759    // build extended pointer on parent VSL root and lock
[408]760    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
[625]761    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
[408]762
[567]763    // take the lock protecting the parent VSL in read mode
764    remote_rwlock_rd_acquire( parent_lock_xp );
[415]765
[408]766    // loop on parent VSL xlist
767    XLIST_FOREACH( parent_root_xp , iter_xp )
[23]768    {
[625]769        // get pointers on current parent vseg
[408]770        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
[433]771        parent_vseg    = GET_PTR( parent_vseg_xp );
[23]772
[408]773        // get vseg type
[567]774        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );
[408]775       
[438]776#if DEBUG_VMM_FORK_COPY
[433]777cycle = (uint32_t)hal_get_cycles();
[438]778if( DEBUG_VMM_FORK_COPY < cycle )
[595]779printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
780__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
[567]781hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
[433]782#endif
[23]783
[623]784        // all parent vsegs - except the STACK and kernel vsegs - must be copied to the child VSL
785        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
786            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
[23]787        {
[408]788            // allocate memory for a new child vseg
789            child_vseg = vseg_alloc();
790            if( child_vseg == NULL )   // release all allocated vsegs
[23]791            {
[408]792                vmm_destroy( child_process );
793                printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
794                return -1;
[23]795            }
796
[408]797            // copy parent vseg to child vseg
798            vseg_init_from_ref( child_vseg , parent_vseg_xp );
[23]799
[625]800            // build extended pointer on VSL lock
801            xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
802 
803            // take the VSL lock in write mode
804            remote_rwlock_wr_acquire( lock_xp );
805
[408]806            // register child vseg in child VSL
[611]807            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
[407]808
[625]809            // release the VSL lock
810            remote_rwlock_wr_release( lock_xp );
811
[438]812#if DEBUG_VMM_FORK_COPY
[433]813cycle = (uint32_t)hal_get_cycles();
[438]814if( DEBUG_VMM_FORK_COPY < cycle )
[595]815printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
816__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
[567]817hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
[433]818#endif
[625]819            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
[408]820            if( type != VSEG_TYPE_CODE )
821            {
[625]822                // activate the COW for DATA, ANON, REMOTE vsegs only
[408]823                cow = ( type != VSEG_TYPE_FILE );
[23]824
[408]825                vpn_base = child_vseg->vpn_base;
826                vpn_size = child_vseg->vpn_size;
[23]827
[408]828                // scan pages in parent vseg
829                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
830                {
831                    error = hal_gpt_pte_copy( &child_vmm->gpt,
[625]832                                              vpn,
[408]833                                              XPTR( parent_cxy , &parent_vmm->gpt ),
834                                              vpn,
835                                              cow,
836                                              &ppn,
837                                              &mapped );
838                    if( error )
839                    {
840                        vmm_destroy( child_process );
841                        printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
842                        return -1;
843                    }
844
[433]845                    // increment pending forks counter in page if mapped
[408]846                    if( mapped )
847                    {
[469]848                        // get pointers and cluster on page descriptor
849                        page_xp  = ppm_ppn2page( ppn );
[408]850                        page_cxy = GET_CXY( page_xp );
[433]851                        page_ptr = GET_PTR( page_xp );
[408]852
[469]853                        // get extended pointers on "forks" and "lock"
854                        forks_xp = XPTR( page_cxy , &page_ptr->forks );
855                        lock_xp  = XPTR( page_cxy , &page_ptr->lock );
856
[567]857                        // get lock protecting "forks" counter
858                        remote_busylock_acquire( lock_xp );
859
[469]860                        // increment "forks"
861                        hal_remote_atomic_add( forks_xp , 1 );
862
[567]863                        // release lock protecting "forks" counter
864                        remote_busylock_release( lock_xp );
865
[438]866#if DEBUG_VMM_FORK_COPY
[433]867cycle = (uint32_t)hal_get_cycles();
[438]868if( DEBUG_VMM_FORK_COPY < cycle )
[595]869printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
870__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
[433]871#endif
[408]872                    }
873                }
874            }   // end if no code & no stack
875        }   // end if no stack
876    }   // end loop on vsegs
877
[567]878    // release the parent VSL lock in read mode
879    remote_rwlock_rd_release( parent_lock_xp );
[408]880
[623]881    // update child VMM with kernel vsegs
882    error = hal_vmm_kernel_update( child_process );
[415]883
884    if( error )
885    {
[623]886        printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );
[415]887        return -1;
888    }
889
[408]890    // initialize the child VMM STACK allocator
891    child_vmm->stack_mgr.bitmap   = 0;
892    child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
893
894    // initialize the child VMM MMAP allocator
[23]895    uint32_t i;
[408]896    child_vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
897    child_vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
898    child_vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
[625]899    for( i = 0 ; i < 32 ; i++ ) 
900    {
901        xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
902    }
[23]903
[178]904    // initialize instrumentation counters
[408]905        child_vmm->pgfault_nr    = 0;
[23]906
[408]907    // copy base addresses from parent VMM to child VMM
908    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
909    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
910    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
911    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
912    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));
[23]913
[408]914    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));
[23]915
[124]916    hal_fence();
[23]917
[438]918#if DEBUG_VMM_FORK_COPY
[433]919cycle = (uint32_t)hal_get_cycles();
[438]920if( DEBUG_VMM_FORK_COPY < cycle )
[595]921printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
922__FUNCTION__ , this->process->pid, this->trdid , cycle );
[433]923#endif
924
[23]925    return 0;
926
[408]927}  // vmm_fork_copy()
[204]928
[1]929///////////////////////////////////////
930void vmm_destroy( process_t * process )
931{
[408]932    xptr_t   vseg_xp;
[1]933        vseg_t * vseg;
934
[438]935#if DEBUG_VMM_DESTROY
[433]936uint32_t cycle = (uint32_t)hal_get_cycles();
[595]937thread_t * this = CURRENT_THREAD;
[438]938if( DEBUG_VMM_DESTROY < cycle )
[595]939printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
940__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
[433]941#endif
[416]942
[438]943#if (DEBUG_VMM_DESTROY & 1 )
[443]944if( DEBUG_VMM_DESTROY < cycle )
[624]945hal_vmm_display( process , true );
[437]946#endif
947
[433]948    // get pointer on local VMM
[1]949    vmm_t  * vmm = &process->vmm;
950
[625]951    // build extended pointer on VSL root, VSL lock and GPT lock
952    xptr_t   vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
953    xptr_t   vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
954    xptr_t   gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
[408]955
[625]956    // take the VSL lock
957    remote_rwlock_wr_acquire( vsl_lock_xp );
958
[611]959    // scan the VSL to delete all registered vsegs
[625]960    // (we don't use a FOREACH because vsegs are deleted during the scan)
961    xptr_t  iter_xp;
962    xptr_t  next_xp;
963        for( iter_xp = hal_remote_l64( vsl_root_xp ) ; 
964         iter_xp != vsl_root_xp ;
965         iter_xp = next_xp )
[1]966        {
[625]967        // save extended pointer on next item in xlist
968        next_xp = hal_remote_l64( iter_xp );
[409]969
[625]970        // get pointers on current vseg in VSL
971        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
972        vseg      = GET_PTR( vseg_xp );
973
[611]974        // delete vseg and release physical pages
[625]975        vmm_remove_vseg( process , vseg );
[409]976
[443]977#if( DEBUG_VMM_DESTROY & 1 )
978if( DEBUG_VMM_DESTROY < cycle )
[611]979printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
[443]980__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
981#endif
982
[1]983        }
984
[625]985    // release the VSL lock
986    remote_rwlock_wr_release( vsl_lock_xp );
987
988    // remove all registered MMAP vsegs
989    // from zombi_lists in MMAP allocator
[1]990    uint32_t i;
991    for( i = 0 ; i<32 ; i++ )
992    {
[625]993        // build extended pointer on zombi_list[i]
994        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
995 
996        // scan zombi_list[i]
997            while( !xlist_is_empty( root_xp ) )
[1]998            {
[625]999                    vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
1000            vseg    = GET_PTR( vseg_xp );
[443]1001
1002#if( DEBUG_VMM_DESTROY & 1 )
1003if( DEBUG_VMM_DESTROY < cycle )
[595]1004printk("\n[%s] found zombi vseg / vpn_base %x / vpn_size %d\n",
[443]1005__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1006#endif
[611]1007            // clean vseg descriptor
1008            vseg->vmm = NULL;
1009
[625]1010            // remove vseg from  zombi_list
[611]1011            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
1012
1013                    // release vseg descriptor
[1]1014            vseg_free( vseg );
[443]1015
1016#if( DEBUG_VMM_DESTROY & 1 )
1017if( DEBUG_VMM_DESTROY < cycle )
[595]1018printk("\n[%s] zombi vseg released / vpn_base %x / vpn_size %d\n",
[443]1019__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
1020#endif
[1]1021            }
1022    }
1023
[625]1024    // take the GPT lock
1025    remote_rwlock_wr_acquire( gpt_lock_xp );
1026
[409]1027    // release memory allocated to the GPT itself
[1]1028    hal_gpt_destroy( &vmm->gpt );
1029
[625]1030    // release the GPT lock
1031    remote_rwlock_wr_release( gpt_lock_xp );
1032
[438]1033#if DEBUG_VMM_DESTROY
[433]1034cycle = (uint32_t)hal_get_cycles();
[438]1035if( DEBUG_VMM_DESTROY < cycle )
[595]1036printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
1037__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
[433]1038#endif
[416]1039
[204]1040}  // end vmm_destroy()
1041
[1]1042/////////////////////////////////////////////////
1043vseg_t * vmm_check_conflict( process_t * process,
[21]1044                             vpn_t       vpn_base,
[1]1045                             vpn_t       vpn_size )
1046{
1047    vmm_t        * vmm = &process->vmm;
[408]1048
1049    // scan the VSL
[1]1050        vseg_t       * vseg;
[408]1051    xptr_t         iter_xp;
1052    xptr_t         vseg_xp;
1053    xptr_t         root_xp = XPTR( local_cxy , &vmm->vsegs_root );
[1]1054
[408]1055        XLIST_FOREACH( root_xp , iter_xp )
[1]1056        {
[408]1057                vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
[433]1058        vseg    = GET_PTR( vseg_xp );
[204]1059
[21]1060                if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
1061             (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
[1]1062        }
1063    return NULL;
1064
[204]1065}  // end vmm_check_conflict()
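////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative example (editor's sketch): a candidate vseg [vpn 0x100 , 0x110[ conflicts
// with an existing vseg [vpn 0x108 , 0x120[ because (0x100 + 0x10) > 0x108 and
// 0x100 < (0x108 + 0x18); two vsegs overlap as soon as neither ends before the other starts.
////////////////////////////////////////////////////////////////////////////////////////////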
1066
[1]1067
1068
[407]1069////////////////////////////////////////////////
1070vseg_t * vmm_create_vseg( process_t   * process,
1071                              vseg_type_t   type,
1072                          intptr_t      base,
1073                              uint32_t      size,
1074                          uint32_t      file_offset,
1075                          uint32_t      file_size,
1076                          xptr_t        mapper_xp,
1077                          cxy_t         cxy )
[1]1078{
1079    vseg_t     * vseg;          // created vseg pointer
[204]1080    vpn_t        vpn_base;      // first page index
[595]1081    vpn_t        vpn_size;      // number of pages covered by vseg
[1]1082        error_t      error;
1083
[438]1084#if DEBUG_VMM_CREATE_VSEG
[595]1085thread_t * this  = CURRENT_THREAD;
1086uint32_t   cycle = (uint32_t)hal_get_cycles();
[438]1087if( DEBUG_VMM_CREATE_VSEG < cycle )
[614]1088printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n",
1089__FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle );
[433]1090#endif
[21]1091
[407]1092    // get pointer on VMM
1093        vmm_t * vmm    = &process->vmm;
[21]1094
[204]1095    // compute base, size, vpn_base, vpn_size, depending on vseg type
[407]1096    // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs
[595]1097
[1]1098    if( type == VSEG_TYPE_STACK )
1099    {
1100        // get vpn_base and vpn_size from STACK allocator (the <base> argument carries the ltid for a STACK vseg)
[625]1101        vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );
[1]1102
1103        // compute vseg base and size from vpn_base and vpn_size
1104        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
1105        size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
1106    }
[595]1107    else if( type == VSEG_TYPE_FILE )
1108    {
1109        // compute page index (in mapper) for first byte
1110        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
1111
1112        // compute page index (in mapper) for last byte
1113        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
1114
1115        // compute offset in first page
1116        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;
1117
1118        // compute number of pages required in virtual space
1119        vpn_t    npages      = vpn_max - vpn_min + 1;
1120
1121        // get vpn_base and vpn_size from MMAP allocator
1122        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
1123        if( error )
1124        {
1125            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
1126                   __FUNCTION__ , process->pid , local_cxy );
1127            return NULL;
1128        }
1129
1130        // set the vseg base (not always aligned for FILE)
1131        base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; 
1132    }
[21]1133    else if( (type == VSEG_TYPE_ANON) ||
[1]1134             (type == VSEG_TYPE_REMOTE) )
1135    {
[595]1136        // compute number of required pages in virtual space
1137        vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
1138        if( size & CONFIG_PPM_PAGE_MASK) npages++;
1139       
[1]1140        // get vpn_base and vpn_size from MMAP allocator
1141        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
1142        if( error )
1143        {
1144            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
1145                   __FUNCTION__ , process->pid , local_cxy );
1146            return NULL;
1147        }
1148
[595]1149        // set vseg base (always aligned for ANON or REMOTE)
[1]1150        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
1151    }
[623]1152    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
[1]1153    {
[204]1154        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
1155        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
1156
1157        vpn_base = vpn_min;
1158            vpn_size = vpn_max - vpn_min + 1;
[1]1159    }
1160
1161    // check collisions
1162    vseg = vmm_check_conflict( process , vpn_base , vpn_size );
[624]1163
[1]1164    if( vseg != NULL )
1165    {
[614]1166        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n"
1167               "  overlap existing vseg [vpn_base %x / vpn_size %x]\n",
[407]1168        __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
[1]1169        return NULL;
1170    }
1171
1172    // allocate physical memory for vseg descriptor
1173        vseg = vseg_alloc();
1174        if( vseg == NULL )
1175        {
1176            printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",
[407]1177        __FUNCTION__ , process->pid );
[1]1178        return NULL;
1179        }
1180
[614]1181#if DEBUG_VMM_CREATE_VSEG
1182if( DEBUG_VMM_CREATE_VSEG < cycle )
1183printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",
1184__FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );
1185#endif
1186
[1]1187    // initialize vseg descriptor
[407]1188        vseg_init( vseg,
1189               type,
1190               base,
1191               size,
1192               vpn_base,
1193               vpn_size,
1194               file_offset,
1195               file_size,
1196               mapper_xp,
1197               cxy );
[1]1198
[625]1199    // build extended pointer on VSL lock
1200    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1201 
1202    // take the VSL lock in write mode
1203    remote_rwlock_wr_acquire( lock_xp );
1204
[408]1205    // attach vseg to VSL
[611]1206        vmm_attach_vseg_to_vsl( vmm , vseg );
[1]1207
[625]1208    // release the VSL lock
1209    remote_rwlock_wr_release( lock_xp );
1210
[438]1211#if DEBUG_VMM_CREATE_VSEG
[433]1212cycle = (uint32_t)hal_get_cycles();
[438]1213if( DEBUG_VMM_CREATE_VSEG < cycle )
[595]1214printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n",
1215__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
[433]1216#endif
[21]1217
[1]1218        return vseg;
1219
[406]1220}  // vmm_create_vseg()
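////////////////////////////////////////////////////////////////////////////////////////////
// Minimal usage sketch (editor's example, not part of the build): creating then removing
// an ANON vseg of <npages> pages in the local cluster. For ANON / REMOTE vsegs the <base>
// argument is ignored, because the MMAP allocator chooses the actual vpn_base.
//
//   vseg_t * vseg = vmm_create_vseg( process,
//                                    VSEG_TYPE_ANON,
//                                    0,                                  // base ignored
//                                    npages << CONFIG_PPM_PAGE_SHIFT,    // size in bytes
//                                    0, 0,                               // file_offset / file_size unused
//                                    XPTR_NULL,                          // no mapper
//                                    local_cxy );
//   if( vseg == NULL ) printk("\n[ERROR] cannot create ANON vseg\n");
//   ...
//   vmm_remove_vseg( process , vseg );
////////////////////////////////////////////////////////////////////////////////////////////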
1221
[625]1222
1223//////////////////////////////////////////
1224void vmm_remove_vseg( process_t * process,
1225                      vseg_t    * vseg )
[1]1226{
[625]1227    vmm_t     * vmm;        // local pointer on process VMM
1228    bool_t      is_ref;     // local process is reference process
1229    uint32_t    vseg_type;  // vseg type
[21]1230    vpn_t       vpn;        // VPN of current PTE
1231    vpn_t       vpn_min;    // VPN of first PTE
[1]1232    vpn_t       vpn_max;    // VPN of last PTE (excluded)
[409]1233    ppn_t       ppn;        // current PTE ppn value
1234    uint32_t    attr;       // current PTE attributes
1235    kmem_req_t  req;        // request to release memory
1236    xptr_t      page_xp;    // extended pointer on page descriptor
1237    cxy_t       page_cxy;   // page descriptor cluster
1238    page_t    * page_ptr;   // page descriptor pointer
[625]1239    xptr_t      count_xp;   // extended pointer on page refcount
1240    uint32_t    count;      // current value of page refcount
[1]1241
[625]1242// check arguments
1243assert( (process != NULL), "process argument is NULL" );
1244assert( (vseg    != NULL), "vseg argument is NULL" );
[409]1245
[625]1246    // compute is_ref
1247    is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
[1]1248
[625]1249    // get pointers on local process VMM
[611]1250    vmm = &process->vmm;
1251
[623]1252    // get relevant vseg infos
[624]1253    vseg_type = vseg->type;
1254    vpn_min   = vseg->vpn_base;
1255    vpn_max   = vpn_min + vseg->vpn_size;
[623]1256
[625]1257#if DEBUG_VMM_REMOVE_VSEG
1258uint32_t   cycle = (uint32_t)hal_get_cycles();
1259thread_t * this  = CURRENT_THREAD;
1260if( DEBUG_VMM_REMOVE_VSEG < cycle )
1261printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
1262__FUNCTION__, this->process->pid, this->trdid, 
1263process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
1264#endif
1265
1266    // loop on PTEs in GPT
[1]1267        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
1268    {
[625]1269        // get ppn and attr
1270        hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
[409]1271
[625]1272        if( attr & GPT_MAPPED )  // PTE is mapped
[409]1273        { 
[437]1274
[625]1275#if( DEBUG_VMM_REMOVE_VSEG & 1 )
1276if( DEBUG_VMM_REMOVE_VSEG < cycle )
1277printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
[437]1278#endif
[585]1279            // unmap GPT entry in local GPT
[625]1280            hal_gpt_reset_pte( &vmm->gpt , vpn );
[409]1281
[625]1282            // get pointers on physical page descriptor
1283            page_xp  = ppm_ppn2page( ppn );
1284            page_cxy = GET_CXY( page_xp );
1285            page_ptr = GET_PTR( page_xp );
[409]1286
[625]1287            // decrement page refcount
1288            count_xp = XPTR( page_cxy , &page_ptr->refcount );
1289            count    = hal_remote_atomic_add( count_xp , -1 );
[624]1290
[625]1291            // compute the ppn_release condition depending on vseg type
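            // (summary: FILE and KCODE/KDATA/KDEV pages are never released here;
            //  CODE and STACK pages are always released; ANON and REMOTE pages are
            //  released only in the reference cluster; DATA pages are released only
            //  in the reference cluster and only when the forks counter is zero)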
1292            bool_t ppn_release;
1293            if( (vseg_type == VSEG_TYPE_FILE)  ||
1294                (vseg_type == VSEG_TYPE_KCODE) || 
1295                (vseg_type == VSEG_TYPE_KDATA) || 
1296                (vseg_type == VSEG_TYPE_KDEV) )           
1297            {
1298                // no physical page release for FILE and KERNEL
1299                ppn_release = false;
1300            }
1301            else if( (vseg_type == VSEG_TYPE_CODE)  ||
1302                     (vseg_type == VSEG_TYPE_STACK) ) 
1303            {
1304                // always release physical page for private vsegs
1305                ppn_release = true;
1306            }
1307            else if( (vseg_type == VSEG_TYPE_ANON)  ||
1308                     (vseg_type == VSEG_TYPE_REMOTE) )
1309            {
1310                // release physical page if reference cluster
1311                ppn_release = is_ref;
1312            }
1313            else if( is_ref )  // vseg_type == DATA in reference cluster
1314            {
1315                // get extended pointers on forks and lock field in page descriptor
1316                xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
1317                xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
[433]1318
[625]1319                // take lock protecting "forks" counter
[621]1320                remote_busylock_acquire( lock_xp );
[623]1321
[625]1322                // get number of pending forks from page descriptor
1323                uint32_t forks = hal_remote_l32( forks_xp );
[623]1324
[625]1325                // decrement pending forks counter if required
1326                if( forks )  hal_remote_atomic_add( forks_xp , -1 );
[624]1327
[625]1328                // release lock protecting "forks" counter
1329                remote_busylock_release( lock_xp );
[624]1330
[625]1331                // release physical page if forks == 0
1332                ppn_release = (forks == 0); 
1333            }
1334            else              // vseg_type == DATA not in reference cluster
1335            {
1336                // no physical page release if not in reference cluster
1337                ppn_release = false;
1338            }
[611]1339
[625]1340            // release physical page to relevant kmem when required
1341            if( ppn_release )
1342            {
1343                if( page_cxy == local_cxy )
1344                {
1345                    req.type = KMEM_PAGE;
1346                    req.ptr  = page_ptr; 
1347                    kmem_free( &req );
[409]1348                }
[625]1349                else
1350                {
1351                    rpc_pmem_release_pages_client( page_cxy , page_ptr );
1352                }
1353            }
[623]1354
[625]1355#if( DEBUG_VMM_REMOVE_VSEG & 1 )
1356if( DEBUG_VMM_REMOVE_VSEG < cycle )
1357{
1358    if( ppn_release ) printk(" / released to kmem\n" );
1359    else              printk("\n");
1360}
1361#endif
[409]1362        }
[1]1363    }
[433]1364
[625]1365    // remove vseg from VSL
[611]1366    vmm_detach_vseg_from_vsl( vmm , vseg );
1367
[625]1368    // release vseg descriptor depending on vseg type
1369    if( vseg_type == VSEG_TYPE_STACK )
1370    {
1371        // release slot to local stack allocator
1372        vmm_stack_free( vmm , vseg );
1373
1374        // release vseg descriptor to local kmem
1375        vseg_free( vseg );
1376    }
1377    else if( (vseg_type == VSEG_TYPE_ANON) || 
1378             (vseg_type == VSEG_TYPE_FILE) || 
1379             (vseg_type == VSEG_TYPE_REMOTE) ) 
1380    {
1381        // release vseg to local mmap allocator
1382        vmm_mmap_free( vmm , vseg );
1383    }
1384    else
1385    {
1386        // release vseg descriptor to local kmem
1387        vseg_free( vseg );
1388    }
1389
1390#if DEBUG_VMM_REMOVE_VSEG
[433]1391cycle = (uint32_t)hal_get_cycles();
[625]1392if( DEBUG_VMM_REMOVE_VSEG < cycle )
1393printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
1394__FUNCTION__, this->process->pid, this->trdid, 
1395process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
[433]1396#endif
1397
[625]1398}  // end vmm_remove_vseg()
[1]1399
[625]1400
1401///////////////////////////////////
1402void vmm_delete_vseg( pid_t    pid,
1403                      intptr_t vaddr )
1404{
1405    process_t * process;    // local pointer on local process
1406    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
1407
1408    // get local pointer on local process descriptor
1409    process = cluster_get_local_process_from_pid( pid );
1410
1411    if( process == NULL )
1412    {
1413        printk("\n[WARNING] in %s : cannot get local process descriptor\n",
1414        __FUNCTION__ );
1415        return;
1416    }
1417
1418    // get local pointer on local vseg containing vaddr
1419    vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
1420
1421    if( vseg == NULL )
1422    {
1423        printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
1424        __FUNCTION__ );
1425        return;
1426    }
1427
1428    // call relevant function
1429    vmm_remove_vseg( process , vseg );
1430
1431}  // end vmm_delete_vseg()
1432
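////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch : how a caller that only knows a virtual address can use
// vmm_delete_vseg() to remove the vseg containing this address from the local
// process copy. The <example_unmap_vaddr> helper is hypothetical.
////////////////////////////////////////////////////////////////////////////////////
static inline void example_unmap_vaddr( process_t * process,
                                        intptr_t    vaddr )
{
    // identify and remove the vseg containing <vaddr> in the local cluster
    vmm_delete_vseg( process->pid , vaddr );
}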
1433
[611]1434/////////////////////////////////////////////
1435vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
1436                              intptr_t   vaddr )
[406]1437{
[408]1438    xptr_t   vseg_xp;
1439    vseg_t * vseg;
[625]1440    xptr_t   iter_xp;
[406]1441
[408]1442    // get extended pointers on VSL lock and root
[625]1443    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
[408]1444    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
[406]1445
[408]1446    // get lock protecting the VSL
[567]1447    remote_rwlock_rd_acquire( lock_xp );
[408]1448
1449    // scan the list of vsegs in VSL
1450    XLIST_FOREACH( root_xp , iter_xp )
[406]1451    {
[625]1452        // get pointers on vseg
[408]1453        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
[433]1454        vseg    = GET_PTR( vseg_xp );
[595]1455
[625]1456        // return success when match
[408]1457        if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
[595]1458        { 
[408]1459            // return success
[567]1460            remote_rwlock_rd_release( lock_xp );
[408]1461            return vseg;
1462        }
[406]1463    }
1464
[408]1465    // return failure
[567]1466    remote_rwlock_rd_release( lock_xp );
[408]1467    return NULL;
[406]1468
[595]1469}  // end vmm_vseg_from_vaddr()
[406]1470
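////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch : vmm_vseg_from_vaddr() can be used as a simple predicate to
// check that a user address belongs to a vseg registered in the local VSL.
// The <example_vaddr_is_registered> helper is hypothetical.
////////////////////////////////////////////////////////////////////////////////////
static inline bool_t example_vaddr_is_registered( vmm_t  * vmm,
                                                  intptr_t vaddr )
{
    // returns true when <vaddr> is covered by a vseg registered in the local VSL
    return ( vmm_vseg_from_vaddr( vmm , vaddr ) != NULL );
}
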
[1]1471/////////////////////////////////////////////
1472error_t vmm_resize_vseg( process_t * process,
1473                         intptr_t    base,
1474                         intptr_t    size )
1475{
[406]1476    error_t   error;
1477    vseg_t  * new;
1478    vpn_t     vpn_min;
1479    vpn_t     vpn_max;
[1]1480
[623]1481#if DEBUG_VMM_RESIZE_VSEG
1482uint32_t   cycle = (uint32_t)hal_get_cycles();
1483thread_t * this  = CURRENT_THREAD;
1484if( DEBUG_VMM_RESIZE_VSEG < cycle )
1485printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
1486__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
1487#endif
1488
[1]1489    // get pointer on process VMM
1490    vmm_t * vmm = &process->vmm;
1491
1492    intptr_t addr_min = base;
1493        intptr_t addr_max = base + size;
1494
1495    // get pointer on vseg
[595]1496        vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
[1]1497
[623]1498        if( vseg == NULL)
1499    {
1500        printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
1501        __FUNCTION__, base , size );
1502        return -1;
1503    }
[21]1504
[623]1505    // resize depends on unmapped region base and size
[611]1506        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
[1]1507    {
[623]1508        printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
1509        __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1510
[611]1511        error = -1;
[1]1512    }
[611]1513        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
[1]1514    {
[623]1515
1516#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1517if( DEBUG_VMM_RESIZE_VSEG < cycle )
1518printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
1519__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1520#endif
[611]1521        vmm_delete_vseg( process->pid , vseg->min );
[623]1522
1523#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1524if( DEBUG_VMM_RESIZE_VSEG < cycle )
1525printk("\n[%s] thread[%x,%x] deleted vseg\n",
1526__FUNCTION__, this->process->pid, this->trdid );
1527#endif
[1]1528        error = 0;
1529    }
[611]1530        else if( vseg->min == addr_min )                               // vseg must be resized
[1]1531    {
[623]1532
1533#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1534if( DEBUG_VMM_RESIZE_VSEG < cycle )
1535printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
1536__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1537#endif
1538        // update vseg min address
[406]1539        vseg->min = addr_max;
1540
1541        // update vpn_base and vpn_size
1542        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
1543        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
1544        vseg->vpn_base = vpn_min;
1545        vseg->vpn_size = vpn_max - vpn_min + 1;
[623]1546
1547#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1548if( DEBUG_VMM_RESIZE_VSEG < cycle )
1549printk("\n[%s] thread[%x,%x] changed vseg_min\n",
1550__FUNCTION__, this->process->pid, this->trdid );
1551#endif
[406]1552        error = 0;
[1]1553    }
[611]1554        else if( vseg->max == addr_max )                              // vseg must be resized
[1]1555    {
[623]1556
1557#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1558if( DEBUG_VMM_RESIZE_VSEG < cycle )
1559printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
1560__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1561#endif
[406]1562        // update vseg max address
1563        vseg->max = addr_min;
1564
1565        // update vpn_base and vpn_size
1566        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
1567        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
1568        vseg->vpn_base = vpn_min;
1569        vseg->vpn_size = vpn_max - vpn_min + 1;
[623]1570
1571#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1572if( DEBUG_VMM_RESIZE_VSEG < cycle )
1573printk("\n[%s] thread[%x,%x] changed vseg_max\n",
1574__FUNCTION__, this->process->pid, this->trdid );
1575#endif
[406]1576        error = 0;
[623]1577
[1]1578    }
[611]1579    else                                                          // vseg cut in three regions
[1]1580    {
[623]1581
1582#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1583if( DEBUG_VMM_RESIZE_VSEG < cycle )
1584printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
1585__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
1586#endif
[406]1587        // save initial vseg max address, and resize existing vseg
1588        intptr_t old_max = vseg->max;
1589        vseg->max = addr_min;
1590        // update vpn_base and vpn_size
1591        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
1592        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
1593        vseg->vpn_base = vpn_min;
1594        vseg->vpn_size = vpn_max - vpn_min + 1;
1595
1596        // create new vseg covering [addr_max , old_max[
[407]1597        new = vmm_create_vseg( process, 
1598                               vseg->type,
1599                               addr_max, 
1600                               (old_max - addr_max),
1601                               vseg->file_offset,
1602                               vseg->file_size,
1603                               vseg->mapper_xp,
1604                               vseg->cxy ); 
1605
[623]1606#if( DEBUG_VMM_RESIZE_VSEG & 1 )
1607if( DEBUG_VMM_RESIZE_VSEG < cycle )
1608printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n",
1609__FUNCTION__, this->process->pid, this->trdid );
1610#endif
1611
1612        if( new == NULL ) error = -1;
[406]1613        else              error = 0;
[1]1614    }
1615
[623]1616#if DEBUG_VMM_RESIZE_VSEG
1617if( DEBUG_VMM_RESIZE_VSEG < cycle )
1618printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
1619__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
1620#endif
[1]1621
1622        return error;
1623
[406]1624}  // vmm_resize_vseg()
1625
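////////////////////////////////////////////////////////////////////////////////////
// Worked example for the resize arithmetic above (the addresses are assumptions) :
// with 4 Kbytes pages (CONFIG_PPM_PAGE_SHIFT == 12), unmapping [0x18000 , 0x20000[
// from a vseg [0x10000 , 0x20000[ matches the "vseg->max == addr_max" case :
//   vseg->max  becomes 0x18000
//   vpn_min    = 0x10000 >> 12        = 0x10
//   vpn_max    = (0x18000 - 1) >> 12  = 0x17
//   vpn_base   = 0x10   /   vpn_size  = 0x17 - 0x10 + 1 = 8 pages
////////////////////////////////////////////////////////////////////////////////////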
[1]1626///////////////////////////////////////////
[388]1627error_t  vmm_get_vseg( process_t * process,
[394]1628                       intptr_t    vaddr,
[388]1629                       vseg_t   ** found_vseg )
[1]1630{
[595]1631    xptr_t    vseg_xp;
1632    vseg_t  * vseg;
1633    vmm_t   * vmm;
1634    error_t   error;
[1]1635
[440]1636    // get pointer on local VMM
1637    vmm = &process->vmm;
[1]1638
[440]1639    // try to get vseg from local VMM
[595]1640    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
[440]1641
[388]1642    if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
1643        {
1644        // get extended pointer on reference process
1645        xptr_t ref_xp = process->ref_xp;
[1]1646
[388]1647        // get cluster and local pointer on reference process
1648        cxy_t       ref_cxy = GET_CXY( ref_xp );
[433]1649        process_t * ref_ptr = GET_PTR( ref_xp );
[388]1650
1651        if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
1652
1653        // get extended pointer on reference vseg
[394]1654        rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
[388]1655           
[440]1656        if( error )   return -1;                // vseg not found => illegal user vaddr
[388]1657       
1658        // allocate a vseg in local cluster
1659        vseg = vseg_alloc();
1660
[440]1661        if( vseg == NULL ) return -1;           // cannot allocate a local vseg
[388]1662
1663        // initialise local vseg from reference
1664        vseg_init_from_ref( vseg , vseg_xp );
1665
[625]1666        // build extended pointer on VSL lock
1667        xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
1668 
1669        // take the VSL lock in write mode
1670        remote_rwlock_wr_acquire( lock_xp );
1671
[611]1672        // register local vseg in local VSL
1673        vmm_attach_vseg_to_vsl( vmm , vseg );
[625]1674 
1675        // release the VSL lock
1676        remote_rwlock_wr_release( lock_xp );
[388]1677    }   
[595]1678
[388]1679    // success
1680    *found_vseg = vseg;
[394]1681    return 0;
[388]1682
1683}  // end vmm_get_vseg()
1684
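////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch : typical call to vmm_get_vseg() from a fault handler that
// knows only the faulty virtual page number, as done by vmm_handle_page_fault()
// below. The <example_vseg_from_vpn> helper is hypothetical.
////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_vseg_from_vpn( process_t * process,
                                             vpn_t       vpn,
                                             vseg_t   ** vseg )
{
    // convert the vpn to a byte address, and search the vseg
    // (a missing local vseg is fetched from the reference cluster)
    return vmm_get_vseg( process,
                         (intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT,
                         vseg );
}
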
[407]1685//////////////////////////////////////////////////////////////////////////////////////
1686// This static function computes the target cluster to allocate a physical page
1687// for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required)
1688// and returns an extended pointer on the allocated page descriptor.
[585]1689// It can be called by a thread running in any cluster.
[407]1690// The vseg cannot have the FILE type.
1691//////////////////////////////////////////////////////////////////////////////////////
1692static xptr_t vmm_page_allocate( vseg_t * vseg,
1693                                 vpn_t    vpn )
1694{
[433]1695
[438]1696#if DEBUG_VMM_ALLOCATE_PAGE
[619]1697uint32_t   cycle   = (uint32_t)hal_get_cycles();
1698thread_t * this    = CURRENT_THREAD;
[438]1699if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
[595]1700printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
1701__FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
[433]1702#endif
1703
[407]1704    page_t     * page_ptr;
1705    cxy_t        page_cxy;
1706    kmem_req_t   req;
[577]1707    uint32_t     index;
[407]1708
[577]1709    uint32_t     type   = vseg->type;
1710    uint32_t     flags  = vseg->flags;
1711    uint32_t     x_size = LOCAL_CLUSTER->x_size;
1712    uint32_t     y_size = LOCAL_CLUSTER->y_size;
[407]1713
[567]1714// check vseg type
1715assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
[407]1716
1717    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
1718    {
[577]1719        index    = vpn & ((x_size * y_size) - 1);
1720        page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );
[561]1721
[577]1722        // if the cluster selected from the VPN's LSBs is not active, select one randomly
1723        if ( cluster_is_active( page_cxy ) == false )
1724        {
1725            page_cxy = cluster_random_select();
[561]1726        }
[407]1727    }
1728    else                          // other cases => cxy specified in vseg
1729    {
[561]1730        page_cxy = vseg->cxy;
[407]1731    }
1732
1733    // allocate a physical page from target cluster
1734    if( page_cxy == local_cxy )  // target cluster is the local cluster
1735    {
1736        req.type  = KMEM_PAGE;
1737        req.size  = 0;
1738        req.flags = AF_NONE;
1739        page_ptr  = (page_t *)kmem_alloc( &req );
1740    }
1741    else                           // target cluster is not the local cluster
1742    {
1743        rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr );
1744    }
1745
[438]1746#if DEBUG_VMM_ALLOCATE_PAGE
[595]1747cycle = (uint32_t)hal_get_cycles();
[438]1748if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
[595]1749printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
1750__FUNCTION__ , this->process->pid, this->trdid, vpn,
1751ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) , cycle );
[433]1752#endif
1753
[407]1754    if( page_ptr == NULL ) return XPTR_NULL;
1755    else                   return XPTR( page_cxy , page_ptr );
1756
1757}  // end vmm_page_allocate() 
1758
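////////////////////////////////////////////////////////////////////////////////////
// Worked example for the VSEG_DISTRIB placement above (the mesh size is an
// assumption) : on a 4x4 mesh (x_size == 4 / y_size == 4), the page for vpn 0x1237
// is allocated in cluster :
//   index    = 0x1237 & ((4 * 4) - 1)           = 7
//   page_cxy = HAL_CXY_FROM_XY( 7 / 4 , 7 % 4 ) = HAL_CXY_FROM_XY( 1 , 3 )
// so consecutive pages of the vseg are spread over the 16 clusters of the mesh.
////////////////////////////////////////////////////////////////////////////////////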
[313]1759////////////////////////////////////////
1760error_t vmm_get_one_ppn( vseg_t * vseg,
1761                         vpn_t    vpn,
1762                         ppn_t  * ppn )
1763{
1764    error_t    error;
[407]1765    xptr_t     page_xp;           // extended pointer on physical page descriptor
[606]1766    uint32_t   page_id;           // missing page index in vseg mapper
[406]1767    uint32_t   type;              // vseg type;
[313]1768
[406]1769    type      = vseg->type;
[606]1770    page_id   = vpn - vseg->vpn_base;
[313]1771
[438]1772#if DEBUG_VMM_GET_ONE_PPN
[595]1773uint32_t   cycle = (uint32_t)hal_get_cycles();
1774thread_t * this  = CURRENT_THREAD;
1775if( DEBUG_VMM_GET_ONE_PPN < cycle )
[606]1776printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id  %d / cycle %d\n",
1777__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
[433]1778#endif
[313]1779
[406]1780    // FILE type : get the physical page from the file mapper
[313]1781    if( type == VSEG_TYPE_FILE )
1782    {
[406]1783        // get extended pointer on mapper
[407]1784        xptr_t mapper_xp = vseg->mapper_xp;
[313]1785
[567]1786assert( (mapper_xp != XPTR_NULL),
1787"mapper not defined for a FILE vseg\n" );
[406]1788       
[606]1789        // get extended pointer on page descriptor
1790        page_xp = mapper_remote_get_page( mapper_xp , page_id );
[406]1791
[606]1792        if ( page_xp == XPTR_NULL ) return EINVAL;
[313]1793    }
1794
[406]1795    // Other types : allocate a physical page from target cluster,
[407]1796    // as defined by vseg type and vpn value
[313]1797    else
1798    {
[433]1799        // allocate one physical page
[407]1800        page_xp = vmm_page_allocate( vseg , vpn );
[406]1801
[407]1802        if( page_xp == XPTR_NULL ) return ENOMEM;
[313]1803
[406]1804        // initialise missing page from .elf file mapper for DATA and CODE types
[440]1805        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
[313]1806        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
1807        {
[406]1808            // get extended pointer on mapper
1809            xptr_t     mapper_xp = vseg->mapper_xp;
[313]1810
[567]1811assert( (mapper_xp != XPTR_NULL),
1812"mapper not defined for a CODE or DATA vseg\n" );
[406]1813       
1814            // compute missing page offset in vseg
[606]1815            uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;
[406]1816
[313]1817            // compute missing page offset in .elf file
[406]1818            uint32_t elf_offset = vseg->file_offset + offset;
[313]1819
[438]1820#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[469]1821if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
[595]1822printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
1823__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
[433]1824#endif
[406]1825            // compute extended pointer on page base
[407]1826            xptr_t base_xp  = ppm_page2base( page_xp );
[313]1827
[406]1828            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
1829            uint32_t file_size = vseg->file_size;
1830
1831            if( file_size < offset )                 // missing page fully in  BSS
[313]1832            {
[406]1833
[438]1834#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[469]1835if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
[595]1836printk("\n[%s] thread[%x,%x] for vpn  %x / fully in BSS\n",
1837__FUNCTION__, this->process->pid, this->trdid, vpn );
[433]1838#endif
[407]1839                if( GET_CXY( page_xp ) == local_cxy )
[313]1840                {
[315]1841                    memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE );
[313]1842                }
1843                else
1844                {
[315]1845                   hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );       
[313]1846                }
1847            }
[406]1848            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )  // fully in  mapper
[315]1849            {
[406]1850
[438]1851#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[469]1852if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
[595]1853printk("\n[%s] thread[%x,%x] for vpn  %x / fully in mapper\n",
1854__FUNCTION__, this->process->pid, this->trdid, vpn );
[433]1855#endif
[606]1856                error = mapper_move_kernel( mapper_xp,
1857                                            true,             // to_buffer
1858                                            elf_offset,
1859                                            base_xp,
1860                                            CONFIG_PPM_PAGE_SIZE ); 
[313]1861                if( error ) return EINVAL;
1862            }
[406]1863            else  // both in mapper and in BSS :
1864                  // - (file_size - offset)             bytes from mapper
1865                  // - (page_size + offset - file_size) bytes from BSS
[313]1866            {
[406]1867
[438]1868#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
[469]1869if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
[610]1870printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
[433]1871"      %d bytes from mapper / %d bytes from BSS\n",
[595]1872__FUNCTION__, this->process->pid, this->trdid, vpn,
[407]1873file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size  );
[433]1874#endif
[313]1875                // initialize mapper part
[606]1876                error = mapper_move_kernel( mapper_xp,
1877                                            true,         // to buffer
1878                                            elf_offset,
1879                                            base_xp,
1880                                            file_size - offset ); 
[313]1881                if( error ) return EINVAL;
1882
1883                // initialize BSS part
[407]1884                if( GET_CXY( page_xp ) == local_cxy )
[313]1885                {
[406]1886                    memset( GET_PTR( base_xp ) + file_size - offset , 0 , 
1887                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
[313]1888                }
1889                else
1890                {
[406]1891                   hal_remote_memset( base_xp + file_size - offset , 0 , 
1892                                      offset + CONFIG_PPM_PAGE_SIZE - file_size );
[313]1893                }
1894            }   
1895        }  // end initialisation for CODE or DATA types   
1896    } 
1897
1898    // return ppn
[407]1899    *ppn = ppm_page2ppn( page_xp );
[406]1900
[438]1901#if DEBUG_VMM_GET_ONE_PPN
[595]1902cycle = (uint32_t)hal_get_cycles();
1903if( DEBUG_VMM_GET_ONE_PPN < cycle )
1904printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle\n",
1905__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
[433]1906#endif
[406]1907
[313]1908    return 0;
1909
1910}  // end vmm_get_one_ppn()
1911
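////////////////////////////////////////////////////////////////////////////////////
// Worked example for the DATA page initialisation above (sizes are assumptions) :
// for a DATA vseg with file_size == 0x2800 and 4 Kbytes pages, a missing page is
// initialised as follows, depending on its offset in the vseg :
//   offset == 0x1000 : file_size >= 0x2000       => page fully copied from the mapper
//   offset == 0x2000 : 0x2000 < 0x2800 < 0x3000  => 0x800 bytes copied from the
//                      mapper, then 0x800 bytes zeroed (BSS)
//   offset == 0x3000 : file_size < 0x3000        => page fully zeroed (BSS)
////////////////////////////////////////////////////////////////////////////////////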
[585]1912///////////////////////////////////////////////////
1913error_t vmm_handle_page_fault( process_t * process,
1914                               vpn_t       vpn )
[1]1915{
[585]1916    vseg_t         * vseg;            // vseg containing vpn
1917    uint32_t         new_attr;        // new PTE_ATTR value
1918    ppn_t            new_ppn;         // new PTE_PPN value
1919    uint32_t         ref_attr;        // PTE_ATTR value in reference GPT
1920    ppn_t            ref_ppn;         // PTE_PPN value in reference GPT
1921    cxy_t            ref_cxy;         // reference cluster for missing vpn
1922    process_t      * ref_ptr;         // reference process for missing vpn
1923    xptr_t           local_gpt_xp;    // extended pointer on local GPT
1924    xptr_t           local_lock_xp;   // extended pointer on local GPT lock
1925    xptr_t           ref_gpt_xp;      // extended pointer on reference GPT
1926    xptr_t           ref_lock_xp;     // extended pointer on reference GPT lock
1927    error_t          error;           // value returned by called functions
[1]1928
[625]1929#if DEBUG_VMM_HANDLE_PAGE_FAULT
1930uint32_t   cycle = (uint32_t)hal_get_cycles();
1931thread_t * this  = CURRENT_THREAD;
1932if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
1933printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
1934__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
1935hal_vmm_display( process , true );
1936#endif
1937
[585]1938    // get local vseg (access to reference VSL can be required)
1939    error = vmm_get_vseg( process, 
1940                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
1941                          &vseg );
1942    if( error )
1943    {
[625]1944        printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n",
1945        __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );
[585]1946       
1947        return EXCP_USER_ERROR;
1948    }
1949
[625]1950#if DEBUG_VMM_HANDLE_PAGE_FAULT
1951cycle = (uint32_t)hal_get_cycles();
[585]1952if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
[625]1953printk("\n[%s] threadr[%x,%x] found vseg %s / cycle %d\n",
1954__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
[433]1955#endif
[406]1956
[585]1957    //////////////// private vseg => access only the local GPT
1958    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[438]1959    {
[585]1960        // build extended pointer on local GPT and local GPT lock
1961        local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
1962        local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
[407]1963
[585]1964        // take local GPT lock in write mode
1965        remote_rwlock_wr_acquire( local_lock_xp );
[407]1966
[585]1967        // check VPN still unmapped in local GPT
[595]1968
[585]1969        // do nothing if VPN has been mapped by a concurrent page fault
1970        hal_gpt_get_pte( local_gpt_xp,
1971                         vpn,
1972                         &new_attr,
1973                         &new_ppn );
[407]1974
[585]1975        if( (new_attr & GPT_MAPPED) == 0 )       // VPN still unmapped
1976        { 
1977            // allocate and initialise a physical page depending on the vseg type
1978            error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
[407]1979
[585]1980            if( error )
[408]1981            {
1982                printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
1983                __FUNCTION__ , process->pid , vpn );
[1]1984
[585]1985                // release local GPT lock in write mode
1986                remote_rwlock_wr_release( local_lock_xp );
[406]1987
[585]1988                return EXCP_KERNEL_PANIC;
[407]1989            }
1990
[408]1991            // define new_attr from vseg flags
[407]1992            new_attr = GPT_MAPPED | GPT_SMALL;
1993            if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
1994            if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
1995            if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
1996            if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;
1997
[585]1998            // set PTE (PPN & attribute) to local GPT
1999            error = hal_gpt_set_pte( local_gpt_xp,
[408]2000                                     vpn,
2001                                     new_attr,
2002                                     new_ppn );
[585]2003            if ( error )
[407]2004            {
[585]2005                printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n",
[407]2006                __FUNCTION__ , process->pid , vpn );
[585]2007
2008                // release local GPT lock in write mode
2009                remote_rwlock_wr_release( local_lock_xp );
2010
2011                return EXCP_KERNEL_PANIC;
[407]2012            }
2013        }
[585]2014
2015        // release local GPT lock in write mode
2016        remote_rwlock_wr_release( local_lock_xp );
2017
2018#if DEBUG_VMM_HANDLE_PAGE_FAULT
2019cycle = (uint32_t)hal_get_cycles();
2020if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
[595]2021printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
[585]2022__FUNCTION__, vpn, new_ppn, new_attr, cycle );
2023#endif
2024        return EXCP_NON_FATAL;
2025
2026    }   // end local GPT access
2027
2028    //////////// public vseg => access reference GPT
2029    else                               
2030    {
2031        // get reference process cluster and local pointer
2032        ref_cxy = GET_CXY( process->ref_xp );
2033        ref_ptr = GET_PTR( process->ref_xp );
2034
2035        // build extended pointer on reference GPT and reference GPT lock
2036        ref_gpt_xp  = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
2037        ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
2038
2039        // build extended pointer on local GPT and local GPT lock
2040        local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
2041        local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
2042
2043        // take reference GPT lock in read mode
2044        remote_rwlock_rd_acquire( ref_lock_xp );
2045
2046        // get PPN & attributes directly from the reference GPT
2047        // this can avoid a costly RPC for a false page fault
2048        hal_gpt_get_pte( ref_gpt_xp,
2049                         vpn,
2050                         &ref_attr,
2051                         &ref_ppn );
2052
2053        // release reference GPT lock in read mode
2054        remote_rwlock_rd_release( ref_lock_xp );
2055
2056        if( ref_attr & GPT_MAPPED )        // false page fault => update local GPT
[1]2057        {
[585]2058            // take local GPT lock in write mode
2059            remote_rwlock_wr_acquire( local_lock_xp );
2060           
2061            // check VPN still unmapped in local GPT
2062            hal_gpt_get_pte( local_gpt_xp,
2063                             vpn,
2064                             &new_attr,
2065                             &new_ppn );
[1]2066
[585]2067            if( (new_attr & GPT_MAPPED) == 0 )       // VPN still unmapped
2068            { 
2069                // update local GPT from reference GPT
2070                error = hal_gpt_set_pte( local_gpt_xp,
2071                                         vpn,
2072                                         ref_attr,
2073                                         ref_ppn );
2074                if( error )
2075                {
[595]2076                    printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n",
[585]2077                    __FUNCTION__ , process->pid , vpn );
2078
2079                    // release local GPT lock in write mode
2080                    remote_rwlock_wr_release( local_lock_xp );
2081           
2082                    return EXCP_KERNEL_PANIC;
2083                }
2084            }
2085            else    // VPN has been mapped by a concurrent page fault
2086            {
2087                // keep PTE from local GPT
2088                ref_attr = new_attr;
2089                ref_ppn  = new_ppn;
2090            }
2091
2092            // release local GPT lock in write mode
2093            remote_rwlock_wr_release( local_lock_xp );
2094           
2095#if DEBUG_VMM_HANDLE_PAGE_FAULT
[433]2096cycle = (uint32_t)hal_get_cycles();
[585]2097if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
[595]2098printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
[585]2099__FUNCTION__, vpn, ref_ppn, ref_attr, cycle );
[433]2100#endif
[585]2101            return EXCP_NON_FATAL;
2102        }
2103        else                            // true page fault => update reference GPT
2104        {
2105            // take reference GPT lock in write mode
2106            remote_rwlock_wr_acquire( ref_lock_xp );
2107           
2108            // check VPN still unmapped in reference GPT
2109            // do nothing if VPN has been mapped by a concurrent page fault
2110            hal_gpt_get_pte( ref_gpt_xp,
2111                             vpn,
2112                             &ref_attr,
2113                             &ref_ppn );
[406]2114
[585]2115            if( (ref_attr & GPT_MAPPED) == 0 )       // VPN actually unmapped
2116            { 
2117                // allocate and initialise a physical page depending on the vseg type
2118                error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
[1]2119
[585]2120                if( error )
2121                {
2122                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
2123                    __FUNCTION__ , process->pid , vpn );
[313]2124
[585]2125                   // release reference GPT lock in write mode
2126                   remote_rwlock_wr_release( ref_lock_xp );
2127                   
2128                   return EXCP_KERNEL_PANIC;
2129                }
[1]2130
[585]2131                // define new_attr from vseg flags
2132                new_attr = GPT_MAPPED | GPT_SMALL;
2133                if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
2134                if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
2135                if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
2136                if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;
[440]2137
[585]2138                // update reference GPT
2139                error = hal_gpt_set_pte( ref_gpt_xp,
2140                                         vpn,
2141                                         new_attr,
2142                                         new_ppn );
2143
2144                // update local GPT (protected by reference GPT lock)
2145                error |= hal_gpt_set_pte( local_gpt_xp,
2146                                          vpn,
2147                                          new_attr,
2148                                          new_ppn );
2149
2150                if( error )
2151                {
2152                    printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n",
2153                    __FUNCTION__ , process->pid , vpn );
2154
2155                    // release reference GPT lock in write mode
2156                    remote_rwlock_wr_release( ref_lock_xp );
2157
2158                    return EXCP_KERNEL_PANIC;
2159                }
2160            }
2161
2162            // release reference GPT lock in write mode
2163            remote_rwlock_wr_release( ref_lock_xp );
2164
[440]2165#if DEBUG_VMM_HANDLE_PAGE_FAULT
[585]2166cycle = (uint32_t)hal_get_cycles();
[469]2167if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
[595]2168printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
[585]2169__FUNCTION__, vpn, new_ppn, new_attr, cycle );
[435]2170#endif
[585]2171            return EXCP_NON_FATAL;
2172        }
2173    }
2174}   // end vmm_handle_page_fault()
[435]2175
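////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch : how an MMU exception handler can use the value returned by
// vmm_handle_page_fault(). The <example_mmu_fault> helper and its <bad_vaddr>
// argument are hypothetical; only the vmm_handle_page_fault() call and the three
// return codes come from the function above.
////////////////////////////////////////////////////////////////////////////////////
static inline error_t example_mmu_fault( process_t * process,
                                         intptr_t    bad_vaddr )
{
    // compute the faulty virtual page number
    vpn_t vpn = bad_vaddr >> CONFIG_PPM_PAGE_SHIFT;

    // try to map the missing page
    error_t error = vmm_handle_page_fault( process , vpn );

    // EXCP_NON_FATAL    : the fault has been fixed, the thread can resume
    // EXCP_USER_ERROR   : illegal user access, the process should be killed
    // EXCP_KERNEL_PANIC : unrecoverable kernel error
    return error;
}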
[585]2176////////////////////////////////////////////
2177error_t vmm_handle_cow( process_t * process,
2178                        vpn_t       vpn )
2179{
2180    vseg_t         * vseg;            // vseg containing vpn
2181    cxy_t            ref_cxy;         // reference cluster for missing vpn
2182    process_t      * ref_ptr;         // reference process for missing vpn
2183    xptr_t           gpt_xp;          // extended pointer on GPT
2184    xptr_t           gpt_lock_xp;     // extended pointer on GPT lock
2185    uint32_t         old_attr;        // current PTE_ATTR value
2186    ppn_t            old_ppn;         // current PTE_PPN value
2187    uint32_t         new_attr;        // new PTE_ATTR value
2188    ppn_t            new_ppn;         // new PTE_PPN value
2189    error_t          error;
[1]2190
[625]2191    thread_t * this = CURRENT_THREAD;
2192
[585]2193#if DEBUG_VMM_HANDLE_COW
[619]2194uint32_t   cycle   = (uint32_t)hal_get_cycles();
[585]2195if( DEBUG_VMM_HANDLE_COW < cycle )
[595]2196printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
[619]2197__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
[625]2198hal_vmm_display( process , true );
[585]2199#endif
2200
[610]2201    // access local GPT to get GPT_COW flag
2202    bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );
2203
2204    if( cow == false ) return EXCP_USER_ERROR;
2205
[585]2206    // get local vseg
2207    error = vmm_get_vseg( process, 
2208                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
2209                          &vseg );
[440]2210    if( error )
[1]2211    {
[625]2212        printk("\n[PANIC] in %s vpn %x in thread[%x,%x] not in a registered vseg\n",
2213        __FUNCTION__, vpn, process->pid, this->trdid );
[585]2214
2215        return EXCP_KERNEL_PANIC;
[440]2216    }
[407]2217
[619]2218#if( DEBUG_VMM_HANDLE_COW & 1)
2219if( DEBUG_VMM_HANDLE_COW < cycle )
2220printk("\n[%s] thread[%x,%x] get vseg for vpn %x\n",
2221__FUNCTION__, this->process->pid, this->trdid, vpn );
2222#endif
2223
[585]2224    // get reference GPT cluster and local pointer
2225    ref_cxy = GET_CXY( process->ref_xp );
2226    ref_ptr = GET_PTR( process->ref_xp );
[407]2227
[610]2228    // build extended pointers on the relevant GPT and GPT lock
[585]2229    // - access local GPT for a private vseg 
2230    // - access reference GPT for a public vseg
2231    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[440]2232    {
[585]2233        gpt_xp      = XPTR( local_cxy , &process->vmm.gpt );
2234        gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
[1]2235    }
[440]2236    else
[1]2237    {
[585]2238        gpt_xp      = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
2239        gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
[1]2240    }
2241
[585]2242    // take GPT lock in write mode
2243    remote_rwlock_wr_acquire( gpt_lock_xp );
[441]2244
[585]2245    // get current PTE from reference GPT
2246    hal_gpt_get_pte( gpt_xp,
2247                     vpn,
2248                     &old_attr,
2249                     &old_ppn );
[441]2250
[619]2251#if( DEBUG_VMM_HANDLE_COW & 1)
2252if( DEBUG_VMM_HANDLE_COW < cycle )
2253printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
2254__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
2255#endif
2256
[585]2257    // the PTE must be mapped for a COW
2258    if( (old_attr & GPT_MAPPED) == 0 )
2259    {
2260        printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n",
2261        __FUNCTION__, vpn, process->pid );
[407]2262
[585]2263        // release GPT lock in write mode
[619]2264        remote_rwlock_wr_release( gpt_lock_xp );
[407]2265
[585]2266        return EXCP_KERNEL_PANIC;
[407]2267    }
2268
[619]2269    // get pointers on physical page descriptor
[585]2270    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
2271    cxy_t    page_cxy = GET_CXY( page_xp );
2272    page_t * page_ptr = GET_PTR( page_xp );
[435]2273
[585]2274    // get extended pointers on forks and lock field in page descriptor
2275    xptr_t forks_xp       = XPTR( page_cxy , &page_ptr->forks );
2276    xptr_t forks_lock_xp  = XPTR( page_cxy , &page_ptr->lock );
[407]2277
[585]2278    // take lock protecting "forks" counter
2279    remote_busylock_acquire( forks_lock_xp );
[407]2280
[585]2281    // get number of pending forks from page descriptor
2282    uint32_t forks = hal_remote_l32( forks_xp );
[441]2283
[619]2284#if( DEBUG_VMM_HANDLE_COW & 1)
2285if( DEBUG_VMM_HANDLE_COW < cycle )
2286printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
2287__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
2288#endif
2289
[585]2290    if( forks )        // pending fork => allocate a new page, and copy old to new
2291    {
[619]2292        // decrement pending forks counter in page descriptor
2293        hal_remote_atomic_add( forks_xp , -1 );
2294
2295        // release lock protecting "forks" counter
2296        remote_busylock_release( forks_lock_xp );
2297
2298        // allocate a new page
[585]2299        page_xp = vmm_page_allocate( vseg , vpn );
[619]2300
[585]2301        if( page_xp == XPTR_NULL ) 
2302        {
2303            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
2304            __FUNCTION__ , vpn, process->pid );
[441]2305
[585]2306            // release GPT lock in write mode
2307            remote_rwlock_wr_release( gpt_lock_xp );
[441]2308
[585]2309            return EXCP_KERNEL_PANIC;
2310        }
[441]2311
[585]2312        // compute allocated page PPN
2313        new_ppn = ppm_page2ppn( page_xp );
[441]2314
[619]2315#if( DEBUG_VMM_HANDLE_COW & 1)
2316if( DEBUG_VMM_HANDLE_COW < cycle )
2317printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
2318__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
2319#endif
2320
[585]2321        // copy old page content to new page
[619]2322        hal_remote_memcpy( ppm_ppn2base( new_ppn ),
2323                           ppm_ppn2base( old_ppn ),
2324                           CONFIG_PPM_PAGE_SIZE );
[441]2325
[585]2326#if(DEBUG_VMM_HANDLE_COW & 1)
2327if( DEBUG_VMM_HANDLE_COW < cycle )
[619]2328printk("\n[%s] thread[%x,%x] copied old page to new page\n",
2329__FUNCTION__, this->process->pid, this->trdid );
[585]2330#endif
[440]2331
[585]2332    }             
2333    else               // no pending fork => keep the existing page
2334    {
[619]2335        // release lock protecting "forks" counter
2336        remote_busylock_release( forks_lock_xp );
[1]2337
[585]2338#if(DEBUG_VMM_HANDLE_COW & 1)
2339if( DEBUG_VMM_HANDLE_COW < cycle )
[619]2340printk("\n[%s] thread[%x,%x]  no pending forks / keep existing PPN %x\n",
2341__FUNCTION__, this->process->pid, this->trdid, old_ppn );
[585]2342#endif
2343        new_ppn = old_ppn;
2344    }
[1]2345
[585]2346    // build new_attr : reset COW and set WRITABLE,
2347    new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
2348
[619]2349    // update the relevant GPT
[585]2350    // - private vseg => update local GPT
2351    // - public vseg => update all GPT copies
2352    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
[1]2353    {
[585]2354        hal_gpt_set_pte( gpt_xp,
2355                         vpn,
2356                         new_attr,
2357                         new_ppn );
[1]2358    }
[585]2359    else
[1]2360    {
[585]2361        if( ref_cxy == local_cxy )                  // reference cluster is local
2362        {
2363            vmm_global_update_pte( process,
2364                                   vpn,
2365                                   new_attr,
2366                                   new_ppn );
2367        }
2368        else                                        // reference cluster is remote
2369        {
2370            rpc_vmm_global_update_pte_client( ref_cxy,
2371                                              ref_ptr,
2372                                              vpn,
2373                                              new_attr,
2374                                              new_ppn );
2375        }
[1]2376    }
2377
[585]2378    // release GPT lock in write mode
2379    remote_rwlock_wr_release( gpt_lock_xp );
[21]2380
[585]2381#if DEBUG_VMM_HANDLE_COW
2382cycle = (uint32_t)hal_get_cycles();
2383if( DEBUG_VMM_HANDLE_COW < cycle )
[595]2384printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
[619]2385__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
[585]2386#endif
[313]2387
[585]2388     return EXCP_NON_FATAL;
[1]2389
[585]2390}   // end vmm_handle_cow()
2391
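////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch : the PTE attributes rewrite performed by vmm_handle_cow()
// above, isolated in a helper for clarity : the COW flag is cleared and the page
// becomes directly writable. The <example_cow_new_attr> name is hypothetical.
////////////////////////////////////////////////////////////////////////////////////
static inline uint32_t example_cow_new_attr( uint32_t old_attr )
{
    // reset GPT_COW and set GPT_WRITABLE
    return (old_attr | GPT_WRITABLE) & (~GPT_COW);
}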