1 | /* |
---|
2 | * vmm.c - virtual memory manager related operations definition. |
---|
3 | * |
---|
4 | * Authors Ghassan Almaless (2008,2009,2010,2011,2012) |
---|
5 | * Alain Greiner (2016,2017,2018,2019,2020) |
---|
6 | * |
---|
7 | * Copyright (c) UPMC Sorbonne Universites |
---|
8 | * |
---|
9 | * This file is part of ALMOS-MKH. |
---|
10 | * |
---|
11 | * ALMOS-MKH is free software; you can redistribute it and/or modify it |
---|
12 | * under the terms of the GNU General Public License as published by |
---|
13 | * the Free Software Foundation; version 2.0 of the License. |
---|
14 | * |
---|
15 | * ALMOS-MKH is distributed in the hope that it will be useful, but |
---|
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
18 | * General Public License for more details. |
---|
19 | * |
---|
20 | * You should have received a copy of the GNU General Public License |
---|
21 | * along with ALMOS-MKH; if not, write to the Free Software Foundation, |
---|
22 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
---|
23 | */ |
---|
24 | |
---|
25 | #include <kernel_config.h> |
---|
26 | #include <hal_kernel_types.h> |
---|
27 | #include <hal_special.h> |
---|
28 | #include <hal_gpt.h> |
---|
29 | #include <hal_vmm.h> |
---|
30 | #include <hal_irqmask.h> |
---|
31 | #include <hal_macros.h> |
---|
32 | #include <printk.h> |
---|
33 | #include <memcpy.h> |
---|
34 | #include <remote_queuelock.h> |
---|
35 | #include <list.h> |
---|
36 | #include <xlist.h> |
---|
37 | #include <bits.h> |
---|
38 | #include <process.h> |
---|
39 | #include <thread.h> |
---|
40 | #include <vseg.h> |
---|
41 | #include <cluster.h> |
---|
42 | #include <scheduler.h> |
---|
43 | #include <vfs.h> |
---|
44 | #include <mapper.h> |
---|
45 | #include <page.h> |
---|
46 | #include <kmem.h> |
---|
47 | #include <vmm.h> |
---|
48 | #include <hal_exception.h> |
---|
49 | |
---|
50 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
51 | // Extern global variables |
---|
52 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
53 | |
---|
54 | extern process_t process_zero; // allocated in cluster.c |
---|
55 | |
---|
56 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
57 | // This static function is called by the vmm_user_init() function. |
---|
58 | // It initialises the free lists of vsegs used by the VMM MMAP allocator. |
---|
59 | // It makes the assumption that HEAP_BASE == 1 Gbytes and HEAP_SIZE == 2 Gbytes. |
---|
60 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
61 | static void vmm_stack_init( vmm_t * vmm ) |
---|
62 | { |
---|
63 | |
---|
64 | // check STACK zone |
---|
65 | assert( __FUNCTION__, ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <= |
---|
66 | (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n"); |
---|
67 | |
---|
68 | // get pointer on STACK allocator |
---|
69 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
70 | |
---|
71 | mgr->bitmap = 0; |
---|
72 | mgr->vpn_base = CONFIG_VMM_STACK_BASE; |
---|
73 | busylock_init( &mgr->lock , LOCK_VMM_STACK ); |
---|
74 | |
---|
75 | } |
---|
76 | |
---|
77 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
78 | // This static function is called by the vmm_create_vseg() function, and implements |
---|
79 | // the VMM STACK specific allocator. Depending on the local thread index <ltid>, |
---|
80 | // it ckeks availability of the corresponding slot in the process STACKS region, |
---|
81 | // allocates a vseg descriptor, and initializes the "vpn_base" and "vpn_size" fields. |
---|
82 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
83 | // @ vmm : [in] pointer on VMM. |
---|
84 | // @ ltid : [in] requested slot == local user thread identifier. |
---|
85 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
86 | static vseg_t * vmm_stack_alloc( vmm_t * vmm, |
---|
87 | ltid_t ltid ) |
---|
88 | { |
---|
89 | |
---|
90 | // check ltid argument |
---|
91 | assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)), |
---|
92 | "slot index %d too large for an user stack vseg", ltid ); |
---|
93 | |
---|
94 | // get stack allocator pointer |
---|
95 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
96 | |
---|
97 | // get lock protecting stack allocator |
---|
98 | busylock_acquire( &mgr->lock ); |
---|
99 | |
---|
100 | // check requested slot is available |
---|
101 | assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , ltid ) == false), |
---|
102 | "slot index %d already allocated", ltid ); |
---|
103 | |
---|
104 | // allocate a vseg descriptor |
---|
105 | vseg_t * vseg = vseg_alloc(); |
---|
106 | |
---|
107 | if( vseg == NULL ) |
---|
108 | { |
---|
109 | // release lock protecting free lists |
---|
110 | busylock_release( &mgr->lock ); |
---|
111 | |
---|
112 | printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n", |
---|
113 | __FUNCTION__ , local_cxy ); |
---|
114 | |
---|
115 | return NULL; |
---|
116 | } |
---|
117 | |
---|
118 | // update bitmap |
---|
119 | bitmap_set( &mgr->bitmap , ltid ); |
---|
120 | |
---|
121 | // release lock on stack allocator |
---|
122 | busylock_release( &mgr->lock ); |
---|
123 | |
---|
124 | // set "vpn_base" & "vpn_size" fields (first page non allocated) |
---|
125 | vseg->vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1; |
---|
126 | vseg->vpn_size = CONFIG_VMM_STACK_SIZE - 1; |
---|
127 | |
---|
128 | return vseg; |
---|
129 | |
---|
130 | } // end vmm_stack_alloc() |
---|
131 | |
---|
132 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
133 | // This static function is called by the vmm_remove_vseg() function, and implements |
---|
134 | // the VMM STACK specific desallocator. |
---|
135 | // It updates the bitmap to release the corresponding slot in the process STACKS region, |
---|
136 | // and releases memory allocated to vseg descriptor. |
---|
137 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
138 | // @ vmm : [in] pointer on VMM. |
---|
139 | // @ vseg : [in] pointer on released vseg. |
---|
140 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
141 | static void vmm_stack_free( vmm_t * vmm, |
---|
142 | vseg_t * vseg ) |
---|
143 | { |
---|
144 | // get stack allocator pointer |
---|
145 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
146 | |
---|
147 | // compute slot index |
---|
148 | uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE; |
---|
149 | |
---|
150 | // check index |
---|
151 | assert( __FUNCTION__, (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)), |
---|
152 | "slot index %d too large for an user stack vseg", index ); |
---|
153 | |
---|
154 | // check released slot is allocated |
---|
155 | assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , index ) == true), |
---|
156 | "released slot index %d non allocated", index ); |
---|
157 | |
---|
158 | // get lock on stack allocator |
---|
159 | busylock_acquire( &mgr->lock ); |
---|
160 | |
---|
161 | // update stacks_bitmap |
---|
162 | bitmap_clear( &mgr->bitmap , index ); |
---|
163 | |
---|
164 | // release lock on stack allocator |
---|
165 | busylock_release( &mgr->lock ); |
---|
166 | |
---|
167 | // release memory allocated to vseg descriptor |
---|
168 | vseg_free( vseg ); |
---|
169 | |
---|
170 | } // end vmm_stack_free() |
---|
171 | |
---|
172 | |
---|
173 | |
---|
174 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
175 | // This function display the current state of the VMM MMAP allocator of a process VMM |
---|
176 | // identified by the <vmm> argument. |
---|
177 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
178 | void vmm_mmap_display( vmm_t * vmm ) |
---|
179 | { |
---|
180 | uint32_t order; |
---|
181 | xptr_t root_xp; |
---|
182 | xptr_t iter_xp; |
---|
183 | |
---|
184 | // get pointer on process |
---|
185 | process_t * process = (process_t *)(((char*)vmm) - OFFSETOF( process_t , vmm )); |
---|
186 | |
---|
187 | // get process PID |
---|
188 | pid_t pid = process->pid; |
---|
189 | |
---|
190 | // get pointer on VMM MMAP allocator |
---|
191 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
192 | |
---|
193 | // display header |
---|
194 | printk("***** VMM MMAP allocator / process %x *****\n", pid ); |
---|
195 | |
---|
196 | // scan the array of free lists of vsegs |
---|
197 | for( order = 0 ; order <= CONFIG_VMM_HEAP_MAX_ORDER ; order++ ) |
---|
198 | { |
---|
199 | root_xp = XPTR( local_cxy , &mgr->free_list_root[order] ); |
---|
200 | |
---|
201 | if( !xlist_is_empty( root_xp ) ) |
---|
202 | { |
---|
203 | printk(" - %d (%x pages) : ", order , 1<<order ); |
---|
204 | |
---|
205 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
206 | { |
---|
207 | xptr_t vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
208 | vseg_t * vseg = GET_PTR( vseg_xp ); |
---|
209 | |
---|
210 | printk("%x | ", vseg->vpn_base ); |
---|
211 | } |
---|
212 | |
---|
213 | printk("\n"); |
---|
214 | } |
---|
215 | } |
---|
216 | } // end vmm_mmap_display() |
---|
217 | |
---|
218 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
219 | // This static function is called by the vmm_user_init() function. |
---|
220 | // It initialises the free lists of vsegs used by the VMM MMAP allocator. |
---|
221 | // TODO this function is only valid for 32 bits cores, and makes three assumptions: |
---|
222 | // HEAP_BASE == 1 Gbytes / HEAP_SIZE == 2 Gbytes / MMAP_MAX_SIZE == 1 Gbytes |
---|
223 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
224 | void vmm_mmap_init( vmm_t * vmm ) |
---|
225 | { |
---|
226 | |
---|
227 | // check HEAP base and size |
---|
228 | assert( __FUNCTION__, (CONFIG_VMM_HEAP_BASE == 0x40000) & (CONFIG_VMM_STACK_BASE == 0xc0000), |
---|
229 | "CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" ); |
---|
230 | |
---|
231 | // check MMAP vseg max order |
---|
232 | assert( __FUNCTION__, (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" ); |
---|
233 | |
---|
234 | // get pointer on MMAP allocator |
---|
235 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
236 | |
---|
237 | // initialize HEAP base and size |
---|
238 | mgr->vpn_base = CONFIG_VMM_HEAP_BASE; |
---|
239 | mgr->vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; |
---|
240 | |
---|
241 | // initialize lock |
---|
242 | busylock_init( &mgr->lock , LOCK_VMM_MMAP ); |
---|
243 | |
---|
244 | // initialize free lists |
---|
245 | uint32_t i; |
---|
246 | for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ ) |
---|
247 | { |
---|
248 | xlist_root_init( XPTR( local_cxy , &mgr->free_list_root[i] ) ); |
---|
249 | } |
---|
250 | |
---|
251 | // allocate and register first 1 Gbytes vseg |
---|
252 | vseg_t * vseg0 = vseg_alloc(); |
---|
253 | |
---|
254 | assert( __FUNCTION__, (vseg0 != NULL) , "cannot allocate vseg" ); |
---|
255 | |
---|
256 | vseg0->vpn_base = CONFIG_VMM_HEAP_BASE; |
---|
257 | vseg0->vpn_size = CONFIG_VMM_HEAP_BASE; |
---|
258 | |
---|
259 | xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ), |
---|
260 | XPTR( local_cxy , &vseg0->xlist ) ); |
---|
261 | |
---|
262 | // allocate and register second 1 Gbytes vseg |
---|
263 | vseg_t * vseg1 = vseg_alloc(); |
---|
264 | |
---|
265 | assert( __FUNCTION__, (vseg1 != NULL) , "cannot allocate vseg" ); |
---|
266 | |
---|
267 | vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1; |
---|
268 | vseg1->vpn_size = CONFIG_VMM_HEAP_BASE; |
---|
269 | |
---|
270 | xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ), |
---|
271 | XPTR( local_cxy , &vseg1->xlist ) ); |
---|
272 | |
---|
273 | #if DEBUG_VMM_MMAP |
---|
274 | thread_t * this = CURRENT_THREAD; |
---|
275 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
276 | printk("\n[%s] thread[%x,%x] / cycle %d\n", |
---|
277 | __FUNCTION__, this->process->pid, this->trdid, cycle ); |
---|
278 | vmm_mmap_display( vmm ); |
---|
279 | #endif |
---|
280 | |
---|
281 | } // end vmm_mmap_init() |
---|
282 | |
---|
283 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
284 | // This static function is called by the vmm_create_vseg() function, and implements |
---|
285 | // the VMM MMAP specific allocator. Depending on the requested number of pages <npages>, |
---|
286 | // it get a free vseg from the relevant free_list, and initializes the "vpn_base" and |
---|
287 | // "vpn_size" fields. |
---|
288 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
289 | // @ vmm : [in] pointer on VMM. |
---|
290 | // @ npages : [in] requested number of pages. |
---|
291 | // @ returns local pointer on vseg if success / returns NULL if failure. |
---|
292 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
293 | static vseg_t * vmm_mmap_alloc( vmm_t * vmm, |
---|
294 | vpn_t npages ) |
---|
295 | { |
---|
296 | |
---|
297 | #if DEBUG_VMM_MMAP |
---|
298 | thread_t * this = CURRENT_THREAD; |
---|
299 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
300 | if( DEBUG_VMM_MMAP < cycle ) |
---|
301 | printk("\n[%s] thread[%x,%x] for %x pages / cycle %d\n", |
---|
302 | __FUNCTION__, this->process->pid, this->trdid, npages, cycle ); |
---|
303 | #endif |
---|
304 | |
---|
305 | // number of allocated pages must be power of 2 |
---|
306 | // compute actual size and order |
---|
307 | vpn_t required_vpn_size = POW2_ROUNDUP( npages ); |
---|
308 | uint32_t required_order = bits_log2( required_vpn_size ); |
---|
309 | |
---|
310 | // get mmap allocator pointer |
---|
311 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
312 | |
---|
313 | // take lock protecting free lists in MMAP allocator |
---|
314 | busylock_acquire( &mgr->lock ); |
---|
315 | |
---|
316 | // initialises the while loop variables |
---|
317 | uint32_t current_order = required_order; |
---|
318 | vseg_t * current_vseg = NULL; |
---|
319 | |
---|
320 | // search a free vseg equal or larger than requested size |
---|
321 | while( current_order <= CONFIG_VMM_HEAP_MAX_ORDER ) |
---|
322 | { |
---|
323 | // build extended pointer on free_pages_root[current_order] |
---|
324 | xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[current_order] ); |
---|
325 | |
---|
326 | if( !xlist_is_empty( root_xp ) ) |
---|
327 | { |
---|
328 | // get extended pointer on first vseg in this free_list |
---|
329 | xptr_t current_vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); |
---|
330 | current_vseg = GET_PTR( current_vseg_xp ); |
---|
331 | |
---|
332 | // build extended pointer on xlist field in vseg descriptor |
---|
333 | xptr_t list_entry_xp = XPTR( local_cxy , ¤t_vseg->xlist ); |
---|
334 | |
---|
335 | // remove this vseg from the free_list |
---|
336 | xlist_unlink( list_entry_xp ); |
---|
337 | |
---|
338 | break; |
---|
339 | } |
---|
340 | |
---|
341 | // increment loop index |
---|
342 | current_order++; |
---|
343 | |
---|
344 | } // end while loop |
---|
345 | |
---|
346 | if( current_vseg == NULL ) // return failure |
---|
347 | { |
---|
348 | // release lock protecting free lists |
---|
349 | busylock_release( &mgr->lock ); |
---|
350 | |
---|
351 | printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n", |
---|
352 | __FUNCTION__, npages , local_cxy ); |
---|
353 | |
---|
354 | return NULL; |
---|
355 | } |
---|
356 | |
---|
357 | // split recursively the found vseg in smaller vsegs |
---|
358 | // if required, and update the free-lists accordingly |
---|
359 | while( current_order > required_order ) |
---|
360 | { |
---|
361 | // get found vseg base and size |
---|
362 | vpn_t vpn_base = current_vseg->vpn_base; |
---|
363 | vpn_t vpn_size = current_vseg->vpn_size; |
---|
364 | |
---|
365 | // allocate a new vseg for the upper half of current vseg |
---|
366 | vseg_t * new_vseg = vseg_alloc(); |
---|
367 | |
---|
368 | if( new_vseg == NULL ) |
---|
369 | { |
---|
370 | // release lock protecting free lists |
---|
371 | busylock_release( &mgr->lock ); |
---|
372 | |
---|
373 | printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n", |
---|
374 | __FUNCTION__ , local_cxy ); |
---|
375 | |
---|
376 | return NULL; |
---|
377 | } |
---|
378 | |
---|
379 | // initialise new vseg (upper half of found vseg) |
---|
380 | new_vseg->vmm = vmm; |
---|
381 | new_vseg->vpn_base = vpn_base + (vpn_size >> 1); |
---|
382 | new_vseg->vpn_size = vpn_size >> 1; |
---|
383 | |
---|
384 | // insert new vseg in relevant free_list |
---|
385 | xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[current_order-1] ), |
---|
386 | XPTR( local_cxy , &new_vseg->xlist ) ); |
---|
387 | |
---|
388 | // update found vseg |
---|
389 | current_vseg->vpn_size = vpn_size>>1; |
---|
390 | |
---|
391 | // update order |
---|
392 | current_order --; |
---|
393 | } |
---|
394 | |
---|
395 | // release lock protecting free lists |
---|
396 | busylock_release( &mgr->lock ); |
---|
397 | |
---|
398 | #if DEBUG_VMM_MMAP |
---|
399 | vmm_mmap_display( vmm ); |
---|
400 | #endif |
---|
401 | |
---|
402 | return current_vseg; |
---|
403 | |
---|
404 | } // end vmm_mmap_alloc() |
---|
405 | |
---|
406 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
407 | // This static function implements the VMM MMAP specific desallocator. |
---|
408 | // It is called by the vmm_remove_vseg() function. |
---|
409 | // It releases the vseg to the relevant free_list, after trying (recursively) to |
---|
410 | // merge it to the buddy vseg. |
---|
411 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
412 | // @ vmm : [in] pointer on VMM. |
---|
413 | // @ vseg : [in] pointer on released vseg. |
---|
414 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
415 | static void vmm_mmap_free( vmm_t * vmm, |
---|
416 | vseg_t * vseg ) |
---|
417 | { |
---|
418 | |
---|
419 | #if DEBUG_VMM_MMAP |
---|
420 | thread_t * this = CURRENT_THREAD; |
---|
421 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
422 | if( DEBUG_VMM_MMAP < cycle ) |
---|
423 | printk("\n[%s] thread[%x,%x] for vpn_base %x / vpn_size %x / cycle %d\n", |
---|
424 | __FUNCTION__, this->process->pid, this->trdid, vseg->vpn_base, vseg->vpn_size, cycle ); |
---|
425 | #endif |
---|
426 | |
---|
427 | vseg_t * buddy_vseg; |
---|
428 | |
---|
429 | // get mmap allocator pointer |
---|
430 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
431 | |
---|
432 | // take lock protecting free lists |
---|
433 | busylock_acquire( &mgr->lock ); |
---|
434 | |
---|
435 | // initialise loop variables |
---|
436 | // released_vseg is the currently released vseg |
---|
437 | vseg_t * released_vseg = vseg; |
---|
438 | uint32_t released_order = bits_log2( vseg->vpn_size ); |
---|
439 | |
---|
440 | // iteratively merge the released vseg to the buddy vseg |
---|
441 | // release the current page and exit when buddy not found |
---|
442 | while( released_order <= CONFIG_VMM_HEAP_MAX_ORDER ) |
---|
443 | { |
---|
444 | // compute buddy_vseg vpn_base |
---|
445 | vpn_t buddy_vpn_base = released_vseg->vpn_base ^ (1 << released_order); |
---|
446 | |
---|
447 | // build extended pointer on free_pages_root[current_order] |
---|
448 | xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[released_order] ); |
---|
449 | |
---|
450 | // scan this free list to find the buddy vseg |
---|
451 | xptr_t iter_xp; |
---|
452 | buddy_vseg = NULL; |
---|
453 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
454 | { |
---|
455 | xptr_t current_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
456 | vseg_t * current_vseg = GET_PTR( current_vseg_xp ); |
---|
457 | |
---|
458 | if( current_vseg->vpn_base == buddy_vpn_base ) |
---|
459 | { |
---|
460 | buddy_vseg = current_vseg; |
---|
461 | break; |
---|
462 | } |
---|
463 | } |
---|
464 | |
---|
465 | if( buddy_vseg != NULL ) // buddy found => merge released & buddy |
---|
466 | { |
---|
467 | // update released vseg fields |
---|
468 | released_vseg->vpn_size = buddy_vseg->vpn_size<<1; |
---|
469 | if( released_vseg->vpn_base > buddy_vseg->vpn_base) |
---|
470 | released_vseg->vpn_base = buddy_vseg->vpn_base; |
---|
471 | |
---|
472 | // remove buddy vseg from free_list |
---|
473 | xlist_unlink( XPTR( local_cxy , &buddy_vseg->xlist ) ); |
---|
474 | |
---|
475 | // release memory allocated to buddy descriptor |
---|
476 | vseg_free( buddy_vseg ); |
---|
477 | } |
---|
478 | else // buddy not found => register & exit |
---|
479 | { |
---|
480 | // register released vseg in free list |
---|
481 | xlist_add_first( root_xp , XPTR( local_cxy , &released_vseg->xlist ) ); |
---|
482 | |
---|
483 | // exit while loop |
---|
484 | break; |
---|
485 | } |
---|
486 | |
---|
487 | // increment released_order |
---|
488 | released_order++; |
---|
489 | } |
---|
490 | |
---|
491 | // release lock |
---|
492 | busylock_release( &mgr->lock ); |
---|
493 | |
---|
494 | #if DEBUG_VMM_MMAP |
---|
495 | vmm_mmap_display( vmm ); |
---|
496 | #endif |
---|
497 | |
---|
498 | } // end vmm_mmap_free() |
---|
499 | |
---|
500 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
501 | // This static function registers one vseg in the VSL of a local process descriptor. |
---|
502 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
503 | // vmm : [in] pointer on VMM. |
---|
504 | // vseg : [in] pointer on vseg. |
---|
505 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
506 | void vmm_attach_vseg_to_vsl( vmm_t * vmm, |
---|
507 | vseg_t * vseg ) |
---|
508 | { |
---|
509 | // update vseg descriptor |
---|
510 | vseg->vmm = vmm; |
---|
511 | |
---|
512 | // increment vsegs number |
---|
513 | vmm->vsegs_nr++; |
---|
514 | |
---|
515 | // add vseg in vmm list |
---|
516 | xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ), |
---|
517 | XPTR( local_cxy , &vseg->xlist ) ); |
---|
518 | |
---|
519 | } // end vmm_attach_vseg_from_vsl() |
---|
520 | |
---|
521 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
522 | // This static function removes one vseg from the VSL of a local process descriptor. |
---|
523 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
524 | // vmm : [in] pointer on VMM. |
---|
525 | // vseg : [in] pointer on vseg. |
---|
526 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
527 | void vmm_detach_vseg_from_vsl( vmm_t * vmm, |
---|
528 | vseg_t * vseg ) |
---|
529 | { |
---|
530 | // update vseg descriptor |
---|
531 | vseg->vmm = NULL; |
---|
532 | |
---|
533 | // decrement vsegs number |
---|
534 | vmm->vsegs_nr--; |
---|
535 | |
---|
536 | // remove vseg from VSL |
---|
537 | xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); |
---|
538 | |
---|
539 | } // end vmm_detach_from_vsl() |
---|
540 | |
---|
541 | //////////////////////////////////////////// |
---|
542 | error_t vmm_user_init( process_t * process ) |
---|
543 | { |
---|
544 | |
---|
545 | #if DEBUG_VMM_USER_INIT |
---|
546 | thread_t * this = CURRENT_THREAD; |
---|
547 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
548 | if( DEBUG_VMM_USER_INIT ) |
---|
549 | printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", |
---|
550 | __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
551 | #endif |
---|
552 | |
---|
553 | // get pointer on VMM |
---|
554 | vmm_t * vmm = &process->vmm; |
---|
555 | |
---|
556 | // check UTILS zone |
---|
557 | assert( __FUNCTION__ , ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <= |
---|
558 | (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) , "UTILS zone too small\n" ); |
---|
559 | |
---|
560 | // initialize lock protecting the VSL |
---|
561 | remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); |
---|
562 | |
---|
563 | // initialize STACK allocator |
---|
564 | vmm_stack_init( vmm ); |
---|
565 | |
---|
566 | // initialize MMAP allocator |
---|
567 | vmm_mmap_init( vmm ); |
---|
568 | |
---|
569 | // initialize instrumentation counters |
---|
570 | vmm->false_pgfault_nr = 0; |
---|
571 | vmm->local_pgfault_nr = 0; |
---|
572 | vmm->global_pgfault_nr = 0; |
---|
573 | vmm->false_pgfault_cost = 0; |
---|
574 | vmm->local_pgfault_cost = 0; |
---|
575 | vmm->global_pgfault_cost = 0; |
---|
576 | |
---|
577 | hal_fence(); |
---|
578 | |
---|
579 | #if DEBUG_VMM_USER_INIT |
---|
580 | cycle = (uint32_t)hal_get_cycles(); |
---|
581 | if( DEBUG_VMM_USER_INIT ) |
---|
582 | printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", |
---|
583 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
584 | #endif |
---|
585 | |
---|
586 | return 0; |
---|
587 | |
---|
588 | } // end vmm_user_init() |
---|
589 | |
---|
590 | ////////////////////////////////////////// |
---|
591 | void vmm_user_reset( process_t * process ) |
---|
592 | { |
---|
593 | xptr_t vseg_xp; |
---|
594 | vseg_t * vseg; |
---|
595 | vseg_type_t vseg_type; |
---|
596 | |
---|
597 | #if DEBUG_VMM_USER_RESET |
---|
598 | uint32_t cycle; |
---|
599 | thread_t * this = CURRENT_THREAD; |
---|
600 | #endif |
---|
601 | |
---|
602 | #if (DEBUG_VMM_USER_RESET & 1 ) |
---|
603 | cycle = (uint32_t)hal_get_cycles(); |
---|
604 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
605 | printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", |
---|
606 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
607 | #endif |
---|
608 | |
---|
609 | #if (DEBUG_VMM_USER_RESET & 1 ) |
---|
610 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
611 | hal_vmm_display( XPTR( local_cxy , process ) , true ); |
---|
612 | #endif |
---|
613 | |
---|
614 | // get pointer on local VMM |
---|
615 | vmm_t * vmm = &process->vmm; |
---|
616 | |
---|
617 | // build extended pointer on VSL root and VSL lock |
---|
618 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
619 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); |
---|
620 | |
---|
621 | // take the VSL lock |
---|
622 | remote_queuelock_acquire( lock_xp ); |
---|
623 | |
---|
624 | // scan the VSL to delete all non kernel vsegs |
---|
625 | // (we don't use a FOREACH in case of item deletion) |
---|
626 | xptr_t iter_xp; |
---|
627 | xptr_t next_xp; |
---|
628 | for( iter_xp = hal_remote_l64( root_xp ) ; |
---|
629 | iter_xp != root_xp ; |
---|
630 | iter_xp = next_xp ) |
---|
631 | { |
---|
632 | // save extended pointer on next item in xlist |
---|
633 | next_xp = hal_remote_l64( iter_xp ); |
---|
634 | |
---|
635 | // get pointers on current vseg in VSL |
---|
636 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
637 | vseg = GET_PTR( vseg_xp ); |
---|
638 | vseg_type = vseg->type; |
---|
639 | |
---|
640 | #if( DEBUG_VMM_USER_RESET & 1 ) |
---|
641 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
642 | printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n", |
---|
643 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
644 | #endif |
---|
645 | // delete non kernel vseg |
---|
646 | if( (vseg_type != VSEG_TYPE_KCODE) && |
---|
647 | (vseg_type != VSEG_TYPE_KDATA) && |
---|
648 | (vseg_type != VSEG_TYPE_KDEV ) ) |
---|
649 | { |
---|
650 | // remove vseg from VSL |
---|
651 | vmm_remove_vseg( process , vseg ); |
---|
652 | |
---|
653 | #if( DEBUG_VMM_USER_RESET & 1 ) |
---|
654 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
655 | printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n", |
---|
656 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
657 | #endif |
---|
658 | } |
---|
659 | else |
---|
660 | { |
---|
661 | |
---|
662 | #if( DEBUG_VMM_USER_RESET & 1 ) |
---|
663 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
664 | printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n", |
---|
665 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
666 | #endif |
---|
667 | } |
---|
668 | } // end loop on vsegs in VSL |
---|
669 | |
---|
670 | // release the VSL lock |
---|
671 | remote_queuelock_release( lock_xp ); |
---|
672 | |
---|
673 | // FIXME il faut gérer les process copies... |
---|
674 | |
---|
675 | // re-initialise VMM |
---|
676 | vmm_user_init( process ); |
---|
677 | |
---|
678 | #if DEBUG_VMM_USER_RESET |
---|
679 | cycle = (uint32_t)hal_get_cycles(); |
---|
680 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
681 | printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", |
---|
682 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle ); |
---|
683 | #endif |
---|
684 | |
---|
685 | #if (DEBUG_VMM_USER_RESET & 1 ) |
---|
686 | if( DEBUG_VMM_USER_RESET < cycle ) |
---|
687 | hal_vmm_display( XPTR( local_cxy , process ) , true ); |
---|
688 | #endif |
---|
689 | |
---|
690 | } // end vmm_user_reset() |
---|
691 | |
---|
692 | ///////////////////////////////////////////////// |
---|
693 | void vmm_global_delete_vseg( process_t * process, |
---|
694 | intptr_t base ) |
---|
695 | { |
---|
696 | cxy_t owner_cxy; |
---|
697 | lpid_t owner_lpid; |
---|
698 | reg_t save_sr; |
---|
699 | |
---|
700 | xptr_t process_lock_xp; |
---|
701 | xptr_t process_root_xp; |
---|
702 | xptr_t process_iter_xp; |
---|
703 | |
---|
704 | xptr_t remote_process_xp; |
---|
705 | cxy_t remote_process_cxy; |
---|
706 | process_t * remote_process_ptr; |
---|
707 | |
---|
708 | xptr_t vsl_root_xp; |
---|
709 | xptr_t vsl_lock_xp; |
---|
710 | xptr_t vsl_iter_xp; |
---|
711 | |
---|
712 | rpc_desc_t rpc; // shared rpc descriptor for parallel RPCs |
---|
713 | uint32_t responses; // RPC responses counter |
---|
714 | |
---|
715 | thread_t * this = CURRENT_THREAD; |
---|
716 | pid_t pid = process->pid; |
---|
717 | cluster_t * cluster = LOCAL_CLUSTER; |
---|
718 | |
---|
719 | #if DEBUG_VMM_GLOBAL_DELETE_VSEG |
---|
720 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
721 | #endif |
---|
722 | |
---|
723 | #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) |
---|
724 | if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) |
---|
725 | printk("\n[%s] thread[%x,%x] enters / process %x / base %x / cycle %d\n", |
---|
726 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle ); |
---|
727 | #endif |
---|
728 | |
---|
729 | // initialize a shared RPC descriptor |
---|
730 | rpc.rsp = &responses; |
---|
731 | rpc.blocking = false; // non blocking behaviour for rpc_send() |
---|
732 | rpc.index = RPC_VMM_REMOVE_VSEG; |
---|
733 | rpc.thread = this; |
---|
734 | rpc.lid = this->core->lid; |
---|
735 | rpc.args[0] = this->process->pid; |
---|
736 | rpc.args[1] = base; |
---|
737 | |
---|
738 | // get owner process cluster and local index |
---|
739 | owner_cxy = CXY_FROM_PID( pid ); |
---|
740 | owner_lpid = LPID_FROM_PID( pid ); |
---|
741 | |
---|
742 | // get extended pointer on root and lock of process copies xlist in owner cluster |
---|
743 | process_root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] ); |
---|
744 | process_lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] ); |
---|
745 | |
---|
746 | // mask IRQs |
---|
747 | hal_disable_irq( &save_sr ); |
---|
748 | |
---|
749 | // client thread blocks itself |
---|
750 | thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); |
---|
751 | |
---|
752 | // take the lock protecting process copies |
---|
753 | remote_queuelock_acquire( process_lock_xp ); |
---|
754 | |
---|
755 | // initialize responses counter |
---|
756 | responses = 0; |
---|
757 | |
---|
758 | // loop on process copies |
---|
759 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
760 | { |
---|
761 | // get cluster and local pointer on remote process |
---|
762 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
763 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
764 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
765 | |
---|
766 | // build extended pointers on remote VSL root and lock |
---|
767 | vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root ); |
---|
768 | vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock ); |
---|
769 | |
---|
770 | // get lock on remote VSL |
---|
771 | remote_queuelock_acquire( vsl_lock_xp ); |
---|
772 | |
---|
773 | // loop on vsegs in remote process VSL |
---|
774 | XLIST_FOREACH( vsl_root_xp , vsl_iter_xp ) |
---|
775 | { |
---|
776 | // get pointers on current vseg |
---|
777 | xptr_t vseg_xp = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist ); |
---|
778 | vseg_t * vseg_ptr = GET_PTR( vseg_xp ); |
---|
779 | |
---|
780 | // get current vseg base address |
---|
781 | intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy, |
---|
782 | &vseg_ptr->min ) ); |
---|
783 | |
---|
784 | if( vseg_base == base ) // found searched vseg |
---|
785 | { |
---|
786 | // atomically increment responses counter |
---|
787 | hal_atomic_add( &responses , 1 ); |
---|
788 | |
---|
789 | #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) |
---|
790 | if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) |
---|
791 | printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n", |
---|
792 | __FUNCTION__, this->process->pid, this->trdid, remote_process_cxy ); |
---|
793 | #endif |
---|
794 | // send RPC to remote cluster |
---|
795 | rpc_send( remote_process_cxy , &rpc ); |
---|
796 | |
---|
797 | // exit loop on vsegs |
---|
798 | break; |
---|
799 | } |
---|
800 | } // end of loop on vsegs |
---|
801 | |
---|
802 | // release lock on remote VSL |
---|
803 | remote_queuelock_release( vsl_lock_xp ); |
---|
804 | |
---|
805 | } // end of loop on process copies |
---|
806 | |
---|
807 | // release the lock protecting process copies |
---|
808 | remote_queuelock_release( process_lock_xp ); |
---|
809 | |
---|
810 | #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) |
---|
811 | if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) |
---|
812 | printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n", |
---|
813 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base ); |
---|
814 | #endif |
---|
815 | |
---|
816 | // client thread deschedule |
---|
817 | sched_yield("blocked on rpc_vmm_delete_vseg"); |
---|
818 | |
---|
819 | // restore IRQs |
---|
820 | hal_restore_irq( save_sr ); |
---|
821 | |
---|
822 | #if DEBUG_VMM_GLOBAL_DELETE_VSEG |
---|
823 | cycle = (uint32_t)hal_get_cycles(); |
---|
824 | if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) |
---|
825 | printk("\n[%s] thread[%x,%x] exit / process %x / base %x / cycle %d\n", |
---|
826 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle ); |
---|
827 | #endif |
---|
828 | |
---|
829 | } // end vmm_global_delete_vseg() |
---|
830 | |
---|
831 | //////////////////////////////////////////////// |
---|
832 | void vmm_global_resize_vseg( process_t * process, |
---|
833 | intptr_t base, |
---|
834 | intptr_t new_base, |
---|
835 | intptr_t new_size ) |
---|
836 | { |
---|
837 | cxy_t owner_cxy; |
---|
838 | lpid_t owner_lpid; |
---|
839 | reg_t save_sr; |
---|
840 | |
---|
841 | xptr_t process_lock_xp; |
---|
842 | xptr_t process_root_xp; |
---|
843 | xptr_t process_iter_xp; |
---|
844 | |
---|
845 | xptr_t remote_process_xp; |
---|
846 | cxy_t remote_process_cxy; |
---|
847 | process_t * remote_process_ptr; |
---|
848 | |
---|
849 | xptr_t vsl_root_xp; |
---|
850 | xptr_t vsl_lock_xp; |
---|
851 | xptr_t vsl_iter_xp; |
---|
852 | |
---|
853 | rpc_desc_t rpc; // shared rpc descriptor for parallel RPCs |
---|
854 | uint32_t responses; // RPC responses counter |
---|
855 | |
---|
856 | thread_t * this = CURRENT_THREAD; |
---|
857 | pid_t pid = process->pid; |
---|
858 | cluster_t * cluster = LOCAL_CLUSTER; |
---|
859 | |
---|
860 | #if DEBUG_VMM_GLOBAL_RESIZE_VSEG |
---|
861 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
862 | #endif |
---|
863 | |
---|
864 | #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) |
---|
865 | if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) |
---|
866 | printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n", |
---|
867 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle ); |
---|
868 | #endif |
---|
869 | |
---|
870 | // initialize a shared RPC descriptor |
---|
871 | rpc.rsp = &responses; |
---|
872 | rpc.blocking = false; // non blocking behaviour for rpc_send() |
---|
873 | rpc.index = RPC_VMM_REMOVE_VSEG; |
---|
874 | rpc.thread = this; |
---|
875 | rpc.lid = this->core->lid; |
---|
876 | rpc.args[0] = this->process->pid; |
---|
877 | rpc.args[1] = base; |
---|
878 | rpc.args[2] = new_base; |
---|
879 | rpc.args[3] = new_size; |
---|
880 | |
---|
881 | // get owner process cluster and local index |
---|
882 | owner_cxy = CXY_FROM_PID( pid ); |
---|
883 | owner_lpid = LPID_FROM_PID( pid ); |
---|
884 | |
---|
885 | // get extended pointer on root and lock of process copies xlist in owner cluster |
---|
886 | process_root_xp = XPTR( owner_cxy , &cluster->pmgr.copies_root[owner_lpid] ); |
---|
887 | process_lock_xp = XPTR( owner_cxy , &cluster->pmgr.copies_lock[owner_lpid] ); |
---|
888 | |
---|
889 | // mask IRQs |
---|
890 | hal_disable_irq( &save_sr ); |
---|
891 | |
---|
892 | // client thread blocks itself |
---|
893 | thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); |
---|
894 | |
---|
895 | // take the lock protecting process copies |
---|
896 | remote_queuelock_acquire( process_lock_xp ); |
---|
897 | |
---|
898 | // initialize responses counter |
---|
899 | responses = 0; |
---|
900 | |
---|
901 | // loop on process copies |
---|
902 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
903 | { |
---|
904 | // get cluster and local pointer on remote process |
---|
905 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
906 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
907 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
908 | |
---|
909 | // build extended pointers on remote VSL root and lock |
---|
910 | vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root ); |
---|
911 | vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock ); |
---|
912 | |
---|
913 | // get lock on remote VSL |
---|
914 | remote_queuelock_acquire( vsl_lock_xp ); |
---|
915 | |
---|
916 | // loop on vsegs in remote process VSL |
---|
917 | XLIST_FOREACH( vsl_root_xp , vsl_iter_xp ) |
---|
918 | { |
---|
919 | // get pointers on current vseg |
---|
920 | xptr_t vseg_xp = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist ); |
---|
921 | vseg_t * vseg_ptr = GET_PTR( vseg_xp ); |
---|
922 | |
---|
923 | // get current vseg base address |
---|
924 | intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy, |
---|
925 | &vseg_ptr->min ) ); |
---|
926 | |
---|
927 | if( vseg_base == base ) // found searched vseg |
---|
928 | { |
---|
929 | // atomically increment responses counter |
---|
930 | hal_atomic_add( &responses , 1 ); |
---|
931 | |
---|
932 | #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) |
---|
933 | if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) |
---|
934 | printk("\n[%s] thread[%x,%x] register RPC request in cluster %x\n", |
---|
935 | __FUNCTION__, this->process->pid, this->trdid, remote_process_cxy ); |
---|
936 | #endif |
---|
937 | // send RPC to remote cluster |
---|
938 | rpc_send( remote_process_cxy , & rpc ); |
---|
939 | |
---|
940 | // exit loop on vsegs |
---|
941 | break; |
---|
942 | } |
---|
943 | |
---|
944 | } // end of loop on vsegs |
---|
945 | |
---|
946 | #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) |
---|
947 | if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) |
---|
948 | hal_vmm_display( remote_process_xp , false ); |
---|
949 | #endif |
---|
950 | |
---|
951 | // release lock on remote VSL |
---|
952 | remote_queuelock_release( vsl_lock_xp ); |
---|
953 | |
---|
954 | } // end of loop on process copies |
---|
955 | |
---|
956 | // release the lock protecting process copies |
---|
957 | remote_queuelock_release( process_lock_xp ); |
---|
958 | |
---|
959 | #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) |
---|
960 | if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) |
---|
961 | printk("\n[%s] thread[%x,%x] deschedule / process %x / base %x\n", |
---|
962 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base ); |
---|
963 | #endif |
---|
964 | |
---|
965 | // client thread deschedule |
---|
966 | sched_yield("blocked on rpc_vmm_delete_vseg"); |
---|
967 | |
---|
968 | // restore IRQs |
---|
969 | hal_restore_irq( save_sr ); |
---|
970 | |
---|
971 | #if DEBUG_VMM_GLOBAL_RESIZE_VSEG |
---|
972 | cycle = (uint32_t)hal_get_cycles(); |
---|
973 | if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) |
---|
974 | printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n", |
---|
975 | __FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle ); |
---|
976 | #endif |
---|
977 | |
---|
978 | } // end vmm_global_resize_vseg() |
---|
979 | |
---|
980 | //////////////////////////////////////////////// |
---|
981 | void vmm_global_update_pte( process_t * process, |
---|
982 | vpn_t vpn, |
---|
983 | uint32_t attr, |
---|
984 | ppn_t ppn ) |
---|
985 | { |
---|
986 | pid_t pid; |
---|
987 | cxy_t owner_cxy; |
---|
988 | lpid_t owner_lpid; |
---|
989 | |
---|
990 | xlist_entry_t * process_root_ptr; |
---|
991 | xptr_t process_root_xp; |
---|
992 | xptr_t process_iter_xp; |
---|
993 | |
---|
994 | xptr_t remote_process_xp; |
---|
995 | cxy_t remote_process_cxy; |
---|
996 | process_t * remote_process_ptr; |
---|
997 | xptr_t remote_gpt_xp; |
---|
998 | |
---|
999 | #if DEBUG_VMM_GLOBAL_UPDATE_PTE |
---|
1000 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1001 | thread_t * this = CURRENT_THREAD; |
---|
1002 | #endif |
---|
1003 | |
---|
1004 | |
---|
1005 | #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) |
---|
1006 | if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) |
---|
1007 | printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / ycle %d\n", |
---|
1008 | __FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle ); |
---|
1009 | #endif |
---|
1010 | |
---|
1011 | // get owner process cluster and local index |
---|
1012 | pid = process->pid; |
---|
1013 | owner_cxy = CXY_FROM_PID( pid ); |
---|
1014 | owner_lpid = LPID_FROM_PID( pid ); |
---|
1015 | |
---|
1016 | // get extended pointer on root of process copies xlist in owner cluster |
---|
1017 | process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; |
---|
1018 | process_root_xp = XPTR( owner_cxy , process_root_ptr ); |
---|
1019 | |
---|
1020 | // loop on process copies |
---|
1021 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
1022 | { |
---|
1023 | // get cluster and local pointer on remote process |
---|
1024 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
1025 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
1026 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
1027 | |
---|
1028 | #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) |
---|
1029 | if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) |
---|
1030 | printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n", |
---|
1031 | __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); |
---|
1032 | #endif |
---|
1033 | |
---|
1034 | // get extended pointer on remote gpt |
---|
1035 | remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); |
---|
1036 | |
---|
1037 | // update remote GPT |
---|
1038 | hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn ); |
---|
1039 | } |
---|
1040 | |
---|
1041 | #if DEBUG_VMM_GLOBAL_UPDATE_PTE |
---|
1042 | cycle = (uint32_t)hal_get_cycles(); |
---|
1043 | if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) |
---|
1044 | printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n", |
---|
1045 | __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); |
---|
1046 | #endif |
---|
1047 | |
---|
1048 | #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) |
---|
1049 | hal_vmm_display( process , true ); |
---|
1050 | #endif |
---|
1051 | |
---|
1052 | } // end vmm_global_update_pte() |
---|
1053 | |
---|
1054 | /////////////////////////////////////// |
---|
1055 | void vmm_set_cow( process_t * process ) |
---|
1056 | { |
---|
1057 | vmm_t * vmm; |
---|
1058 | |
---|
1059 | xlist_entry_t * process_root_ptr; |
---|
1060 | xptr_t process_root_xp; |
---|
1061 | xptr_t process_iter_xp; |
---|
1062 | |
---|
1063 | xptr_t remote_process_xp; |
---|
1064 | cxy_t remote_process_cxy; |
---|
1065 | process_t * remote_process_ptr; |
---|
1066 | xptr_t remote_gpt_xp; |
---|
1067 | |
---|
1068 | xptr_t vseg_root_xp; |
---|
1069 | xptr_t vseg_iter_xp; |
---|
1070 | |
---|
1071 | xptr_t vseg_xp; |
---|
1072 | vseg_t * vseg; |
---|
1073 | |
---|
1074 | pid_t pid; |
---|
1075 | cxy_t owner_cxy; |
---|
1076 | lpid_t owner_lpid; |
---|
1077 | |
---|
1078 | // get target process PID |
---|
1079 | pid = process->pid; |
---|
1080 | |
---|
1081 | #if DEBUG_VMM_SET_COW |
---|
1082 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1083 | thread_t * this = CURRENT_THREAD; |
---|
1084 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1085 | printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", |
---|
1086 | __FUNCTION__, this->process->pid, this->trdid, pid , cycle ); |
---|
1087 | #endif |
---|
1088 | |
---|
1089 | #if (DEBUG_VMM_SET_COW & 1) |
---|
1090 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1091 | hal_vmm_display( process , true ); |
---|
1092 | #endif |
---|
1093 | |
---|
1094 | // check cluster is reference |
---|
1095 | assert( __FUNCTION__, (XPTR( local_cxy , process ) == process->ref_xp), |
---|
1096 | "local cluster must be process reference cluster\n"); |
---|
1097 | |
---|
1098 | // get pointer on reference VMM |
---|
1099 | vmm = &process->vmm; |
---|
1100 | |
---|
1101 | // get extended pointer on root of process copies xlist in owner cluster |
---|
1102 | owner_cxy = CXY_FROM_PID( pid ); |
---|
1103 | owner_lpid = LPID_FROM_PID( pid ); |
---|
1104 | process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; |
---|
1105 | process_root_xp = XPTR( owner_cxy , process_root_ptr ); |
---|
1106 | |
---|
1107 | // get extended pointer on root of vsegs xlist from reference VMM |
---|
1108 | vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
1109 | |
---|
1110 | // loop on target process copies |
---|
1111 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
1112 | { |
---|
1113 | // get cluster and local pointer on remote process copy |
---|
1114 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
1115 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
1116 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
1117 | |
---|
1118 | #if (DEBUG_VMM_SET_COW & 1) |
---|
1119 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1120 | printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n", |
---|
1121 | __FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy ); |
---|
1122 | #endif |
---|
1123 | |
---|
1124 | // get extended pointer on remote gpt |
---|
1125 | remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); |
---|
1126 | |
---|
1127 | // loop on vsegs in (local) reference process VSL |
---|
1128 | XLIST_FOREACH( vseg_root_xp , vseg_iter_xp ) |
---|
1129 | { |
---|
1130 | // get pointer on vseg |
---|
1131 | vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist ); |
---|
1132 | vseg = GET_PTR( vseg_xp ); |
---|
1133 | |
---|
1134 | // get vseg type, base and size |
---|
1135 | uint32_t type = vseg->type; |
---|
1136 | vpn_t vpn_base = vseg->vpn_base; |
---|
1137 | vpn_t vpn_size = vseg->vpn_size; |
---|
1138 | |
---|
1139 | #if (DEBUG_VMM_SET_COW & 1) |
---|
1140 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1141 | printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n", |
---|
1142 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); |
---|
1143 | #endif |
---|
1144 | // only DATA, ANON and REMOTE vsegs |
---|
1145 | if( (type == VSEG_TYPE_DATA) || |
---|
1146 | (type == VSEG_TYPE_ANON) || |
---|
1147 | (type == VSEG_TYPE_REMOTE) ) |
---|
1148 | { |
---|
1149 | vpn_t vpn; |
---|
1150 | uint32_t attr; |
---|
1151 | ppn_t ppn; |
---|
1152 | xptr_t page_xp; |
---|
1153 | cxy_t page_cxy; |
---|
1154 | page_t * page_ptr; |
---|
1155 | xptr_t forks_xp; |
---|
1156 | xptr_t lock_xp; |
---|
1157 | |
---|
1158 | // update flags in remote GPT |
---|
1159 | hal_gpt_set_cow( remote_gpt_xp, |
---|
1160 | vpn_base, |
---|
1161 | vpn_size ); |
---|
1162 | |
---|
1163 | // atomically increment pending forks counter in physical pages, |
---|
1164 | // this is only done once, when handling the reference copy |
---|
1165 | if( remote_process_cxy == local_cxy ) |
---|
1166 | { |
---|
1167 | |
---|
1168 | #if (DEBUG_VMM_SET_COW & 1) |
---|
1169 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1170 | printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n", |
---|
1171 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); |
---|
1172 | #endif |
---|
1173 | // scan all pages in vseg |
---|
1174 | for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) |
---|
1175 | { |
---|
1176 | // get page attributes and PPN from reference GPT |
---|
1177 | hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn ); |
---|
1178 | |
---|
1179 | // atomically update pending forks counter if page is mapped |
---|
1180 | if( attr & GPT_MAPPED ) |
---|
1181 | { |
---|
1182 | // get pointers and cluster on page descriptor |
---|
1183 | page_xp = ppm_ppn2page( ppn ); |
---|
1184 | page_cxy = GET_CXY( page_xp ); |
---|
1185 | page_ptr = GET_PTR( page_xp ); |
---|
1186 | |
---|
1187 | // get extended pointers on "forks" and "lock" |
---|
1188 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
1189 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
1190 | |
---|
1191 | // take lock protecting "forks" counter |
---|
1192 | remote_busylock_acquire( lock_xp ); |
---|
1193 | |
---|
1194 | // increment "forks" |
---|
1195 | hal_remote_atomic_add( forks_xp , 1 ); |
---|
1196 | |
---|
1197 | // release lock protecting "forks" counter |
---|
1198 | remote_busylock_release( lock_xp ); |
---|
1199 | } |
---|
1200 | } // end loop on vpn |
---|
1201 | |
---|
1202 | #if (DEBUG_VMM_SET_COW & 1) |
---|
1203 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1204 | printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n", |
---|
1205 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); |
---|
1206 | #endif |
---|
1207 | } // end if local |
---|
1208 | } // end if vseg type |
---|
1209 | } // end loop on vsegs |
---|
1210 | } // end loop on process copies |
---|
1211 | |
---|
1212 | #if DEBUG_VMM_SET_COW |
---|
1213 | cycle = (uint32_t)hal_get_cycles(); |
---|
1214 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
1215 | printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", |
---|
1216 | __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); |
---|
1217 | #endif |
---|
1218 | |
---|
1219 | } // end vmm_set-cow() |
---|
1220 | |
---|
1221 | ///////////////////////////////////////////////// |
---|
1222 | error_t vmm_fork_copy( process_t * child_process, |
---|
1223 | xptr_t parent_process_xp ) |
---|
1224 | { |
---|
1225 | error_t error; |
---|
1226 | cxy_t parent_cxy; |
---|
1227 | process_t * parent_process; |
---|
1228 | vmm_t * parent_vmm; |
---|
1229 | xptr_t parent_lock_xp; |
---|
1230 | vmm_t * child_vmm; |
---|
1231 | xptr_t iter_xp; |
---|
1232 | xptr_t parent_vseg_xp; |
---|
1233 | vseg_t * parent_vseg; |
---|
1234 | vseg_t * child_vseg; |
---|
1235 | uint32_t type; |
---|
1236 | vpn_t vpn; |
---|
1237 | vpn_t vpn_base; |
---|
1238 | vpn_t vpn_size; |
---|
1239 | xptr_t parent_root_xp; |
---|
1240 | bool_t mapped; |
---|
1241 | ppn_t ppn; |
---|
1242 | |
---|
1243 | #if DEBUG_VMM_FORK_COPY |
---|
1244 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1245 | thread_t * this = CURRENT_THREAD; |
---|
1246 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
1247 | printk("\n[%s] thread %x enter / cycle %d\n", |
---|
1248 | __FUNCTION__ , this->process->pid, this->trdid, cycle ); |
---|
1249 | #endif |
---|
1250 | |
---|
1251 | // get parent process cluster and local pointer |
---|
1252 | parent_cxy = GET_CXY( parent_process_xp ); |
---|
1253 | parent_process = GET_PTR( parent_process_xp ); |
---|
1254 | |
---|
1255 | // get local pointers on parent and child VMM |
---|
1256 | parent_vmm = &parent_process->vmm; |
---|
1257 | child_vmm = &child_process->vmm; |
---|
1258 | |
---|
1259 | // build extended pointer on parent VSL root and lock |
---|
1260 | parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root ); |
---|
1261 | parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock ); |
---|
1262 | |
---|
1263 | // take the lock protecting the parent VSL |
---|
1264 | remote_queuelock_acquire( parent_lock_xp ); |
---|
1265 | |
---|
1266 | // loop on parent VSL xlist |
---|
1267 | XLIST_FOREACH( parent_root_xp , iter_xp ) |
---|
1268 | { |
---|
1269 | // get pointers on current parent vseg |
---|
1270 | parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
1271 | parent_vseg = GET_PTR( parent_vseg_xp ); |
---|
1272 | |
---|
1273 | // get vseg type |
---|
1274 | type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) ); |
---|
1275 | |
---|
1276 | #if DEBUG_VMM_FORK_COPY |
---|
1277 | cycle = (uint32_t)hal_get_cycles(); |
---|
1278 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
1279 | printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n", |
---|
1280 | __FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type), |
---|
1281 | hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); |
---|
1282 | #endif |
---|
1283 | |
---|
1284 | // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL |
---|
1285 | if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) && |
---|
1286 | (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) |
---|
1287 | { |
---|
1288 | // allocate memory for a new child vseg |
---|
1289 | child_vseg = vseg_alloc(); |
---|
1290 | if( child_vseg == NULL ) // release all allocated vsegs |
---|
1291 | { |
---|
1292 | vmm_destroy( child_process ); |
---|
1293 | printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ ); |
---|
1294 | return -1; |
---|
1295 | } |
---|
1296 | |
---|
1297 | // copy parent vseg to child vseg |
---|
1298 | vseg_init_from_ref( child_vseg , parent_vseg_xp ); |
---|
1299 | |
---|
1300 | // build extended pointer on child VSL lock |
---|
1301 | xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock ); |
---|
1302 | |
---|
1303 | // take the child VSL lock |
---|
1304 | remote_queuelock_acquire( child_lock_xp ); |
---|
1305 | |
---|
1306 | // register child vseg in child VSL |
---|
1307 | vmm_attach_vseg_to_vsl( child_vmm , child_vseg ); |
---|
1308 | |
---|
1309 | // release the child VSL lock |
---|
1310 | remote_queuelock_release( child_lock_xp ); |
---|
1311 | |
---|
1312 | #if DEBUG_VMM_FORK_COPY |
---|
1313 | cycle = (uint32_t)hal_get_cycles(); |
---|
1314 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
1315 | printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n", |
---|
1316 | __FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type), |
---|
1317 | hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); |
---|
1318 | #endif |
---|
1319 | // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT |
---|
1320 | if( type != VSEG_TYPE_CODE ) |
---|
1321 | { |
---|
1322 | // COW activation for DATA, ANON, REMOTE vsegs is currently disabled : |
---|
1323 | // cow = ( type != VSEG_TYPE_FILE ); |
---|
1324 | |
---|
1325 | vpn_base = child_vseg->vpn_base; |
---|
1326 | vpn_size = child_vseg->vpn_size; |
---|
1327 | |
---|
1328 | // scan pages in parent vseg |
---|
1329 | for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) |
---|
1330 | { |
---|
1331 | error = hal_gpt_pte_copy( &child_vmm->gpt, |
---|
1332 | vpn, |
---|
1333 | XPTR( parent_cxy , &parent_vmm->gpt ), |
---|
1334 | vpn, |
---|
1335 | false, // does not handle COW flag |
---|
1336 | &ppn, // unused |
---|
1337 | &mapped ); // unused |
---|
1338 | if( error ) |
---|
1339 | { |
---|
1340 | vmm_destroy( child_process ); |
---|
1341 | printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ ); |
---|
1342 | return -1; |
---|
1343 | } |
---|
1344 | |
---|
1345 | #if DEBUG_VMM_FORK_COPY |
---|
1346 | cycle = (uint32_t)hal_get_cycles(); |
---|
1347 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
1348 | printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n", |
---|
1349 | __FUNCTION__ , this->process->pid, this->trdid , vpn , cycle ); |
---|
1350 | #endif |
---|
1351 | } |
---|
1352 | } // end if type != CODE |
---|
1353 | } // end if type != STACK and type != kernel |
---|
1354 | } // end loop on vsegs |
---|
1355 | |
---|
1356 | // release the lock protecting the parent VSL |
---|
1357 | remote_queuelock_release( parent_lock_xp ); |
---|
1358 | |
---|
1359 | /* deprecated [AG] : this is already done by the vmm_user_init() function |
---|
1360 | |
---|
1361 | // initialize the child VMM STACK allocator |
---|
1362 | vmm_stack_init( child_vmm ); |
---|
1363 | |
---|
1364 | // initialize the child VMM MMAP allocator |
---|
1365 | vmm_mmap_init( child_vmm ); |
---|
1366 | |
---|
1367 | // initialize instrumentation counters |
---|
1368 | child_vmm->false_pgfault_nr = 0; |
---|
1369 | child_vmm->local_pgfault_nr = 0; |
---|
1370 | child_vmm->global_pgfault_nr = 0; |
---|
1371 | child_vmm->false_pgfault_cost = 0; |
---|
1372 | child_vmm->local_pgfault_cost = 0; |
---|
1373 | child_vmm->global_pgfault_cost = 0; |
---|
1374 | */ |
---|
1375 | // copy base addresses from parent VMM to child VMM |
---|
1376 | child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); |
---|
1377 | child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base)); |
---|
1378 | child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base)); |
---|
1379 | child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base)); |
---|
1380 | child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base)); |
---|
1381 | |
---|
1382 | child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point)); |
---|
1383 | |
---|
1384 | hal_fence(); |
---|
1385 | |
---|
1386 | #if DEBUG_VMM_FORK_COPY |
---|
1387 | cycle = (uint32_t)hal_get_cycles(); |
---|
1388 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
1389 | printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n", |
---|
1390 | __FUNCTION__ , this->process->pid, this->trdid , cycle ); |
---|
1391 | #endif |
---|
1392 | |
---|
1393 | return 0; |
---|
1394 | |
---|
1395 | } // vmm_fork_copy() |
---|
1396 | |
---|
1397 | /////////////////////////////////////// |
---|
1398 | void vmm_destroy( process_t * process ) |
---|
1399 | { |
---|
1400 | xptr_t vseg_xp; |
---|
1401 | vseg_t * vseg; |
---|
1402 | |
---|
1403 | #if DEBUG_VMM_DESTROY |
---|
1404 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1405 | thread_t * this = CURRENT_THREAD; |
---|
1406 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
1407 | printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", |
---|
1408 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
1409 | #endif |
---|
1410 | |
---|
1411 | #if (DEBUG_VMM_DESTROY & 1 ) |
---|
1412 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
1413 | hal_vmm_display( XPTR( local_cxy, process ) , true ); |
---|
1414 | #endif |
---|
1415 | |
---|
1416 | // get pointer on local VMM |
---|
1417 | vmm_t * vmm = &process->vmm; |
---|
1418 | |
---|
1419 | // build extended pointer on VSL root, VSL lock and GPT lock |
---|
1420 | xptr_t vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
1421 | xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); |
---|
1422 | |
---|
1423 | // take the VSL lock |
---|
1424 | remote_queuelock_acquire( vsl_lock_xp ); |
---|
1425 | |
---|
1426 | // scan the VSL to delete all registered vsegs |
---|
1427 | // (we don't use a FOREACH because items are deleted during the scan) |
---|
1428 | xptr_t iter_xp; |
---|
1429 | xptr_t next_xp; |
---|
1430 | for( iter_xp = hal_remote_l64( vsl_root_xp ) ; |
---|
1431 | iter_xp != vsl_root_xp ; |
---|
1432 | iter_xp = next_xp ) |
---|
1433 | { |
---|
1434 | // save extended pointer on next item in xlist |
---|
1435 | next_xp = hal_remote_l64( iter_xp ); |
---|
1436 | |
---|
1437 | // get pointers on current vseg in VSL |
---|
1438 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
1439 | vseg = GET_PTR( vseg_xp ); |
---|
1440 | |
---|
1441 | // delete vseg and release physical pages |
---|
1442 | vmm_remove_vseg( process , vseg ); |
---|
1443 | |
---|
1444 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
1445 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
1446 | printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n", |
---|
1447 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
1448 | #endif |
---|
1449 | |
---|
1450 | } |
---|
1451 | |
---|
1452 | // release the VSL lock |
---|
1453 | remote_queuelock_release( vsl_lock_xp ); |
---|
1454 | |
---|
1455 | // remove all registered MMAP vsegs from free_lists in MMAP allocator |
---|
1456 | uint32_t i; |
---|
1457 | for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ ) |
---|
1458 | { |
---|
1459 | // build extended pointer on free list root |
---|
1460 | xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.free_list_root[i] ); |
---|
1461 | |
---|
1462 | // scan free_list[i] |
---|
1463 | while( !xlist_is_empty( root_xp ) ) |
---|
1464 | { |
---|
1465 | vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); |
---|
1466 | vseg = GET_PTR( vseg_xp ); |
---|
1467 | |
---|
1468 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
1469 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
1470 | printk("\n[%s] found zombi vseg / vpn_base %x / vpn_size %d\n", |
---|
1471 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
1472 | #endif |
---|
1473 | // clean vseg descriptor |
---|
1474 | vseg->vmm = NULL; |
---|
1475 | |
---|
1477 | // remove vseg from the free list |
---|
1477 | xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); |
---|
1478 | |
---|
1479 | // release vseg descriptor |
---|
1480 | vseg_free( vseg ); |
---|
1481 | |
---|
1482 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
1483 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
1484 | printk("\n[%s] zombi vseg released / vpn_base %x / vpn_size %d\n", |
---|
1485 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
1486 | #endif |
---|
1487 | } |
---|
1488 | } |
---|
1489 | |
---|
1490 | // release memory allocated to the GPT itself |
---|
1491 | hal_gpt_destroy( &vmm->gpt ); |
---|
1492 | |
---|
1493 | #if DEBUG_VMM_DESTROY |
---|
1494 | cycle = (uint32_t)hal_get_cycles(); |
---|
1495 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
1496 | printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", |
---|
1497 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle ); |
---|
1498 | #endif |
---|
1499 | |
---|
1500 | } // end vmm_destroy() |
---|
1501 | |
---|
1502 | ///////////////////////////////////////////////// |
---|
1503 | vseg_t * vmm_check_conflict( process_t * process, |
---|
1504 | vpn_t vpn_base, |
---|
1505 | vpn_t vpn_size ) |
---|
1506 | { |
---|
1507 | vmm_t * vmm = &process->vmm; |
---|
1508 | |
---|
1509 | // scan the VSL |
---|
1510 | vseg_t * vseg; |
---|
1511 | xptr_t iter_xp; |
---|
1512 | xptr_t vseg_xp; |
---|
1513 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
1514 | |
---|
1515 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
1516 | { |
---|
1517 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
1518 | vseg = GET_PTR( vseg_xp ); |
---|
1519 | |
---|
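| // two half-open ranges [vpn_base, vpn_base + vpn_size) and |
---|
| // [vseg->vpn_base, vseg->vpn_base + vseg->vpn_size) overlap |
---|
| // iff each one starts before the other one ends : |
---|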
1520 | if( ((vpn_base + vpn_size) > vseg->vpn_base) && |
---|
1521 | (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg; |
---|
1522 | } |
---|
1523 | return NULL; |
---|
1524 | |
---|
1525 | } // end vmm_check_conflict() |
---|
1526 | |
---|
1527 | //////////////////////////////////////////////// |
---|
1528 | vseg_t * vmm_create_vseg( process_t * process, |
---|
1529 | vseg_type_t type, |
---|
1530 | intptr_t base, // ltid for VSEG_TYPE_STACK |
---|
1531 | uint32_t size, |
---|
1532 | uint32_t file_offset, |
---|
1533 | uint32_t file_size, |
---|
1534 | xptr_t mapper_xp, |
---|
1535 | cxy_t cxy ) |
---|
1536 | { |
---|
1537 | vseg_t * vseg; // pointer on allocated vseg descriptor |
---|
1538 | |
---|
1539 | #if DEBUG_VMM_CREATE_VSEG |
---|
1540 | thread_t * this = CURRENT_THREAD; |
---|
1541 | uint32_t cycle; |
---|
1542 | #endif |
---|
1543 | |
---|
1544 | #if (DEBUG_VMM_CREATE_VSEG & 1) |
---|
1545 | cycle = (uint32_t)hal_get_cycles(); |
---|
1546 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
1547 | printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n", |
---|
1548 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1549 | process->pid, vseg_type_str(type), base, cxy, cycle ); |
---|
1550 | #endif |
---|
1551 | |
---|
1552 | // get pointer on VMM |
---|
1553 | vmm_t * vmm = &process->vmm; |
---|
1554 | |
---|
1555 | // allocate a vseg descriptor and initialize it, depending on type |
---|
1556 | // we use specific allocators for "stack" and "mmap" types |
---|
1557 | |
---|
1558 | ///////////////////////////// |
---|
1559 | if( type == VSEG_TYPE_STACK ) |
---|
1560 | { |
---|
1561 | // get vseg from STACK allocator |
---|
1562 | vseg = vmm_stack_alloc( vmm , base ); // base == ltid |
---|
1563 | |
---|
1564 | if( vseg == NULL ) |
---|
1565 | { |
---|
1566 | printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", |
---|
1567 | __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); |
---|
1568 | return NULL; |
---|
1569 | } |
---|
1570 | |
---|
1571 | // initialize vseg |
---|
1572 | vseg->type = type; |
---|
1573 | vseg->vmm = vmm; |
---|
1574 | vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT; |
---|
1575 | vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT); |
---|
1576 | vseg->cxy = cxy; |
---|
1577 | |
---|
1578 | vseg_init_flags( vseg , type ); |
---|
1579 | } |
---|
1580 | ///////////////////////////////// |
---|
1581 | else if( type == VSEG_TYPE_FILE ) |
---|
1582 | { |
---|
1583 | // compute page index (in mapper) for first and last byte |
---|
1584 | vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT; |
---|
1585 | vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1586 | |
---|
1587 | // compute offset in first page and number of pages |
---|
1588 | uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK; |
---|
1589 | vpn_t npages = vpn_max - vpn_min + 1; |
---|
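| // e.g. (illustrative values, assuming 4 Kbytes pages) : file_offset = 0x1800 and |
---|
| // size = 0x3000 give vpn_min = 1, vpn_max = 4, offset = 0x800, npages = 4 |
---|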
1590 | |
---|
1591 | // get vseg from MMAP allocator |
---|
1592 | vseg = vmm_mmap_alloc( vmm , npages ); |
---|
1593 | |
---|
1594 | if( vseg == NULL ) |
---|
1595 | { |
---|
1596 | printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", |
---|
1597 | __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); |
---|
1598 | return NULL; |
---|
1599 | } |
---|
1600 | |
---|
1601 | // initialize vseg |
---|
1602 | vseg->type = type; |
---|
1603 | vseg->vmm = vmm; |
---|
1604 | vseg->min = (vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; |
---|
1605 | vseg->max = vseg->min + size; |
---|
1606 | vseg->file_offset = file_offset; |
---|
1607 | vseg->file_size = file_size; |
---|
1608 | vseg->mapper_xp = mapper_xp; |
---|
1609 | vseg->cxy = cxy; |
---|
1610 | |
---|
1611 | vseg_init_flags( vseg , type ); |
---|
1612 | } |
---|
1613 | ///////////////////////////////////////////////////////////////// |
---|
1614 | else if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) ) |
---|
1615 | { |
---|
1616 | // compute number of required pages in virtual space |
---|
1617 | vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT; |
---|
1618 | if( size & CONFIG_PPM_PAGE_MASK) npages++; |
---|
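| // e.g. (illustrative, assuming 4 Kbytes pages) : size = 0x2100 gives npages = 3 |
---|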
1619 | |
---|
1620 | // allocate vseg from MMAP allocator |
---|
1621 | vseg = vmm_mmap_alloc( vmm , npages ); |
---|
1622 | |
---|
1623 | if( vseg == NULL ) |
---|
1624 | { |
---|
1625 | printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", |
---|
1626 | __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); |
---|
1627 | return NULL; |
---|
1628 | } |
---|
1629 | |
---|
1630 | // initialize vseg |
---|
1631 | vseg->type = type; |
---|
1632 | vseg->vmm = vmm; |
---|
1633 | vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT; |
---|
1634 | vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT); |
---|
1635 | vseg->cxy = cxy; |
---|
1636 | |
---|
1637 | vseg_init_flags( vseg , type ); |
---|
1638 | } |
---|
1639 | ///////////////////////////////////////////////////////////////// |
---|
1640 | else // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg |
---|
1641 | { |
---|
1642 | uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT; |
---|
1643 | uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1644 | |
---|
1645 | // allocate vseg descriptor |
---|
1646 | vseg = vseg_alloc(); |
---|
1647 | |
---|
1648 | if( vseg == NULL ) |
---|
1649 | { |
---|
1650 | printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", |
---|
1651 | __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); |
---|
1652 | return NULL; |
---|
1653 | } |
---|
1654 | // initialize vseg |
---|
1655 | vseg->type = type; |
---|
1656 | vseg->vmm = vmm; |
---|
1657 | vseg->min = base; |
---|
1658 | vseg->max = base + size; |
---|
1659 | vseg->vpn_base = base >> CONFIG_PPM_PAGE_SHIFT; |
---|
1660 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1661 | vseg->file_offset = file_offset; |
---|
1662 | vseg->file_size = file_size; |
---|
1663 | vseg->mapper_xp = mapper_xp; |
---|
1664 | vseg->cxy = cxy; |
---|
1665 | |
---|
1666 | vseg_init_flags( vseg , type ); |
---|
1667 | } |
---|
1668 | |
---|
1669 | // check collisions |
---|
1670 | vseg_t * existing_vseg = vmm_check_conflict( process , vseg->vpn_base , vseg->vpn_size ); |
---|
1671 | |
---|
1672 | if( existing_vseg != NULL ) |
---|
1673 | { |
---|
1674 | printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n" |
---|
1675 | " overlap existing vseg %s [vpn_base %x / vpn_size %x]\n", |
---|
1676 | __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, |
---|
1677 | vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size ); |
---|
1678 | vseg_free( vseg ); |
---|
1679 | return NULL; |
---|
1680 | } |
---|
1681 | |
---|
1682 | // build extended pointer on VSL lock |
---|
1683 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); |
---|
1684 | |
---|
1685 | // take the VSL lock |
---|
1686 | remote_queuelock_acquire( lock_xp ); |
---|
1687 | |
---|
1688 | // attach vseg to VSL |
---|
1689 | vmm_attach_vseg_to_vsl( vmm , vseg ); |
---|
1690 | |
---|
1691 | // release the VSL lock |
---|
1692 | remote_queuelock_release( lock_xp ); |
---|
1693 | |
---|
1694 | #if DEBUG_VMM_CREATE_VSEG |
---|
1695 | cycle = (uint32_t)hal_get_cycles(); |
---|
1696 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
1697 | printk("\n[%s] thread[%x,%x] exit / %s / vpn_base %x / vpn_size %x / cycle %d\n", |
---|
1698 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1699 | vseg_type_str(type), vseg->vpn_base, vseg->vpn_size, cycle ); |
---|
1700 | #endif |
---|
1701 | |
---|
1702 | return vseg; |
---|
1703 | |
---|
1704 | } // vmm_create_vseg() |
---|
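| // Minimal usage sketch (hypothetical values, for illustration only) : |
---|
| //   vseg_t * vseg = vmm_create_vseg( process , VSEG_TYPE_ANON , 0 , 0x4000 , |
---|
| //                                    0 , 0 , XPTR_NULL , local_cxy ); |
---|
| //   if( vseg == NULL ) printk("\n[ERROR] cannot create ANON vseg\n"); |
---|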
1705 | |
---|
1706 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
1707 | // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions |
---|
1708 | // to update the physical page descriptor identified by the <ppn> argument. |
---|
1709 | // It decrements the refcount, sets the dirty bit when required, and releases the physical |
---|
1710 | // page to kmem depending on the vseg type. |
---|
1711 | // - KERNEL : refcount decremented / not released to kmem / dirty bit not set |
---|
1712 | // - FILE : refcount decremented / not released to kmem / dirty bit set when required. |
---|
1713 | // - CODE : refcount decremented / released to kmem / dirty bit not set. |
---|
1714 | // - STACK : refcount decremented / released to kmem / dirty bit not set. |
---|
1715 | // - DATA : refcount decremented / released to kmem if ref / dirty bit not set. |
---|
1716 | // - MMAP : refcount decremented / released to kmem if ref / dirty bit not set. |
---|
1717 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
1718 | // @ process : local pointer on process. |
---|
1719 | // @ vseg : local pointer on vseg. |
---|
1720 | // @ ppn      : released physical page index. |
---|
1721 | // @ dirty : set the dirty bit in page descriptor when non zero. |
---|
1722 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
1723 | static void vmm_ppn_release( process_t * process, |
---|
1724 | vseg_t * vseg, |
---|
1725 | ppn_t ppn, |
---|
1726 | uint32_t dirty ) |
---|
1727 | { |
---|
1728 | bool_t do_kmem_release; |
---|
1729 | |
---|
1730 | // get vseg type |
---|
1731 | vseg_type_t type = vseg->type; |
---|
1732 | |
---|
1733 | // compute is_ref <=> the local cluster is the reference cluster for this process |
---|
1734 | bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy); |
---|
1735 | |
---|
1736 | // get pointers on physical page descriptor |
---|
1737 | xptr_t page_xp = ppm_ppn2page( ppn ); |
---|
1738 | cxy_t page_cxy = GET_CXY( page_xp ); |
---|
1739 | page_t * page_ptr = GET_PTR( page_xp ); |
---|
1740 | |
---|
1741 | // decrement page refcount |
---|
1742 | xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount ); |
---|
1743 | hal_remote_atomic_add( count_xp , -1 ); |
---|
1744 | |
---|
1745 | // compute the do_kmem_release condition depending on vseg type |
---|
1746 | if( (type == VSEG_TYPE_KCODE) || |
---|
1747 | (type == VSEG_TYPE_KDATA) || |
---|
1748 | (type == VSEG_TYPE_KDEV) ) |
---|
1749 | { |
---|
1750 | // no physical page release for KERNEL |
---|
1751 | do_kmem_release = false; |
---|
1752 | } |
---|
1753 | else if( type == VSEG_TYPE_FILE ) |
---|
1754 | { |
---|
1755 | // no physical page release for FILE |
---|
1756 | do_kmem_release = false; |
---|
1757 | |
---|
1758 | // set dirty bit if required |
---|
1759 | if( dirty ) ppm_page_do_dirty( page_xp ); |
---|
1760 | } |
---|
1761 | else if( (type == VSEG_TYPE_CODE) || |
---|
1762 | (type == VSEG_TYPE_STACK) ) |
---|
1763 | { |
---|
1764 | // always release physical page for private vsegs |
---|
1765 | do_kmem_release = true; |
---|
1766 | } |
---|
1767 | else if( (type == VSEG_TYPE_ANON) || |
---|
1768 | (type == VSEG_TYPE_REMOTE) ) |
---|
1769 | { |
---|
1770 | // release physical page if reference cluster |
---|
1771 | do_kmem_release = is_ref; |
---|
1772 | } |
---|
1773 | else if( is_ref ) // vseg_type == DATA in reference cluster |
---|
1774 | { |
---|
1775 | // get extended pointers on forks and lock field in page descriptor |
---|
1776 | xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
1777 | xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
1778 | |
---|
1779 | // take lock protecting "forks" counter |
---|
1780 | remote_busylock_acquire( lock_xp ); |
---|
1781 | |
---|
1782 | // get number of pending forks from page descriptor |
---|
1783 | uint32_t forks = hal_remote_l32( forks_xp ); |
---|
1784 | |
---|
1785 | // decrement pending forks counter if required |
---|
1786 | if( forks ) hal_remote_atomic_add( forks_xp , -1 ); |
---|
1787 | |
---|
1788 | // release lock protecting "forks" counter |
---|
1789 | remote_busylock_release( lock_xp ); |
---|
1790 | |
---|
1791 | // release physical page if forks == 0 |
---|
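| // (a non-zero "forks" counter means a forked child process may still access |
---|
| //  this page copy-on-write, so it cannot be released yet) |
---|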
1792 | do_kmem_release = (forks == 0); |
---|
1793 | } |
---|
1794 | else // vseg_type == DATA not in reference cluster |
---|
1795 | { |
---|
1796 | // no physical page release if not in reference cluster |
---|
1797 | do_kmem_release = false; |
---|
1798 | } |
---|
1799 | |
---|
1800 | // release physical page to relevant kmem when required |
---|
1801 | if( do_kmem_release ) |
---|
1802 | { |
---|
1803 | kmem_req_t req; |
---|
1804 | req.type = KMEM_PPM; |
---|
1805 | req.ptr = GET_PTR( ppm_ppn2base( ppn ) ); |
---|
1806 | |
---|
1807 | kmem_remote_free( page_cxy , &req ); |
---|
1808 | |
---|
1809 | #if DEBUG_VMM_PPN_RELEASE |
---|
1810 | thread_t * this  = CURRENT_THREAD; |
---|
| uint32_t   cycle = (uint32_t)hal_get_cycles();   // <cycle> is needed by the debug test below |
---|
1811 | if( DEBUG_VMM_PPN_RELEASE < cycle ) |
---|
1812 | printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n", |
---|
1813 | __FUNCTION__, this->process->pid, this->trdid, ppn ); |
---|
1814 | #endif |
---|
1815 | |
---|
1816 | } |
---|
1817 | } // end vmm_ppn_release() |
---|
1818 | |
---|
1819 | ////////////////////////////////////////// |
---|
1820 | void vmm_remove_vseg( process_t * process, |
---|
1821 | vseg_t * vseg ) |
---|
1822 | { |
---|
1823 | uint32_t vseg_type; // vseg type |
---|
1824 | vpn_t vpn; // VPN of current PTE |
---|
1825 | vpn_t vpn_min; // VPN of first PTE |
---|
1826 | vpn_t vpn_max; // VPN of last PTE (excluded) |
---|
1827 | ppn_t ppn; // current PTE ppn value |
---|
1828 | uint32_t attr; // current PTE attributes |
---|
1829 | |
---|
1830 | // check arguments |
---|
1831 | assert( __FUNCTION__, (process != NULL), "process argument is NULL" ); |
---|
1832 | assert( __FUNCTION__, (vseg != NULL), "vseg argument is NULL" ); |
---|
1833 | |
---|
1834 | // get pointers on local process VMM |
---|
1835 | vmm_t * vmm = &process->vmm; |
---|
1836 | |
---|
1837 | // build extended pointer on GPT |
---|
1838 | xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt ); |
---|
1839 | |
---|
1840 | // get relevant vseg infos |
---|
1841 | vseg_type = vseg->type; |
---|
1842 | vpn_min = vseg->vpn_base; |
---|
1843 | vpn_max = vpn_min + vseg->vpn_size; |
---|
1844 | |
---|
1845 | #if DEBUG_VMM_REMOVE_VSEG |
---|
1846 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1847 | thread_t * this = CURRENT_THREAD; |
---|
1848 | #endif |
---|
1849 | |
---|
1850 | #if (DEBUG_VMM_REMOVE_VSEG & 1 ) |
---|
1851 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1852 | printk("\n[%s] thread[%x,%x] enters / process %x / type %s / base %x / cycle %d\n", |
---|
1853 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1854 | process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); |
---|
1855 | #endif |
---|
1856 | |
---|
1857 | // loop on PTEs in GPT to unmap all mapped PTE |
---|
1858 | for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) |
---|
1859 | { |
---|
1860 | // get ppn and attr |
---|
1861 | hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); |
---|
1862 | |
---|
1863 | if( attr & GPT_MAPPED ) // PTE is mapped |
---|
1864 | { |
---|
1865 | |
---|
1866 | #if( DEBUG_VMM_REMOVE_VSEG & 1 ) |
---|
1867 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1868 | printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / type %s\n", |
---|
1869 | __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); |
---|
1870 | #endif |
---|
1871 | // unmap GPT entry in local GPT |
---|
1872 | hal_gpt_reset_pte( gpt_xp , vpn ); |
---|
1873 | |
---|
1874 | // release physical page depending on vseg type |
---|
1875 | vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY ); |
---|
1876 | } |
---|
1877 | } |
---|
1878 | |
---|
1879 | // remove vseg from VSL |
---|
1880 | vmm_detach_vseg_from_vsl( vmm , vseg ); |
---|
1881 | |
---|
1882 | // release vseg descriptor depending on vseg type |
---|
1883 | if( vseg_type == VSEG_TYPE_STACK ) |
---|
1884 | { |
---|
1885 | // release slot to local stack allocator |
---|
1886 | vmm_stack_free( vmm , vseg ); |
---|
1887 | } |
---|
1888 | else if( (vseg_type == VSEG_TYPE_ANON) || |
---|
1889 | (vseg_type == VSEG_TYPE_FILE) || |
---|
1890 | (vseg_type == VSEG_TYPE_REMOTE) ) |
---|
1891 | { |
---|
1892 | // release vseg to local mmap allocator |
---|
1893 | vmm_mmap_free( vmm , vseg ); |
---|
1894 | } |
---|
1895 | else |
---|
1896 | { |
---|
1897 | // release vseg descriptor to local kmem |
---|
1898 | vseg_free( vseg ); |
---|
1899 | } |
---|
1900 | |
---|
1901 | #if DEBUG_VMM_REMOVE_VSEG |
---|
1902 | cycle = (uint32_t)hal_get_cycles(); |
---|
1903 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1904 | printk("\n[%s] thread[%x,%x] exit / process %x / type %s / base %x / cycle %d\n", |
---|
1905 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1906 | process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); |
---|
1907 | #endif |
---|
1908 | |
---|
1909 | } // end vmm_remove_vseg() |
---|
1910 | |
---|
1911 | ///////////////////////////////////////////// |
---|
1912 | void vmm_resize_vseg( process_t * process, |
---|
1913 | vseg_t * vseg, |
---|
1914 | intptr_t new_base, |
---|
1915 | intptr_t new_size ) |
---|
1916 | { |
---|
1917 | vpn_t vpn; |
---|
1918 | ppn_t ppn; |
---|
1919 | uint32_t attr; |
---|
1920 | |
---|
1921 | // check arguments |
---|
1922 | assert( __FUNCTION__, (process != NULL), "process argument is NULL" ); |
---|
1923 | assert( __FUNCTION__, (vseg != NULL), "vseg argument is NULL" ); |
---|
1924 | |
---|
1925 | #if DEBUG_VMM_RESIZE_VSEG |
---|
1926 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1927 | thread_t * this = CURRENT_THREAD; |
---|
1928 | #endif |
---|
1929 | |
---|
1930 | #if (DEBUG_VMM_RESIZE_VSEG & 1) |
---|
1931 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1932 | printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", |
---|
1933 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1934 | process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); |
---|
1935 | #endif |
---|
1936 | |
---|
1937 | // get existing vseg vpn_min and vpn_max |
---|
1938 | vpn_t old_vpn_min = vseg->vpn_base; |
---|
1939 | vpn_t old_vpn_max = old_vpn_min + vseg->vpn_size - 1; |
---|
1940 | |
---|
1941 | // compute new vseg vpn_min & vpn_max |
---|
1942 | intptr_t min = new_base; |
---|
1943 | intptr_t max = new_base + new_size; |
---|
1944 | vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1945 | vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
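| // e.g. (illustrative, assuming 4 Kbytes pages) : new_base = 0x5800 and |
---|
| // new_size = 0x2000 give new_vpn_min = 5 and new_vpn_max = 7 |
---|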
1946 | |
---|
1947 | // build extended pointer on GPT |
---|
1948 | xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
1949 | |
---|
1950 | // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min) |
---|
1951 | for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ ) |
---|
1952 | { |
---|
1953 | // get ppn and attr |
---|
1954 | hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); |
---|
1955 | |
---|
1956 | if( attr & GPT_MAPPED ) // PTE is mapped |
---|
1957 | { |
---|
1958 | |
---|
1959 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1960 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1961 | printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", |
---|
1962 | __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); |
---|
1963 | #endif |
---|
1964 | // unmap GPT entry |
---|
1965 | hal_gpt_reset_pte( gpt_xp , vpn ); |
---|
1966 | |
---|
1967 | // release physical page when required |
---|
1968 | vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY ); |
---|
1969 | } |
---|
1970 | } |
---|
1971 | |
---|
1972 | // loop on PTEs in GPT to unmap PTE if (new_vpn_max < vpn <= old_vpn_max) |
---|
1973 | for( vpn = new_vpn_max + 1 ; vpn <= old_vpn_max ; vpn++ ) |
---|
1974 | { |
---|
1975 | // get ppn and attr |
---|
1976 | hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); |
---|
1977 | |
---|
1978 | if( attr & GPT_MAPPED ) // PTE is mapped |
---|
1979 | { |
---|
1980 | |
---|
1981 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1982 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1983 | printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", |
---|
1984 | __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); |
---|
1985 | #endif |
---|
1986 | // unmap GPT entry in local GPT |
---|
1987 | hal_gpt_reset_pte( gpt_xp , vpn ); |
---|
1988 | |
---|
1989 | // release physical page when required |
---|
1990 | vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY ); |
---|
1991 | } |
---|
1992 | } |
---|
1993 | |
---|
1994 | // resize vseg in VSL |
---|
1995 | vseg->min = min; |
---|
1996 | vseg->max = max; |
---|
1997 | vseg->vpn_base = new_vpn_min; |
---|
1998 | vseg->vpn_size = new_vpn_max - new_vpn_min + 1; |
---|
1999 | |
---|
2000 | #if DEBUG_VMM_RESIZE_VSEG |
---|
2001 | cycle = (uint32_t)hal_get_cycles(); |
---|
2002 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
2003 | printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n", |
---|
2004 | __FUNCTION__, this->process->pid, this->trdid, |
---|
2005 | process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); |
---|
2006 | #endif |
---|
2007 | |
---|
2008 | } // end vmm_resize_vseg |
---|
2009 | |
---|
2010 | ///////////////////////////////////////////////////////////////////////////////////////////// |
---|
2011 | // This static function is called twice by the vmm_get_vseg() function. |
---|
2012 | // It scans the - possibly remote - VSL defined by the <vmm_xp> argument to find the vseg |
---|
2013 | // containing a given virtual address <vaddr>. It uses remote accesses to access the remote |
---|
2014 | // VSL if required. The lock protecting the VSL must be taken by the caller. |
---|
2015 | ///////////////////////////////////////////////////////////////////////////////////////////// |
---|
2016 | // @ vmm_xp : extended pointer on the process VMM. |
---|
2017 | // @ vaddr : virtual address. |
---|
2018 | // @ return local pointer on remote vseg if success / return NULL if not found. |
---|
2019 | ///////////////////////////////////////////////////////////////////////////////////////////// |
---|
2020 | static vseg_t * vmm_vseg_from_vaddr( xptr_t vmm_xp, |
---|
2021 | intptr_t vaddr ) |
---|
2022 | { |
---|
2023 | xptr_t iter_xp; |
---|
2024 | xptr_t vseg_xp; |
---|
2025 | vseg_t * vseg; |
---|
2026 | intptr_t min; |
---|
2027 | intptr_t max; |
---|
2028 | |
---|
2029 | // get cluster and local pointer on target VMM |
---|
2030 | vmm_t * vmm_ptr = GET_PTR( vmm_xp ); |
---|
2031 | cxy_t vmm_cxy = GET_CXY( vmm_xp ); |
---|
2032 | |
---|
2033 | // build extended pointer on VSL root |
---|
2034 | xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root ); |
---|
2035 | |
---|
2036 | // scan the list of vsegs in VSL |
---|
2037 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
2038 | { |
---|
2039 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
2040 | vseg = GET_PTR( vseg_xp ); |
---|
2041 | |
---|
2042 | min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) ); |
---|
2043 | max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) ); |
---|
2044 | |
---|
2045 | // return success when match |
---|
2046 | if( (vaddr >= min) && (vaddr < max) ) return vseg; |
---|
2047 | } |
---|
2048 | |
---|
2049 | // return failure |
---|
2050 | return NULL; |
---|
2051 | |
---|
2052 | } // end vmm_vseg_from_vaddr() |
---|
2053 | |
---|
2054 | /////////////////////////////////////////// |
---|
2055 | error_t vmm_get_vseg( process_t * process, |
---|
2056 | intptr_t vaddr, |
---|
2057 | vseg_t ** found_vseg ) |
---|
2058 | { |
---|
2059 | xptr_t loc_lock_xp; // extended pointer on local VSL lock |
---|
2060 | xptr_t ref_lock_xp; // extended pointer on reference VSL lock |
---|
2061 | vseg_t * loc_vseg; // local pointer on local vseg |
---|
2062 | vseg_t * ref_vseg; // local pointer on reference vseg |
---|
2063 | |
---|
2064 | // build extended pointer on local VSL lock |
---|
2065 | loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock ); |
---|
2066 | |
---|
2067 | // get local VSL lock |
---|
2068 | remote_queuelock_acquire( loc_lock_xp ); |
---|
2069 | |
---|
2070 | // try to get vseg from local VSL |
---|
2071 | loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr ); |
---|
2072 | |
---|
2073 | if (loc_vseg == NULL) // vseg not found => access reference VSL |
---|
2074 | { |
---|
2075 | // get extended pointer on reference process |
---|
2076 | xptr_t ref_xp = process->ref_xp; |
---|
2077 | |
---|
2078 | // get cluster and local pointer on reference process |
---|
2079 | cxy_t ref_cxy = GET_CXY( ref_xp ); |
---|
2080 | process_t * ref_ptr = GET_PTR( ref_xp ); |
---|
2081 | |
---|
2082 | if( ref_cxy == local_cxy ) // local is ref => return error |
---|
2083 | { |
---|
2084 | printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", |
---|
2085 | __FUNCTION__, vaddr, process->pid ); |
---|
2086 | |
---|
2087 | // release local VSL lock |
---|
2088 | remote_queuelock_release( loc_lock_xp ); |
---|
2089 | |
---|
2090 | return -1; |
---|
2091 | } |
---|
2092 | else // ref != local => access ref VSL |
---|
2093 | { |
---|
2094 | // build extended pointer on reference VSL lock |
---|
2095 | ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock ); |
---|
2096 | |
---|
2097 | // get reference VSL lock |
---|
2098 | remote_queuelock_acquire( ref_lock_xp ); |
---|
2099 | |
---|
2100 | // try to get vseg from reference VSL |
---|
2101 | ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr ); |
---|
2102 | |
---|
2103 | if( ref_vseg == NULL ) // vseg not found => return error |
---|
2104 | { |
---|
2105 | // release both VSL locks |
---|
2106 | remote_queuelock_release( loc_lock_xp ); |
---|
2107 | remote_queuelock_release( ref_lock_xp ); |
---|
2108 | |
---|
2109 | printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", |
---|
2110 | __FUNCTION__, vaddr, process->pid ); |
---|
2111 | |
---|
2112 | return -1; |
---|
2113 | } |
---|
2114 | else // vseg found => try to update local VSL |
---|
2115 | { |
---|
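| // the vseg exists in the reference VSL but not in the local one : |
---|
| // replicate the descriptor locally, so that later lookups on this |
---|
| // address are resolved without remote accesses |
---|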
2116 | // allocate a local vseg descriptor |
---|
2117 | loc_vseg = vseg_alloc(); |
---|
2118 | |
---|
2119 | if( loc_vseg == NULL ) // no memory => return error |
---|
2120 | { |
---|
2121 | printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n", |
---|
2122 | __FUNCTION__, vaddr, process->pid ); |
---|
2123 | |
---|
2124 | // release both VSL locks |
---|
2125 | remote_queuelock_release( ref_lock_xp ); |
---|
2126 | remote_queuelock_release( loc_lock_xp ); |
---|
2127 | |
---|
2128 | return -1; |
---|
2129 | } |
---|
2130 | else // update local VSL and return success |
---|
2131 | { |
---|
2132 | // initialize local vseg |
---|
2133 | vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) ); |
---|
2134 | |
---|
2135 | // register local vseg in local VSL |
---|
2136 | vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg ); |
---|
2137 | |
---|
2138 | // release both VSL locks |
---|
2139 | remote_queuelock_release( ref_lock_xp ); |
---|
2140 | remote_queuelock_release( loc_lock_xp ); |
---|
2141 | |
---|
2142 | *found_vseg = loc_vseg; |
---|
2143 | return 0; |
---|
2144 | } |
---|
2145 | } |
---|
2146 | } |
---|
2147 | } |
---|
2148 | else // vseg found in local VSL => return success |
---|
2149 | { |
---|
2150 | // release local VSL lock |
---|
2151 | remote_queuelock_release( loc_lock_xp ); |
---|
2152 | |
---|
2153 | *found_vseg = loc_vseg; |
---|
2154 | return 0; |
---|
2155 | } |
---|
2156 | } // end vmm_get_vseg() |
---|
2157 | |
---|
2158 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
2159 | // This static function computes the target cluster to allocate a physical page |
---|
2160 | // for a given <vpn> in a given <vseg>, allocates the page and returns an extended |
---|
2161 | // pointer on the allocated page descriptor. |
---|
2162 | // The vseg cannot have the FILE type. |
---|
2163 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
2164 | // @ vseg : local pointer on vseg. |
---|
2165 | // @ vpn : unmapped vpn. |
---|
2166 | // @ return an extended pointer on the allocated page descriptor. |
---|
2167 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
2168 | static xptr_t vmm_page_allocate( vseg_t * vseg, |
---|
2169 | vpn_t vpn ) |
---|
2170 | { |
---|
2171 | |
---|
2172 | #if DEBUG_VMM_PAGE_ALLOCATE |
---|
2173 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
2174 | thread_t * this = CURRENT_THREAD; |
---|
2175 | if( DEBUG_VMM_PAGE_ALLOCATE < cycle ) |
---|
2176 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
2177 | __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle ); |
---|
2178 | #endif |
---|
2179 | |
---|
2180 | xptr_t page_xp; |
---|
2181 | cxy_t page_cxy; |
---|
2182 | uint32_t index; |
---|
2183 | |
---|
2184 | uint32_t type = vseg->type; |
---|
2185 | uint32_t flags = vseg->flags; |
---|
2186 | uint32_t x_size = LOCAL_CLUSTER->x_size; |
---|
2187 | uint32_t y_size = LOCAL_CLUSTER->y_size; |
---|
2188 | |
---|
2189 | // check vseg type |
---|
2190 | assert( __FUNCTION__, ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" ); |
---|
2191 | |
---|
2192 | // compute target cluster identifier |
---|
2193 | if( flags & VSEG_DISTRIB ) // distributed => cxy depends on vpn LSB |
---|
2194 | { |
---|
2195 | index = vpn & ((x_size * y_size) - 1); |
---|
2196 | page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) ); |
---|
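| // e.g. (illustrative 2x2 mesh) : x_size = y_size = 2 gives index = vpn & 3, |
---|
| // so vpn = 0,1,2,3 map to clusters (0,0),(0,1),(1,0),(1,1) respectively |
---|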
2197 | |
---|
2198 | // If the cluster selected from the VPN's LSBs is not active, we select one randomly |
---|
2199 | if ( cluster_is_active( page_cxy ) == false ) |
---|
2200 | { |
---|
2201 | page_cxy = cluster_random_select(); |
---|
2202 | } |
---|
2203 | } |
---|
2204 | else // other cases => cxy specified in vseg |
---|
2205 | { |
---|
2206 | page_cxy = vseg->cxy; |
---|
2207 | } |
---|
2208 | |
---|
2209 | // allocate one small physical page from target cluster |
---|
2210 | kmem_req_t req; |
---|
2211 | req.type = KMEM_PPM; |
---|
2212 | req.order = 0; |
---|
2213 | req.flags = AF_ZERO; |
---|
2214 | |
---|
2215 | // get local pointer on page base |
---|
2216 | void * ptr = kmem_remote_alloc( page_cxy , &req ); |
---|
| if( ptr == NULL ) return XPTR_NULL;   // report allocation failure to the caller |
---|
2217 | |
---|
2218 | // get extended pointer on page descriptor |
---|
2219 | page_xp = ppm_base2page( XPTR( page_cxy , ptr ) ); |
---|
2220 | |
---|
2221 | #if DEBUG_VMM_PAGE_ALLOCATE |
---|
2222 | cycle = (uint32_t)hal_get_cycles(); |
---|
2223 | if( DEBUG_VMM_PAGE_ALLOCATE < cycle ) |
---|
2224 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
2225 | __FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle ); |
---|
2226 | #endif |
---|
2227 | |
---|
2228 | return page_xp; |
---|
2229 | |
---|
2230 | } // end vmm_page_allocate() |
---|
2231 | |
---|
2232 | //////////////////////////////////////// |
---|
2233 | error_t vmm_get_one_ppn( vseg_t * vseg, |
---|
2234 | vpn_t vpn, |
---|
2235 | ppn_t * ppn ) |
---|
2236 | { |
---|
2237 | error_t error; |
---|
2238 | xptr_t page_xp; // extended pointer on physical page descriptor |
---|
2239 | uint32_t page_id; // missing page index in vseg mapper |
---|
2240 | uint32_t type; // vseg type; |
---|
2241 | |
---|
2242 | type = vseg->type; |
---|
2243 | page_id = vpn - vseg->vpn_base; |
---|
2244 | |
---|
2245 | #if DEBUG_VMM_GET_ONE_PPN |
---|
2246 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
2247 | thread_t * this = CURRENT_THREAD; |
---|
2248 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2249 | printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id %d / cycle %d\n", |
---|
2250 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); |
---|
2251 | #endif |
---|
2252 | |
---|
2253 | #if (DEBUG_VMM_GET_ONE_PPN & 2) |
---|
2254 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2255 | hal_vmm_display( XPTR( local_cxy , this->process ) , true ); |
---|
2256 | #endif |
---|
2257 | |
---|
2258 | // FILE type : get the physical page from the file mapper |
---|
2259 | if( type == VSEG_TYPE_FILE ) |
---|
2260 | { |
---|
2261 | // get extended pointer on mapper |
---|
2262 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
2263 | |
---|
2264 | assert( __FUNCTION__, (mapper_xp != XPTR_NULL), |
---|
2265 | "mapper not defined for a FILE vseg\n" ); |
---|
2266 | |
---|
2267 | // get extended pointer on page descriptor |
---|
2268 | page_xp = mapper_get_page( mapper_xp , page_id ); |
---|
2269 | |
---|
2270 | if ( page_xp == XPTR_NULL ) return EINVAL; |
---|
2271 | } |
---|
2272 | |
---|
2273 | // Other types : allocate a physical page from target cluster, |
---|
2274 | // as defined by vseg type and vpn value |
---|
2275 | else |
---|
2276 | { |
---|
2277 | // allocate one physical page |
---|
2278 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
2279 | |
---|
2280 | if( page_xp == XPTR_NULL ) return -1; |
---|
2281 | |
---|
2282 | // initialise missing page from .elf file mapper for DATA and CODE types |
---|
2283 | // the vseg->mapper_xp field is an extended pointer on the .elf file mapper |
---|
2284 | if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) |
---|
2285 | { |
---|
2286 | // get extended pointer on mapper |
---|
2287 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
2288 | |
---|
2289 | assert( __FUNCTION__, (mapper_xp != XPTR_NULL), |
---|
2290 | "mapper not defined for a CODE or DATA vseg\n" ); |
---|
2291 | |
---|
2292 | // compute missing page offset in vseg |
---|
2293 | uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT; |
---|
2294 | |
---|
2295 | // compute missing page offset in .elf file |
---|
2296 | uint32_t elf_offset = vseg->file_offset + offset; |
---|
2297 | |
---|
2298 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
2299 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2300 | printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", |
---|
2301 | __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); |
---|
2302 | #endif |
---|
2303 | // compute extended pointer on page base |
---|
2304 | xptr_t base_xp = ppm_page2base( page_xp ); |
---|
2305 | |
---|
2306 | // file_size (in .elf mapper) can be smaller than vseg_size (BSS) |
---|
2307 | uint32_t file_size = vseg->file_size; |
---|
2308 | |
---|
2309 | if( file_size < offset ) // missing page fully in BSS |
---|
2310 | { |
---|
2311 | |
---|
2312 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
2313 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2314 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", |
---|
2315 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
2316 | #endif |
---|
2317 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
2318 | { |
---|
2319 | memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
2320 | } |
---|
2321 | else |
---|
2322 | { |
---|
2323 | hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
2324 | } |
---|
2325 | } |
---|
2326 | else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper |
---|
2327 | { |
---|
2328 | |
---|
2329 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
2330 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2331 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", |
---|
2332 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
2333 | #endif |
---|
2334 | error = mapper_move_kernel( mapper_xp, |
---|
2335 | true, // to_buffer |
---|
2336 | elf_offset, |
---|
2337 | base_xp, |
---|
2338 | CONFIG_PPM_PAGE_SIZE ); |
---|
2339 | if( error ) return EINVAL; |
---|
2340 | } |
---|
2341 | else // both in mapper and in BSS : |
---|
2342 | // - (file_size - offset) bytes from mapper |
---|
2343 | // - (page_size + offset - file_size) bytes from BSS |
---|
2344 | { |
---|
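| // e.g. (illustrative, assuming 4 Kbytes pages) : offset = 0x3000 and file_size = 0x3200 |
---|
| // give 0x200 bytes copied from the mapper and 0xE00 bytes zeroed (BSS) |
---|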
2345 | |
---|
2346 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
2347 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2348 | printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" |
---|
2349 | " %d bytes from mapper / %d bytes from BSS\n", |
---|
2350 | __FUNCTION__, this->process->pid, this->trdid, vpn, |
---|
2351 | file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
2352 | #endif |
---|
2353 | // initialize mapper part |
---|
2354 | error = mapper_move_kernel( mapper_xp, |
---|
2355 | true, // to buffer |
---|
2356 | elf_offset, |
---|
2357 | base_xp, |
---|
2358 | file_size - offset ); |
---|
2359 | if( error ) return EINVAL; |
---|
2360 | |
---|
2361 | // initialize BSS part |
---|
2362 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
2363 | { |
---|
2364 | memset( GET_PTR( base_xp ) + file_size - offset , 0 , |
---|
2365 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
2366 | } |
---|
2367 | else |
---|
2368 | { |
---|
2369 | hal_remote_memset( base_xp + file_size - offset , 0 , |
---|
2370 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
2371 | } |
---|
2372 | } |
---|
2373 | |
---|
2374 | } // end if CODE or DATA types |
---|
2375 | } |
---|
2376 | |
---|
2377 | // return ppn |
---|
2378 | *ppn = ppm_page2ppn( page_xp ); |
---|
2379 | |
---|
2380 | #if DEBUG_VMM_GET_ONE_PPN |
---|
2381 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2382 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
2383 | __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); |
---|
2384 | #endif |
---|
2385 | |
---|
2386 | #if (DEBUG_VMM_GET_ONE_PPN & 2) |
---|
2387 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
2388 | hal_vmm_display( XPTR( local_cxy , this->process ) , true ); |
---|
2389 | #endif |
---|
2390 | |
---|
2391 | return 0; |
---|
2392 | |
---|
2393 | } // end vmm_get_one_ppn() |
---|
2394 | |
---|
2395 | /////////////////////////////////////////////////// |
---|
2396 | error_t vmm_handle_page_fault( process_t * process, |
---|
2397 | vpn_t vpn ) |
---|
2398 | { |
---|
2399 | vseg_t * vseg; // vseg containing vpn |
---|
2400 | uint32_t attr; // PTE_ATTR value |
---|
2401 | ppn_t ppn; // PTE_PPN value |
---|
2402 | uint32_t ref_attr; // PTE_ATTR value in reference GPT |
---|
2403 | ppn_t ref_ppn; // PTE_PPN value in reference GPT |
---|
2404 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
2405 | process_t * ref_ptr; // reference process for missing vpn |
---|
2406 | xptr_t local_gpt_xp; // extended pointer on local GPT |
---|
2407 | xptr_t ref_gpt_xp; // extended pointer on reference GPT |
---|
2408 | error_t error; // value returned by called functions |
---|
2409 | |
---|
2410 | thread_t * this = CURRENT_THREAD; |
---|
2411 | |
---|
2412 | #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) |
---|
2413 | uint32_t start_cycle = (uint32_t)hal_get_cycles(); |
---|
2414 | #endif |
---|
2415 | |
---|
2416 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
2417 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2418 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
2419 | __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle ); |
---|
2420 | #endif |
---|
2421 | |
---|
2422 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) |
---|
2423 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2424 | hal_vmm_display( XPTR( local_cxy , this->process ) , true ); |
---|
2425 | #endif |
---|
2426 | |
---|
2427 | // get local vseg (access to reference VSL can be required) |
---|
2428 | error = vmm_get_vseg( process, |
---|
2429 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
2430 | &vseg ); |
---|
2431 | if( error ) |
---|
2432 | { |
---|
2433 | printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in registered vseg\n", |
---|
2434 | __FUNCTION__ , vpn , process->pid, this->trdid ); |
---|
2435 | |
---|
2436 | return EXCP_USER_ERROR; |
---|
2437 | } |
---|
2438 | |
---|
2439 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) |
---|
2440 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2441 | printk("\n[%s] thread[%x,%x] found vseg %s\n", |
---|
2442 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) ); |
---|
2443 | #endif |
---|
2444 | |
---|
2445 | // build extended pointer on local GPT |
---|
2446 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
2447 | |
---|
2448 | // lock PTE in local GPT and get current PPN and attributes |
---|
2449 | error = hal_gpt_lock_pte( local_gpt_xp, |
---|
2450 | vpn, |
---|
2451 | &attr, |
---|
2452 | &ppn ); |
---|
2453 | if( error ) |
---|
2454 | { |
---|
2455 | printk("\n[PANIC] in %s : cannot lock PTE in local GPT / vpn %x / process %x\n", |
---|
2456 | __FUNCTION__ , vpn , process->pid ); |
---|
2457 | |
---|
2458 | return EXCP_KERNEL_PANIC; |
---|
2459 | } |
---|
2460 | |
---|
2461 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) |
---|
2462 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2463 | printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n", |
---|
2464 | __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy ); |
---|
2465 | #endif |
---|
2466 | |
---|
2467 | // handle page fault only if local PTE still unmapped after lock |
---|
2468 | if( (attr & GPT_MAPPED) == 0 ) |
---|
2469 | { |
---|
2470 | // get reference process cluster and local pointer |
---|
2471 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
2472 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
2473 | |
---|
2474 | /////////////// private vseg or (local == reference) |
---|
2475 | /////////////// => access only the local GPT |
---|
2476 | if( (vseg->type == VSEG_TYPE_STACK) || |
---|
2477 | (vseg->type == VSEG_TYPE_CODE) || |
---|
2478 | (ref_cxy == local_cxy ) ) |
---|
2479 | { |
---|
2480 | |
---|
2481 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) |
---|
2482 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2483 | printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n", |
---|
2484 | __FUNCTION__, this->process->pid, this->trdid, |
---|
2485 | local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() ); |
---|
2486 | #endif |
---|
2487 | // allocate and initialise a physical page |
---|
2488 | error = vmm_get_one_ppn( vseg , vpn , &ppn ); |
---|
2489 | |
---|
2490 | if( error ) |
---|
2491 | { |
---|
2492 | printk("\n[ERROR] in %s : no physical page / process = %x / vpn = %x\n", |
---|
2493 | __FUNCTION__ , process->pid , vpn ); |
---|
2494 | |
---|
2495 | // unlock PTE in local GPT |
---|
2496 | hal_gpt_unlock_pte( local_gpt_xp , vpn ); |
---|
2497 | |
---|
2498 | return EXCP_KERNEL_PANIC; |
---|
2499 | } |
---|
2500 | |
---|
2501 | // define attr from vseg flags |
---|
2502 | attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE; |
---|
2503 | if( vseg->flags & VSEG_USER ) attr |= GPT_USER; |
---|
2504 | if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE; |
---|
2505 | if( vseg->flags & VSEG_EXEC ) attr |= GPT_EXECUTABLE; |
---|
2506 | if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE; |
---|
2507 | |
---|
2508 | // set PTE to local GPT |
---|
2509 | // it unlocks this PTE |
---|
2510 | hal_gpt_set_pte( local_gpt_xp, |
---|
2511 | vpn, |
---|
2512 | attr, |
---|
2513 | ppn ); |
---|
2514 | |
---|
2515 | #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) |
---|
2516 | uint32_t end_cycle = (uint32_t)hal_get_cycles(); |
---|
2517 | #endif |
---|
2518 | |
---|
2519 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
2520 | if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2521 | printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n", |
---|
2522 | __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); |
---|
2523 | #endif |
---|
2524 | |
---|
2525 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) |
---|
2526 | if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2527 | hal_vmm_display( XPTR( local_cxy , this->process ) , true ); |
---|
2528 | #endif |
---|
2529 | |
---|
2530 | #if CONFIG_INSTRUMENTATION_PGFAULTS |
---|
2531 | uint32_t cost = end_cycle - start_cycle; |
---|
2532 | this->info.local_pgfault_nr++; |
---|
2533 | this->info.local_pgfault_cost += cost; |
---|
2534 | if( cost > this->info.local_pgfault_max ) this->info.local_pgfault_max = cost; |
---|
2535 | #endif |
---|
2536 | return EXCP_NON_FATAL; |
---|
2537 | |
---|
2538 | } // end local GPT access |
---|
2539 | |
---|
2540 | /////////////////// public vseg and (local != reference) |
---|
2541 | /////////////////// => access ref GPT to update local GPT |
---|
2542 | else |
---|
2543 | { |
---|
2544 | |
---|
2545 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) |
---|
2546 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2547 | printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n", |
---|
2548 | __FUNCTION__, this->process->pid, this->trdid, |
---|
2549 | local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() ); |
---|
2550 | #endif |
---|
2551 | // build extended pointer on reference GPT |
---|
2552 | ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
2553 | |
---|
2554 | // lock PTE in reference GPT and get current PPN and attributes |
---|
2555 | error = hal_gpt_lock_pte( ref_gpt_xp, |
---|
2556 | vpn, |
---|
2557 | &ref_attr, |
---|
2558 | &ref_ppn ); |
---|
2559 | if( error ) |
---|
2560 | { |
---|
2561 | printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n", |
---|
2562 | __FUNCTION__ , vpn , process->pid ); |
---|
2563 | |
---|
2564 | // unlock PTE in local GPT |
---|
2565 | hal_gpt_unlock_pte( local_gpt_xp , vpn ); |
---|
2566 | |
---|
2567 | return EXCP_KERNEL_PANIC; |
---|
2568 | } |
---|
2569 | |
---|
2570 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) |
---|
2571 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2572 | printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n", |
---|
2573 | __FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn ); |
---|
2574 | #endif |
---|
2575 | |
---|
2576 | if( ref_attr & GPT_MAPPED ) // false page fault |
---|
2577 | { |
---|
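| // a "false" page fault : the PTE is already mapped in the reference GPT |
---|
| // (mapped by another thread in another cluster), so no physical page is |
---|
| // allocated here : the local GPT is simply updated from the reference GPT |
---|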
2578 | // update local GPT from reference GPT values |
---|
2579 | // this unlocks the PTE in local GPT |
---|
2580 | hal_gpt_set_pte( local_gpt_xp, |
---|
2581 | vpn, |
---|
2582 | ref_attr, |
---|
2583 | ref_ppn ); |
---|
2584 | |
---|
2585 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) |
---|
2586 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2587 | printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n", |
---|
2588 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
2589 | #endif |
---|
2590 | |
---|
2591 | // unlock the PTE in reference GPT |
---|
2592 | hal_gpt_unlock_pte( ref_gpt_xp, vpn ); |
---|
2593 | |
---|
2594 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT &1) |
---|
2595 | if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2596 | printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n", |
---|
2597 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
2598 | #endif |
---|
2599 | |
---|
2600 | #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) |
---|
2601 | uint32_t end_cycle = (uint32_t)hal_get_cycles(); |
---|
2602 | #endif |
---|
2603 | |
---|
2604 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
2605 | if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2606 | printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n", |
---|
2607 | __FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle ); |
---|
2608 | #endif |
---|
2609 | |
---|
2610 | #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) |
---|
2611 | if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) |
---|
2612 | hal_vmm_display( XPTR( local_cxy , this->process ) , true ); |
---|
2613 | #endif |
---|
2614 | |
---|
2615 | #if CONFIG_INSTRUMENTATION_PGFAULTS |
---|
2616 | uint32_t cost = end_cycle - start_cycle; |
---|
2617 | this->info.false_pgfault_nr++; |
---|
2618 | this->info.false_pgfault_cost += cost; |
---|
2619 | if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost; |
---|
2620 | #endif |
---|
2621 | return EXCP_NON_FATAL; |
---|
2622 | } |
---|
            else                               // true page fault
            {
                // allocate and initialise a physical page depending on the vseg type
                error = vmm_get_one_ppn( vseg , vpn , &ppn );

                if( error )
                {
                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
                    __FUNCTION__ , process->pid , vpn );

                    // unlock PTE in local GPT and in reference GPT
                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
                    hal_gpt_unlock_pte( ref_gpt_xp   , vpn );

                    return EXCP_KERNEL_PANIC;
                }

                // define attr from vseg flags
                attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
                if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
                if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
                if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
                if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif
                // set PTE in reference GPT
                // this unlocks the PTE
                hal_gpt_set_pte( ref_gpt_xp,
                                 vpn,
                                 attr,
                                 ppn );

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

                // set PTE in local GPT
                // this unlocks the PTE
                hal_gpt_set_pte( local_gpt_xp,
                                 vpn,
                                 attr,
                                 ppn );

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t end_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
#endif

#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
hal_vmm_display( XPTR( local_cxy , this->process ) , true );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t cost = end_cycle - start_cycle;
this->info.global_pgfault_nr++;
this->info.global_pgfault_cost += cost;
if( cost > this->info.global_pgfault_max ) this->info.global_pgfault_max = cost;
#endif

                return EXCP_NON_FATAL;
            }
        }
    }
    else   // page has been locally mapped by another concurrent thread
    {
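        // another thread already installed a valid mapping for this vpn, so this thread
        // only releases the PTE lock; for instrumentation purposes this case is counted
        // as a false page fault, since no physical page had to be allocated.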
        // unlock the PTE in local GPT
        hal_gpt_unlock_pte( local_gpt_xp , vpn );

#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
uint32_t end_cycle = (uint32_t)hal_get_cycles();
#endif

#if DEBUG_VMM_HANDLE_PAGE_FAULT
if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, vpn, ppn, attr, end_cycle );
#endif

#if CONFIG_INSTRUMENTATION_PGFAULTS
uint32_t cost = end_cycle - start_cycle;
this->info.false_pgfault_nr++;
this->info.false_pgfault_cost += cost;
if( cost > this->info.false_pgfault_max ) this->info.false_pgfault_max = cost;
#endif

        return EXCP_NON_FATAL;
    }

}  // end vmm_handle_page_fault()

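////////////////////////////////////////////////////////////////////////////////////////////
// Summary of the function below (derived from its body): it handles a Copy-On-Write
// exception on the page identified by the <vpn> argument for the <process> argument.
// It locks the relevant PTE (local GPT for a private vseg, reference GPT for a public
// vseg), allocates and copies a new physical page when the old page is still shared by
// pending forks, and maps the resulting page with GPT_WRITABLE set and GPT_COW reset.
////////////////////////////////////////////////////////////////////////////////////////////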
////////////////////////////////////////////
error_t vmm_handle_cow( process_t * process,
                        vpn_t       vpn )
{
    vseg_t    * vseg;            // vseg containing vpn
    xptr_t      gpt_xp;          // extended pointer on GPT (local or reference)
    gpt_t     * gpt_ptr;         // local pointer on GPT (local or reference)
    cxy_t       gpt_cxy;         // GPT cluster identifier
    uint32_t    old_attr;        // current PTE_ATTR value
    ppn_t       old_ppn;         // current PTE_PPN value
    uint32_t    new_attr;        // new PTE_ATTR value
    ppn_t       new_ppn;         // new PTE_PPN value
    cxy_t       ref_cxy;         // reference process cluster
    process_t * ref_ptr;         // local pointer on reference process
    error_t     error;

    thread_t * this = CURRENT_THREAD;

#if DEBUG_VMM_HANDLE_COW
uint32_t cycle = (uint32_t)hal_get_cycles();
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
#endif

#if (DEBUG_VMM_HANDLE_COW & 2)
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    // get local vseg
    error = vmm_get_vseg( process,
                          (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
                          &vseg );
    if( error )
    {
        printk("\n[ERROR] in %s : vpn %x in thread[%x,%x] not in a registered vseg\n",
        __FUNCTION__, vpn, process->pid, this->trdid );

        return EXCP_USER_ERROR;
    }

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get vseg %s\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
#endif

    // get reference process cluster and local pointer
    ref_cxy = GET_CXY( process->ref_xp );
    ref_ptr = GET_PTR( process->ref_xp );

    // build pointers on relevant GPT
    // - access only local GPT for a private vseg
    // - access reference GPT and all copies for a public vseg
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        gpt_cxy = local_cxy;
        gpt_ptr = &process->vmm.gpt;
        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
    }
    else
    {
        gpt_cxy = ref_cxy;
        gpt_ptr = &ref_ptr->vmm.gpt;
        gpt_xp  = XPTR( gpt_cxy , gpt_ptr );
    }

    // lock target PTE in relevant GPT (local or reference)
    // and get current PTE value
    error = hal_gpt_lock_pte( gpt_xp,
                              vpn,
                              &old_attr,
                              &old_ppn );
    if( error )
    {
        printk("\n[PANIC] in %s : cannot lock PTE in GPT / cxy %x / vpn %x / process %x\n",
        __FUNCTION__ , gpt_cxy, vpn , process->pid );

        return EXCP_KERNEL_PANIC;
    }

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
#endif

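    // at this point the PTE is locked; if the faulty write does not actually target a
    // mapped copy-on-write page, it is a genuine protection violation and is reported
    // to the caller as a user error rather than handled by the kernel.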
    // return user error if COW attribute not set or PTE2 unmapped
    if( ((old_attr & GPT_COW) == 0) || ((old_attr & GPT_MAPPED) == 0) )
    {
        hal_gpt_unlock_pte( gpt_xp , vpn );

        return EXCP_USER_ERROR;
    }

    // get pointers on physical page descriptor
    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    // get extended pointers on forks and lock field in page descriptor
    xptr_t forks_xp      = XPTR( page_cxy , &page_ptr->forks );
    xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock );

    // take lock protecting "forks" counter
    remote_busylock_acquire( forks_lock_xp );

    // get number of pending forks from page descriptor
    uint32_t forks = hal_remote_l32( forks_xp );

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
#endif

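    // if at least one fork is still pending on this page, the page is shared and cannot
    // be modified in place: a new physical page is allocated and the old content copied
    // into it; otherwise this process is the only user of the page, and the existing
    // page can simply be made writable again.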
    if( forks )        // pending fork => allocate a new page, and copy old to new
    {
        // decrement pending forks counter in page descriptor
        hal_remote_atomic_add( forks_xp , -1 );

        // release lock protecting "forks" counter
        remote_busylock_release( forks_lock_xp );

        // allocate a new physical page depending on vseg type
        page_xp = vmm_page_allocate( vseg , vpn );

        if( page_xp == XPTR_NULL )
        {
            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
            __FUNCTION__ , vpn, process->pid );

            hal_gpt_unlock_pte( gpt_xp , vpn );

            return EXCP_KERNEL_PANIC;
        }

        // compute allocated page PPN
        new_ppn = ppm_page2ppn( page_xp );

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
#endif

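        // the old and the new physical pages may belong to different clusters,
        // hence the copy below uses the remote memcpy primitive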
        // copy old page content to new page
        hal_remote_memcpy( ppm_ppn2base( new_ppn ),
                           ppm_ppn2base( old_ppn ),
                           CONFIG_PPM_PAGE_SIZE );

#if DEBUG_VMM_HANDLE_COW
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] copied old page to new page\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    }
    else               // no pending fork => keep the existing page
    {
        // release lock protecting "forks" counter
        remote_busylock_release( forks_lock_xp );

#if (DEBUG_VMM_HANDLE_COW & 1)
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
__FUNCTION__, this->process->pid, this->trdid, old_ppn );
#endif
        new_ppn = old_ppn;
    }

    // build new_attr : set WRITABLE, reset COW, reset LOCKED
    new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED));

#if (DEBUG_VMM_HANDLE_COW & 1)
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
#endif

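    // the new writable, non-COW mapping must become visible to every thread that can
    // access this vseg: a private vseg is only mapped in the local GPT, while a public
    // vseg may be mapped in the reference GPT and in its copies in other clusters.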
    // update the relevant GPT(s)
    // - private vseg => update only the local GPT
    // - public vseg => update the reference GPT AND all the GPT copies
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        // set new PTE in local gpt
        hal_gpt_set_pte( gpt_xp,
                         vpn,
                         new_attr,
                         new_ppn );
    }
    else
    {
        // set new PTE in all GPT copies
        vmm_global_update_pte( process,
                               vpn,
                               new_attr,
                               new_ppn );
    }

#if DEBUG_VMM_HANDLE_COW
cycle = (uint32_t)hal_get_cycles();
if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
#endif

#if (DEBUG_VMM_HANDLE_COW & 2)
hal_vmm_display( XPTR( local_cxy , process ) , true );
#endif

    return EXCP_NON_FATAL;

}  // end vmm_handle_cow()
