/*
 * vmm.c - virtual memory manager related operations definition.
 *
 * Authors   Ghassan Almaless       (2008,2009,2010,2011,2012)
 *           Mohamed Lamine Karaoui (2015)
 *           Alain Greiner          (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_gpt.h>
#include <hal_vmm.h>
#include <hal_macros.h>
#include <printk.h>
#include <memcpy.h>
#include <remote_rwlock.h>
#include <remote_queuelock.h>
#include <list.h>
#include <xlist.h>
#include <bits.h>
#include <process.h>
#include <thread.h>
#include <vseg.h>
#include <cluster.h>
#include <scheduler.h>
#include <vfs.h>
#include <mapper.h>
#include <page.h>
#include <kmem.h>
#include <vmm.h>
#include <hal_exception.h>

//////////////////////////////////////////////////////////////////////////////////
// Extern global variables
//////////////////////////////////////////////////////////////////////////////////

extern process_t  process_zero;      // allocated in cluster.c

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM STACK specific allocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in]  pointer on VMM.
// @ ltid     : [in]  requested slot == local user thread identifier.
// @ vpn_base : [out] first allocated page
// @ vpn_size : [out] number of allocated pages
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_stack_alloc( vmm_t  * vmm,
                             ltid_t   ltid,
                             vpn_t  * vpn_base,
                             vpn_t  * vpn_size )
{
    // check ltid argument
    assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
    "slot index %d too large for a user stack vseg", ltid );

    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // get lock on stack allocator
    busylock_acquire( &mgr->lock );

    // check requested slot is available
    assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
    "slot index %d already allocated", ltid );

    // update bitmap
    bitmap_set( &mgr->bitmap , ltid );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // return vpn_base and vpn_size (the first page of the slot is not allocated)
    *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;

}  // end vmm_stack_alloc()
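
// Note on the stack slot layout (derived from the arithmetic above, values below are
// purely illustrative): the user stack zone is divided into fixed-size slots of
// CONFIG_VMM_STACK_SIZE pages, indexed by ltid. The first page of each slot is never
// returned to the caller (vpn_base is shifted by one page and vpn_size reduced by one),
// presumably so that an unmapped guard page separates adjacent user stacks.
// For a hypothetical slot size of 8 pages and a stack zone starting at vpn 0x1000,
// slot 2 would cover:
//
//     slot window : vpn [0x1010 .. 0x1017]
//     guard page  : vpn  0x1010             (never allocated)
//     usable part : vpn [0x1011 .. 0x1017]  (*vpn_base = 0x1011, *vpn_size = 7)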

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_remove_vseg() function, and implements
// the VMM STACK specific deallocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm   : [in] pointer on VMM.
// @ vseg  : [in] pointer on released vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_stack_free( vmm_t  * vmm,
                            vseg_t * vseg )
{
    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // compute slot index
    uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;

    // check index
    assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
    "slot index %d too large for a user stack vseg", index );

    // check released slot is allocated
    assert( (bitmap_state( &mgr->bitmap , index ) == true),
    "released slot index %d not allocated", index );

    // get lock on stack allocator
    busylock_acquire( &mgr->lock );

    // update stacks_bitmap
    bitmap_clear( &mgr->bitmap , index );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

}  // end vmm_stack_free()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM MMAP specific allocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in]  pointer on VMM.
// @ npages   : [in]  requested number of pages.
// @ vpn_base : [out] first allocated page.
// @ vpn_size : [out] actual number of allocated pages.
////////////////////////////////////////////////////////////////////////////////////////////
static error_t vmm_mmap_alloc( vmm_t * vmm,
                               vpn_t   npages,
                               vpn_t * vpn_base,
                               vpn_t * vpn_size )
{
    uint32_t   order;
    xptr_t     vseg_xp;
    vseg_t   * vseg;
    vpn_t      base;
    vpn_t      size;
    vpn_t      free;

#if DEBUG_VMM_MMAP_ALLOC
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_MMAP_ALLOC < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    // number of allocated pages must be a power of 2
    // compute actual size and order
    size  = POW2_ROUNDUP( npages );
    order = bits_log2( size );

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // build extended pointer on root of zombi_list[order]
    xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );

    // take lock protecting zombi_lists
    busylock_acquire( &mgr->lock );

    // get vseg from zombi_list or from mmap zone
    if( xlist_is_empty( root_xp ) )          // from mmap zone
    {
        // check overflow
        free = mgr->first_free_vpn;
        if( (free + size) > mgr->vpn_size )
        {
            // release lock before reporting the overflow
            busylock_release( &mgr->lock );
            return -1;
        }

        // update MMAP allocator
        mgr->first_free_vpn += size;

        // compute base
        base = free;
    }
    else                                     // from zombi_list
    {
        // get pointer on zombi vseg from zombi_list
        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        // remove vseg from free-list
        xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

        // compute base
        base = vseg->vpn_base;
    }

    // release lock
    busylock_release( &mgr->lock );

#if DEBUG_VMM_MMAP_ALLOC
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_MMAP_ALLOC < cycle )
printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
#endif

    // return vpn_base, vpn_size
    *vpn_base = base;
    *vpn_size = size;
    return 0;

}  // end vmm_mmap_alloc()
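
// Worked example for the allocator above (illustrative values, not taken from any
// configuration file): a request for npages = 5 is rounded up to size = 8 pages
// (order = 3). If zombi_list[3] is empty, the 8 pages are carved from the mmap zone
// at first_free_vpn; otherwise the most recently released 8-page vseg is recycled
// from zombi_list[3]. In both cases the caller receives a power-of-2 sized window,
// which is what allows released vsegs to be reused by order.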

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_remove_vseg() function, and implements
// the VMM MMAP specific deallocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm   : [in] pointer on VMM.
// @ vseg  : [in] pointer on released vseg.
////////////////////////////////////////////////////////////////////////////////////////////
static void vmm_mmap_free( vmm_t  * vmm,
                           vseg_t * vseg )
{
    // get pointer on mmap allocator
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // compute zombi_list order
    uint32_t order = bits_log2( vseg->vpn_size );

    // take lock protecting zombi lists
    busylock_acquire( &mgr->lock );

    // update relevant zombi_list
    xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
                     XPTR( local_cxy , &vseg->xlist ) );

    // release lock
    busylock_release( &mgr->lock );

}  // end of vmm_mmap_free()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function registers one vseg in the VSL of a local process descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// vmm  : [in] pointer on VMM.
// vseg : [in] pointer on vseg.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
                             vseg_t * vseg )
{
    // update vseg descriptor
    vseg->vmm = vmm;

    // increment vsegs number
    vmm->vsegs_nr++;

    // add vseg in vmm list
    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
                    XPTR( local_cxy , &vseg->xlist ) );

}  // end vmm_attach_vseg_to_vsl()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function removes one vseg from the VSL of a local process descriptor.
////////////////////////////////////////////////////////////////////////////////////////////
// vmm  : [in] pointer on VMM.
// vseg : [in] pointer on vseg.
////////////////////////////////////////////////////////////////////////////////////////////
void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
                               vseg_t * vseg )
{
    // update vseg descriptor
    vseg->vmm = NULL;

    // decrement vsegs number
    vmm->vsegs_nr--;

    // remove vseg from VSL
    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

}  // end vmm_detach_vseg_from_vsl()



////////////////////////////////////////////
error_t vmm_user_init( process_t * process )
{
    vseg_t  * vseg_args;
    vseg_t  * vseg_envs;
    intptr_t  base;
    intptr_t  size;
    uint32_t  i;

#if DEBUG_VMM_USER_INIT
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_INIT )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // check UTILS zone
    assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
             (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
    "UTILS zone too small\n" );

    // check STACK zone
    assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
             (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    "STACK zone too small\n");

    // register "args" vseg in VSL
    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_args = vmm_create_vseg( process,
                                 VSEG_TYPE_DATA,
                                 base,
                                 size,
                                 0,             // file_offset unused
                                 0,             // file_size unused
                                 XPTR_NULL,     // mapper_xp unused
                                 local_cxy );
    if( vseg_args == NULL )
    {
        printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->args_vpn_base = base;

    // register "envs" vseg in VSL
    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_envs = vmm_create_vseg( process,
                                 VSEG_TYPE_DATA,
                                 base,
                                 size,
                                 0,             // file_offset unused
                                 0,             // file_size unused
                                 XPTR_NULL,     // mapper_xp unused
                                 local_cxy );
    if( vseg_envs == NULL )
    {
        printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->envs_vpn_base = base;

    // initialize STACK allocator
    vmm->stack_mgr.bitmap   = 0;
    vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );

    // initialize MMAP allocator
    vmm->mmap_mgr.vpn_base       = CONFIG_VMM_HEAP_BASE;
    vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
    for( i = 0 ; i < 32 ; i++ )
    {
        xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
    }

    // initialize instrumentation counters
    vmm->pgfault_nr = 0;

    hal_fence();

#if DEBUG_VMM_USER_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_INIT )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

    return 0;

}  // end vmm_user_init()
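
// Informal sketch of the user virtual space layout set up above (zone boundaries are
// the CONFIG_VMM_* parameters, so actual values depend on the platform configuration;
// the placement of the ELF image is an assumption inferred from the UTILS zone check):
//
//     [CONFIG_VMM_UTILS_BASE .. CONFIG_VMM_ELF_BASE  [ : "args" then "envs" vsegs
//     [CONFIG_VMM_ELF_BASE   .. CONFIG_VMM_HEAP_BASE [ : presumably code/data from the .elf
//     [CONFIG_VMM_HEAP_BASE  .. CONFIG_VMM_STACK_BASE[ : mmap zone (FILE/ANON/REMOTE vsegs)
//     [CONFIG_VMM_STACK_BASE .. CONFIG_VMM_VSPACE_SIZE[ : per-thread user stack slots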

//////////////////////////////////////////
void vmm_user_reset( process_t * process )
{
    xptr_t       vseg_xp;
    vseg_t     * vseg;
    vseg_type_t  vseg_type;

#if DEBUG_VMM_USER_RESET
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

#if (DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
hal_vmm_display( process , true );
#endif

    // get pointer on local VMM
    vmm_t * vmm = &process->vmm;

    // build extended pointer on VSL root and VSL lock
    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    // take the VSL lock
    remote_rwlock_wr_acquire( lock_xp );

    // scan the VSL to delete all non kernel vsegs
    // (we don't use a FOREACH in case of item deletion)
    xptr_t  iter_xp;
    xptr_t  next_xp;
    for( iter_xp = hal_remote_l64( root_xp ) ;
         iter_xp != root_xp ;
         iter_xp = next_xp )
    {
        // save extended pointer on next item in xlist
        next_xp = hal_remote_l64( iter_xp );

        // get pointers on current vseg in VSL
        vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg      = GET_PTR( vseg_xp );
        vseg_type = vseg->type;

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        // delete non kernel vseg
        if( (vseg_type != VSEG_TYPE_KCODE) &&
            (vseg_type != VSEG_TYPE_KDATA) &&
            (vseg_type != VSEG_TYPE_KDEV ) )
        {
            // remove vseg from VSL
            vmm_remove_vseg( process , vseg );

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
        else
        {

#if( DEBUG_VMM_USER_RESET & 1 )
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
    }  // end loop on vsegs in VSL

    // release the VSL lock
    remote_rwlock_wr_release( lock_xp );

    // FIXME: the process copies (in other clusters) must also be handled...

#if DEBUG_VMM_USER_RESET
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_USER_RESET < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end vmm_user_reset()

////////////////////////////////////////////////
void vmm_global_update_pte( process_t * process,
                            vpn_t       vpn,
                            uint32_t    attr,
                            ppn_t       ppn )
{
    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

#if DEBUG_VMM_UPDATE_PTE
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

    // check cluster is reference
    assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n");

    // get extended pointer on root of process copies xlist in owner cluster
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // loop on destination process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_UPDATE_PTE & 0x1)
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // update remote GPT
        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    }

#if DEBUG_VMM_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

}  // end vmm_global_update_pte()

///////////////////////////////////////
void vmm_set_cow( process_t * process )
{
    vmm_t         * vmm;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    xptr_t          vseg_root_xp;
    xptr_t          vseg_iter_xp;

    xptr_t          vseg_xp;
    vseg_t        * vseg;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

#if DEBUG_VMM_SET_COW
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

    // check cluster is reference
    assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
    "local cluster is not process reference cluster\n");

    // get pointer on reference VMM
    vmm = &process->vmm;

    // get extended pointer on root of process copies xlist in owner cluster
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // get extended pointer on root of vsegs xlist from reference VMM
    vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    // loop on destination process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handling process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // loop on vsegs in (local) reference process VSL
        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
        {
            // get pointer on vseg
            vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
            vseg    = GET_PTR( vseg_xp );

            assert( (GET_CXY( vseg_xp ) == local_cxy) ,
            "all vsegs in reference VSL must be local\n" );

            // get vseg type, base and size
            uint32_t type     = vseg->type;
            vpn_t    vpn_base = vseg->vpn_base;
            vpn_t    vpn_size = vseg->vpn_size;

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
            // only DATA, ANON and REMOTE vsegs
            if( (type == VSEG_TYPE_DATA)  ||
                (type == VSEG_TYPE_ANON)  ||
                (type == VSEG_TYPE_REMOTE) )
            {
                vpn_t      vpn;
                uint32_t   attr;
                ppn_t      ppn;
                xptr_t     page_xp;
                cxy_t      page_cxy;
                page_t   * page_ptr;
                xptr_t     forks_xp;
                xptr_t     lock_xp;

                // update flags in remote GPT
                hal_gpt_set_cow( remote_gpt_xp,
                                 vpn_base,
                                 vpn_size );

                // atomically increment pending forks counter in physical pages,
                // for all vseg pages that are mapped in reference cluster
                if( remote_process_cxy == local_cxy )
                {
                    // scan all pages in vseg
                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                    {
                        // get page attributes and PPN from reference GPT
                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn );

                        // atomically update pending forks counter if page is mapped
                        if( attr & GPT_MAPPED )
                        {
                            // get pointers and cluster on page descriptor
                            page_xp  = ppm_ppn2page( ppn );
                            page_cxy = GET_CXY( page_xp );
                            page_ptr = GET_PTR( page_xp );

                            // get extended pointers on "forks" and "lock"
                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                            // take lock protecting "forks" counter
                            remote_busylock_acquire( lock_xp );

                            // increment "forks"
                            hal_remote_atomic_add( forks_xp , 1 );

                            // release lock protecting "forks" counter
                            remote_busylock_release( lock_xp );
                        }
                    }   // end loop on vpn
                }   // end if local
            }   // end if vseg type
        }   // end loop on vsegs
    }   // end loop on process copies

#if DEBUG_VMM_SET_COW
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

}  // end vmm_set_cow()
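
// Note on the copy-on-write protocol implemented by vmm_set_cow() and vmm_fork_copy():
// the writable pages of DATA / ANON / REMOTE vsegs are made read-only (COW flag) in all
// GPT copies, and the per-page "forks" counter records how many mappings still share
// the physical page. The actual duplication of a shared page is deferred: it is
// presumably performed by the write page-fault handler, which allocates a new physical
// page, copies the content, and decrements "forks" (see also the decrement performed by
// vmm_remove_vseg() when a DATA vseg is deleted in the reference cluster).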

/////////////////////////////////////////////////
error_t vmm_fork_copy( process_t * child_process,
                       xptr_t      parent_process_xp )
{
    error_t     error;
    cxy_t       parent_cxy;
    process_t * parent_process;
    vmm_t     * parent_vmm;
    xptr_t      parent_lock_xp;
    vmm_t     * child_vmm;
    xptr_t      iter_xp;
    xptr_t      parent_vseg_xp;
    vseg_t    * parent_vseg;
    vseg_t    * child_vseg;
    uint32_t    type;
    bool_t      cow;
    vpn_t       vpn;
    vpn_t       vpn_base;
    vpn_t       vpn_size;
    xptr_t      page_xp;        // extended pointer on page descriptor
    page_t    * page_ptr;
    cxy_t       page_cxy;
    xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
    xptr_t      parent_root_xp;
    bool_t      mapped;
    ppn_t       ppn;

#if DEBUG_VMM_FORK_COPY
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, cycle );
#endif

    // get parent process cluster and local pointer
    parent_cxy     = GET_CXY( parent_process_xp );
    parent_process = GET_PTR( parent_process_xp );

    // get local pointers on parent and child VMM
    parent_vmm = &parent_process->vmm;
    child_vmm  = &child_process->vmm;

    // initialize the locks protecting the child VSL and GPT
    remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );

    // initialize the child VSL as empty
    xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) );
    child_vmm->vsegs_nr = 0;

    // create an empty child GPT
    error = hal_gpt_create( &child_vmm->gpt );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
        return -1;
    }

    // build extended pointer on parent VSL root and lock
    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );

    // take the lock protecting the parent VSL in read mode
    remote_rwlock_rd_acquire( parent_lock_xp );

    // loop on parent VSL xlist
    XLIST_FOREACH( parent_root_xp , iter_xp )
    {
        // get pointers on current parent vseg
        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        parent_vseg    = GET_PTR( parent_vseg_xp );

        // get vseg type
        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

        // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
        {
            // allocate memory for a new child vseg
            child_vseg = vseg_alloc();
            if( child_vseg == NULL )   // release all allocated vsegs
            {
                vmm_destroy( child_process );
                printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
                return -1;
            }

            // copy parent vseg to child vseg
            vseg_init_from_ref( child_vseg , parent_vseg_xp );

            // build extended pointer on VSL lock
            xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );

            // take the VSL lock in write mode
            remote_rwlock_wr_acquire( lock_xp );

            // register child vseg in child VSL
            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );

            // release the VSL lock
            remote_rwlock_wr_release( lock_xp );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif
            // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
            if( type != VSEG_TYPE_CODE )
            {
                // activate the COW for DATA, ANON, REMOTE vsegs only
                cow = ( type != VSEG_TYPE_FILE );

                vpn_base = child_vseg->vpn_base;
                vpn_size = child_vseg->vpn_size;

                // scan pages in parent vseg
                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                {
                    error = hal_gpt_pte_copy( &child_vmm->gpt,
                                              vpn,
                                              XPTR( parent_cxy , &parent_vmm->gpt ),
                                              vpn,
                                              cow,
                                              &ppn,
                                              &mapped );
                    if( error )
                    {
                        vmm_destroy( child_process );
                        printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
                        return -1;
                    }

                    // increment pending forks counter in page if mapped
                    if( mapped )
                    {
                        // get pointers and cluster on page descriptor
                        page_xp  = ppm_ppn2page( ppn );
                        page_cxy = GET_CXY( page_xp );
                        page_ptr = GET_PTR( page_xp );

                        // get extended pointers on "forks" and "lock"
                        forks_xp = XPTR( page_cxy , &page_ptr->forks );
                        lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                        // get lock protecting "forks" counter
                        remote_busylock_acquire( lock_xp );

                        // increment "forks"
                        hal_remote_atomic_add( forks_xp , 1 );

                        // release lock protecting "forks" counter
                        remote_busylock_release( lock_xp );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
#endif
                    }
                }
            }   // end if not CODE
        }   // end if not STACK and not kernel
    }   // end loop on vsegs

    // release the parent VSL lock in read mode
    remote_rwlock_rd_release( parent_lock_xp );

    // update child VMM with kernel vsegs
    error = hal_vmm_kernel_update( child_process );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );
        return -1;
    }

    // initialize the child VMM STACK allocator
    child_vmm->stack_mgr.bitmap   = 0;
    child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;

    // initialize the child VMM MMAP allocator
    uint32_t i;
    child_vmm->mmap_mgr.vpn_base       = CONFIG_VMM_HEAP_BASE;
    child_vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
    for( i = 0 ; i < 32 ; i++ )
    {
        xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
    }

    // initialize instrumentation counters
    child_vmm->pgfault_nr = 0;

    // copy base addresses from parent VMM to child VMM
    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));

    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));

    hal_fence();

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , cycle );
#endif

    return 0;

}  // vmm_fork_copy()

///////////////////////////////////////
void vmm_destroy( process_t * process )
{
    xptr_t   vseg_xp;
    vseg_t * vseg;

#if DEBUG_VMM_DESTROY
uint32_t cycle = (uint32_t)hal_get_cycles();
thread_t * this = CURRENT_THREAD;
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

#if (DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
hal_vmm_display( process , true );
#endif

    // get pointer on local VMM
    vmm_t * vmm = &process->vmm;

    // build extended pointer on VSL root, VSL lock and GPT lock
    xptr_t vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    xptr_t gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );

    // take the VSL lock
    remote_rwlock_wr_acquire( vsl_lock_xp );

    // scan the VSL to delete all registered vsegs
    // (we don't use a FOREACH in case of item deletion)
    xptr_t  iter_xp;
    xptr_t  next_xp;
    for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
         iter_xp != vsl_root_xp ;
         iter_xp = next_xp )
    {
        // save extended pointer on next item in xlist
        next_xp = hal_remote_l64( iter_xp );

        // get pointers on current vseg in VSL
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        // delete vseg and release physical pages
        vmm_remove_vseg( process , vseg );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif

    }

    // release the VSL lock
    remote_rwlock_wr_release( vsl_lock_xp );

    // remove all registered MMAP vsegs
    // from zombi_lists in MMAP allocator
    uint32_t i;
    for( i = 0 ; i < 32 ; i++ )
    {
        // build extended pointer on zombi_list[i]
        xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );

        // scan zombi_list[i]
        while( !xlist_is_empty( root_xp ) )
        {
            vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
            vseg    = GET_PTR( vseg_xp );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] found %s zombi vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
            // clean vseg descriptor
            vseg->vmm = NULL;

            // remove vseg from zombi_list
            xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

            // release vseg descriptor
            vseg_free( vseg );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] %s zombi vseg released / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
    }

    // take the GPT lock
    remote_rwlock_wr_acquire( gpt_lock_xp );

    // release memory allocated to the GPT itself
    hal_gpt_destroy( &vmm->gpt );

    // release the GPT lock
    remote_rwlock_wr_release( gpt_lock_xp );

#if DEBUG_VMM_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end vmm_destroy()

/////////////////////////////////////////////////
vseg_t * vmm_check_conflict( process_t * process,
                             vpn_t       vpn_base,
                             vpn_t       vpn_size )
{
    vmm_t * vmm = &process->vmm;

    // scan the VSL
    vseg_t * vseg;
    xptr_t   iter_xp;
    xptr_t   vseg_xp;
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
             (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
    }
    return NULL;

}  // end vmm_check_conflict()


////////////////////////////////////////////////
vseg_t * vmm_create_vseg( process_t   * process,
                          vseg_type_t   type,
                          intptr_t      base,
                          uint32_t      size,
                          uint32_t      file_offset,
                          uint32_t      file_size,
                          xptr_t        mapper_xp,
                          cxy_t         cxy )
{
    vseg_t * vseg;          // created vseg pointer
    vpn_t    vpn_base;      // first page index
    vpn_t    vpn_size;      // number of pages covered by vseg
    error_t  error;

#if DEBUG_VMM_CREATE_VSEG
thread_t * this = CURRENT_THREAD;
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // compute base, size, vpn_base, vpn_size, depending on vseg type
    // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs

    if( type == VSEG_TYPE_STACK )
    {
        // get vpn_base and vpn_size from STACK allocator
        vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );

        // compute vseg base and size from vpn_base and vpn_size
        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
        size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
    }
    else if( type == VSEG_TYPE_FILE )
    {
        // compute page index (in mapper) for first byte
        vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT;

        // compute page index (in mapper) for last byte
        vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;

        // compute offset in first page
        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;

        // compute number of pages required in virtual space
        vpn_t npages = vpn_max - vpn_min + 1;

        // get vpn_base and vpn_size from MMAP allocator
        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // set the vseg base (not always aligned for FILE)
        base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
    }
    else if( (type == VSEG_TYPE_ANON) ||
             (type == VSEG_TYPE_REMOTE) )
    {
        // compute number of required pages in virtual space
        vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
        if( size & CONFIG_PPM_PAGE_MASK) npages++;

        // get vpn_base and vpn_size from MMAP allocator
        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // set vseg base (always aligned for ANON or REMOTE)
        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    }
    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    {
        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;

        vpn_base = vpn_min;
        vpn_size = vpn_max - vpn_min + 1;
    }

    // check collisions
    vseg = vmm_check_conflict( process , vpn_base , vpn_size );

    if( vseg != NULL )
    {
        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n"
               "  overlaps existing vseg [vpn_base %x / vpn_size %x]\n",
        __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
        return NULL;
    }

    // allocate physical memory for vseg descriptor
    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",
        __FUNCTION__ , process->pid );
        return NULL;
    }

#if DEBUG_VMM_CREATE_VSEG
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",
__FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );
#endif

    // initialize vseg descriptor
    vseg_init( vseg,
               type,
               base,
               size,
               vpn_base,
               vpn_size,
               file_offset,
               file_size,
               mapper_xp,
               cxy );

    // build extended pointer on VSL lock
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    // take the VSL lock in write mode
    remote_rwlock_wr_acquire( lock_xp );

    // attach vseg to VSL
    vmm_attach_vseg_to_vsl( vmm , vseg );

    // release the VSL lock
    remote_rwlock_wr_release( lock_xp );

#if DEBUG_VMM_CREATE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
#endif

    return vseg;

}  // vmm_create_vseg()
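
// Hypothetical usage sketch (not taken from an actual caller): an mmap-style service
// wanting one anonymous page in the local cluster could invoke the function as below.
// For ANON vsegs the "base" argument is not used, since the MMAP allocator chooses the
// virtual window itself.
//
//     vseg_t * vseg = vmm_create_vseg( process,
//                                      VSEG_TYPE_ANON,
//                                      0,                             // base unused for ANON
//                                      1 << CONFIG_PPM_PAGE_SHIFT,    // one page
//                                      0, 0,                          // file_offset / file_size unused
//                                      XPTR_NULL,                     // no mapper
//                                      local_cxy );
//     if( vseg == NULL ) printk("\n[ERROR] anonymous vseg creation failed\n");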
---|
1221 | |
---|
1222 | |
---|
1223 | ////////////////////////////////////////// |
---|
1224 | void vmm_remove_vseg( process_t * process, |
---|
1225 | vseg_t * vseg ) |
---|
1226 | { |
---|
1227 | vmm_t * vmm; // local pointer on process VMM |
---|
1228 | bool_t is_ref; // local process is reference process |
---|
1229 | uint32_t vseg_type; // vseg type |
---|
1230 | vpn_t vpn; // VPN of current PTE |
---|
1231 | vpn_t vpn_min; // VPN of first PTE |
---|
1232 | vpn_t vpn_max; // VPN of last PTE (excluded) |
---|
1233 | ppn_t ppn; // current PTE ppn value |
---|
1234 | uint32_t attr; // current PTE attributes |
---|
1235 | kmem_req_t req; // request to release memory |
---|
1236 | xptr_t page_xp; // extended pointer on page descriptor |
---|
1237 | cxy_t page_cxy; // page descriptor cluster |
---|
1238 | page_t * page_ptr; // page descriptor pointer |
---|
1239 | xptr_t count_xp; // extended pointer on page refcount |
---|
1240 | uint32_t count; // current value of page refcount |
---|
1241 | |
---|
1242 | // check arguments |
---|
1243 | assert( (process != NULL), "process argument is NULL" ); |
---|
1244 | assert( (vseg != NULL), "vseg argument is NULL" ); |
---|
1245 | |
---|
1246 | // compute is_ref |
---|
1247 | is_ref = (GET_CXY( process->ref_xp ) == local_cxy); |
---|
1248 | |
---|
1249 | // get pointers on local process VMM |
---|
1250 | vmm = &process->vmm; |
---|
1251 | |
---|
1252 | // get relevant vseg infos |
---|
1253 | vseg_type = vseg->type; |
---|
1254 | vpn_min = vseg->vpn_base; |
---|
1255 | vpn_max = vpn_min + vseg->vpn_size; |
---|
1256 | |
---|
1257 | #if DEBUG_VMM_REMOVE_VSEG |
---|
1258 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1259 | thread_t * this = CURRENT_THREAD; |
---|
1260 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1261 | printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", |
---|
1262 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1263 | process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); |
---|
1264 | #endif |
---|
1265 | |
---|
1266 | // loop on PTEs in GPT |
---|
1267 | for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) |
---|
1268 | { |
---|
1269 | // get ppn and attr |
---|
1270 | hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn ); |
---|
1271 | |
---|
1272 | if( attr & GPT_MAPPED ) // PTE is mapped |
---|
1273 | { |
---|
1274 | |
---|
1275 | #if( DEBUG_VMM_REMOVE_VSEG & 1 ) |
---|
1276 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1277 | printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) ); |
---|
1278 | #endif |
---|
1279 | // unmap GPT entry in local GPT |
---|
1280 | hal_gpt_reset_pte( &vmm->gpt , vpn ); |
---|
1281 | |
---|
1282 | // get pointers on physical page descriptor |
---|
1283 | page_xp = ppm_ppn2page( ppn ); |
---|
1284 | page_cxy = GET_CXY( page_xp ); |
---|
1285 | page_ptr = GET_PTR( page_xp ); |
---|
1286 | |
---|
1287 | // decrement page refcount |
---|
1288 | count_xp = XPTR( page_cxy , &page_ptr->refcount ); |
---|
1289 | count = hal_remote_atomic_add( count_xp , -1 ); |
---|
1290 | |
---|
1291 | // compute the ppn_release condition depending on vseg type |
---|
1292 | bool_t ppn_release; |
---|
1293 | if( (vseg_type == VSEG_TYPE_FILE) || |
---|
1294 | (vseg_type == VSEG_TYPE_KCODE) || |
---|
1295 | (vseg_type == VSEG_TYPE_KDATA) || |
---|
1296 | (vseg_type == VSEG_TYPE_KDEV) ) |
---|
1297 | { |
---|
1298 | // no physical page release for FILE and KERNEL |
---|
1299 | ppn_release = false; |
---|
1300 | } |
---|
1301 | else if( (vseg_type == VSEG_TYPE_CODE) || |
---|
1302 | (vseg_type == VSEG_TYPE_STACK) ) |
---|
1303 | { |
---|
1304 | // always release physical page for private vsegs |
---|
1305 | ppn_release = true; |
---|
1306 | } |
---|
1307 | else if( (vseg_type == VSEG_TYPE_ANON) || |
---|
1308 | (vseg_type == VSEG_TYPE_REMOTE) ) |
---|
1309 | { |
---|
1310 | // release physical page if reference cluster |
---|
1311 | ppn_release = is_ref; |
---|
1312 | } |
---|
1313 | else if( is_ref ) // vseg_type == DATA in reference cluster |
---|
1314 | { |
---|
1315 | // get extended pointers on forks and lock field in page descriptor |
---|
1316 | xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
1317 | xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
1318 | |
---|
1319 | // take lock protecting "forks" counter |
---|
1320 | remote_busylock_acquire( lock_xp ); |
---|
1321 | |
---|
1322 | // get number of pending forks from page descriptor |
---|
1323 | uint32_t forks = hal_remote_l32( forks_xp ); |
---|
1324 | |
---|
1325 | // decrement pending forks counter if required |
---|
1326 | if( forks ) hal_remote_atomic_add( forks_xp , -1 ); |
---|
1327 | |
---|
1328 | // release lock protecting "forks" counter |
---|
1329 | remote_busylock_release( lock_xp ); |
---|
1330 | |
---|
1331 | // release physical page if forks == 0 |
---|
1332 | ppn_release = (forks == 0); |
---|
1333 | } |
---|
1334 | else // vseg_type == DATA not in reference cluster |
---|
1335 | { |
---|
1336 | // no physical page release if not in reference cluster |
---|
1337 | ppn_release = false; |
---|
1338 | } |
---|
1339 | |
---|
1340 | // release physical page to relevant kmem when required |
---|
1341 | if( ppn_release ) |
---|
1342 | { |
---|
1343 | if( page_cxy == local_cxy ) |
---|
1344 | { |
---|
1345 | req.type = KMEM_PAGE; |
---|
1346 | req.ptr = page_ptr; |
---|
1347 | kmem_free( &req ); |
---|
1348 | } |
---|
1349 | else |
---|
1350 | { |
---|
1351 | rpc_pmem_release_pages_client( page_cxy , page_ptr ); |
---|
1352 | } |
---|
1353 | } |
---|
1354 | |
---|
1355 | #if( DEBUG_VMM_REMOVE_VSEG & 1 ) |
---|
1356 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1357 | { |
---|
1358 | if( ppn_release ) printk(" / released to kmem\n" ); |
---|
1359 | else printk("\n"); |
---|
1360 | } |
---|
1361 | #endif |
---|
1362 | } |
---|
1363 | } |
---|
1364 | |
---|
1365 | // remove vseg from VSL |
---|
1366 | vmm_detach_vseg_from_vsl( vmm , vseg ); |
---|
1367 | |
---|
1368 | // release vseg descriptor depending on vseg type |
---|
1369 | if( vseg_type == VSEG_TYPE_STACK ) |
---|
1370 | { |
---|
1371 | // release slot to local stack allocator |
---|
1372 | vmm_stack_free( vmm , vseg ); |
---|
1373 | |
---|
1374 | // release vseg descriptor to local kmem |
---|
1375 | vseg_free( vseg ); |
---|
1376 | } |
---|
1377 | else if( (vseg_type == VSEG_TYPE_ANON) || |
---|
1378 | (vseg_type == VSEG_TYPE_FILE) || |
---|
1379 | (vseg_type == VSEG_TYPE_REMOTE) ) |
---|
1380 | { |
---|
1381 | // release vseg to local mmap allocator |
---|
1382 | vmm_mmap_free( vmm , vseg ); |
---|
1383 | } |
---|
1384 | else |
---|
1385 | { |
---|
1386 | // release vseg descriptor to local kmem |
---|
1387 | vseg_free( vseg ); |
---|
1388 | } |
---|
1389 | |
---|
1390 | #if DEBUG_VMM_REMOVE_VSEG |
---|
1391 | cycle = (uint32_t)hal_get_cycles(); |
---|
1392 | if( DEBUG_VMM_REMOVE_VSEG < cycle ) |
---|
1393 | printk("\n[%s] thread[%x,%x] exit / process %x / %s / cycle %d\n", |
---|
1394 | __FUNCTION__, this->process->pid, this->trdid, |
---|
1395 | process->pid, vseg_type_str(vseg_type), cycle ); |
---|
1396 | #endif |
---|
1397 | |
---|
1398 | } // end vmm_remove_vseg() |
---|
1399 | |
---|
1400 | |
---|
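////////////////////////////////////////////////////////////////////////////////////////////
// This function removes from the local VMM of the process identified by the <pid> argument
// the vseg containing the <vaddr> virtual address. It scans the local VSL to find the vseg,
// and calls vmm_remove_vseg() to do the actual work. It simply prints a warning and returns
// if the local process descriptor or the vseg cannot be found.
////////////////////////////////////////////////////////////////////////////////////////////
// @ pid    : [in] process identifier.
// @ vaddr  : [in] virtual address contained in the target vseg.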
1401 | /////////////////////////////////// |
---|
1402 | void vmm_delete_vseg( pid_t pid, |
---|
1403 | intptr_t vaddr ) |
---|
1404 | { |
---|
1405 | process_t * process; // local pointer on local process |
---|
1406 | vseg_t * vseg; // local pointer on local vseg containing vaddr |
---|
1407 | |
---|
1408 | // get local pointer on local process descriptor |
---|
1409 | process = cluster_get_local_process_from_pid( pid ); |
---|
1410 | |
---|
1411 | if( process == NULL ) |
---|
1412 | { |
---|
1413 | printk("\n[WARNING] in %s : cannot get local process descriptor for pid %x\n", |
---|
1414 | __FUNCTION__ , pid ); |
---|
1415 | return; |
---|
1416 | } |
---|
1417 | |
---|
1418 | // get local pointer on local vseg containing vaddr |
---|
1419 | vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr ); |
---|
1420 | |
---|
1421 | if( vseg == NULL ) |
---|
1422 | { |
---|
1423 | printk("\n[WARNING] in %s : cannot get vseg for vaddr %x in process %x\n", |
---|
1424 | __FUNCTION__ , vaddr , pid ); |
---|
1425 | return; |
---|
1426 | } |
---|
1427 | |
---|
1428 | // call relevant function |
---|
1429 | vmm_remove_vseg( process , vseg ); |
---|
1430 | |
---|
1431 | } // end vmm_delete_vseg |
---|
1432 | |
---|
1433 | |
---|
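////////////////////////////////////////////////////////////////////////////////////////////
// This function scans the local VSL, protected by the VSL lock taken in read mode, to find
// the vseg containing a given virtual address.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm    : [in] pointer on the local VMM.
// @ vaddr  : [in] searched virtual address.
// @ return a local pointer on the found vseg / return NULL if not found.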
1434 | ///////////////////////////////////////////// |
---|
1435 | vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, |
---|
1436 | intptr_t vaddr ) |
---|
1437 | { |
---|
1438 | xptr_t vseg_xp; |
---|
1439 | vseg_t * vseg; |
---|
1440 | xptr_t iter_xp; |
---|
1441 | |
---|
1442 | // get extended pointers on VSL lock and root |
---|
1443 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); |
---|
1444 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
1445 | |
---|
1446 | // get lock protecting the VSL |
---|
1447 | remote_rwlock_rd_acquire( lock_xp ); |
---|
1448 | |
---|
1449 | // scan the list of vsegs in VSL |
---|
1450 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
1451 | { |
---|
1452 | // get pointers on vseg |
---|
1453 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
1454 | vseg = GET_PTR( vseg_xp ); |
---|
1455 | |
---|
1456 | // return the vseg if vaddr is contained in [min , max[ |
---|
1457 | if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) |
---|
1458 | { |
---|
1459 | // release the VSL lock and return the found vseg |
---|
1460 | remote_rwlock_rd_release( lock_xp ); |
---|
1461 | return vseg; |
---|
1462 | } |
---|
1463 | } |
---|
1464 | |
---|
1465 | // return failure |
---|
1466 | remote_rwlock_rd_release( lock_xp ); |
---|
1467 | return NULL; |
---|
1468 | |
---|
1469 | } // end vmm_vseg_from_vaddr() |
---|
1470 | |
---|
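////////////////////////////////////////////////////////////////////////////////////////////
// This function removes the region [base , base + size[ from the vseg containing it:
// the vseg is deleted when the region covers the complete vseg, simply resized when the
// region is aligned on one of the vseg ends, and split in two vsegs when the region is
// in the middle of the vseg.
////////////////////////////////////////////////////////////////////////////////////////////
// @ process : [in] pointer on the local process descriptor.
// @ base    : [in] base address of the region to remove.
// @ size    : [in] size of the region to remove (bytes).
// @ return 0 if success / return -1 if error.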
1471 | ///////////////////////////////////////////// |
---|
1472 | error_t vmm_resize_vseg( process_t * process, |
---|
1473 | intptr_t base, |
---|
1474 | intptr_t size ) |
---|
1475 | { |
---|
1476 | error_t error; |
---|
1477 | vseg_t * new; |
---|
1478 | vpn_t vpn_min; |
---|
1479 | vpn_t vpn_max; |
---|
1480 | |
---|
1481 | #if DEBUG_VMM_RESIZE_VSEG |
---|
1482 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1483 | thread_t * this = CURRENT_THREAD; |
---|
1484 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1485 | printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n", |
---|
1486 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); |
---|
1487 | #endif |
---|
1488 | |
---|
1489 | // get pointer on process VMM |
---|
1490 | vmm_t * vmm = &process->vmm; |
---|
1491 | |
---|
1492 | intptr_t addr_min = base; |
---|
1493 | intptr_t addr_max = base + size; |
---|
1494 | |
---|
1495 | // get pointer on vseg |
---|
1496 | vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); |
---|
1497 | |
---|
1498 | if( vseg == NULL) |
---|
1499 | { |
---|
1500 | printk("\n[ERROR] in %s : vseg(%x,%d) not found\n", |
---|
1501 | __FUNCTION__, base , size ); |
---|
1502 | return -1; |
---|
1503 | } |
---|
1504 | |
---|
1505 | // resize depends on unmapped region base and size |
---|
1506 | if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg |
---|
1507 | { |
---|
1508 | printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n", |
---|
1509 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1510 | |
---|
1511 | error = -1; |
---|
1512 | } |
---|
1513 | else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted |
---|
1514 | { |
---|
1515 | |
---|
1516 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1517 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1518 | printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n", |
---|
1519 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1520 | #endif |
---|
1521 | vmm_delete_vseg( process->pid , vseg->min ); |
---|
1522 | |
---|
1523 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1524 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1525 | printk("\n[%s] thread[%x,%x] deleted vseg\n", |
---|
1526 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1527 | #endif |
---|
1528 | error = 0; |
---|
1529 | } |
---|
1530 | else if( vseg->min == addr_min ) // remove the lower part : vseg base is increased |
---|
1531 | { |
---|
1532 | |
---|
1533 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1534 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1535 | printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", |
---|
1536 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1537 | #endif |
---|
1538 | // update vseg min address |
---|
1539 | vseg->min = addr_max; |
---|
1540 | |
---|
1541 | // update vpn_base and vpn_size |
---|
1542 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1543 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1544 | vseg->vpn_base = vpn_min; |
---|
1545 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1546 | |
---|
1547 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1548 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1549 | printk("\n[%s] thread[%x,%x] changed vseg_min\n", |
---|
1550 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1551 | #endif |
---|
1552 | error = 0; |
---|
1553 | } |
---|
1554 | else if( vseg->max == addr_max ) // remove the upper part : vseg max is decreased |
---|
1555 | { |
---|
1556 | |
---|
1557 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1558 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1559 | printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", |
---|
1560 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1561 | #endif |
---|
1562 | // update vseg max address |
---|
1563 | vseg->max = addr_min; |
---|
1564 | |
---|
1565 | // update vpn_base and vpn_size |
---|
1566 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1567 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1568 | vseg->vpn_base = vpn_min; |
---|
1569 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1570 | |
---|
1571 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1572 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1573 | printk("\n[%s] thread[%x,%x] changed vseg_max\n", |
---|
1574 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1575 | #endif |
---|
1576 | error = 0; |
---|
1577 | |
---|
1578 | } |
---|
1579 | else // region in the middle : vseg must be split in two |
---|
1580 | { |
---|
1581 | |
---|
1582 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1583 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1584 | printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", |
---|
1585 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1586 | #endif |
---|
1587 | // resize existing vseg : save its original end address first |
---|
1588 | intptr_t old_max = vseg->max;   vseg->max = addr_min; |
---|
1589 | |
---|
1590 | // update vpn_base and vpn_size |
---|
1591 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1592 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1593 | vseg->vpn_base = vpn_min; |
---|
1594 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1595 | |
---|
1596 | // create new vseg |
---|
1597 | new = vmm_create_vseg( process, |
---|
1598 | vseg->type, |
---|
1599 |                        addr_max, |
---|
1600 |                        (old_max - addr_max), |
---|
1601 | vseg->file_offset, |
---|
1602 | vseg->file_size, |
---|
1603 | vseg->mapper_xp, |
---|
1604 | vseg->cxy ); |
---|
1605 | |
---|
1606 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1607 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1608 | printk("\n[%s] thread[%x,%x] replaced vseg by two smaller vsegs\n", |
---|
1609 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1610 | #endif |
---|
1611 | |
---|
1612 | if( new == NULL ) error = -1; |
---|
1613 | else error = 0; |
---|
1614 | } |
---|
1615 | |
---|
1616 | #if DEBUG_VMM_RESIZE_VSEG |
---|
1617 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1618 | printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n", |
---|
1619 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); |
---|
1620 | #endif |
---|
1621 | |
---|
1622 | return error; |
---|
1623 | |
---|
1624 | } // vmm_resize_vseg() |
---|
1625 | |
---|
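////////////////////////////////////////////////////////////////////////////////////////////
// This function returns in the <found_vseg> argument a local pointer on the vseg containing
// the <vaddr> virtual address. It first scans the local VSL; if the vseg is not registered
// locally, and the local cluster is not the reference, it gets the vseg from the reference
// cluster (using an RPC), makes a local copy, and registers it in the local VSL.
////////////////////////////////////////////////////////////////////////////////////////////
// @ process    : [in]  pointer on the local process descriptor.
// @ vaddr      : [in]  searched virtual address.
// @ found_vseg : [out] local pointer on the found vseg.
// @ return 0 if success / return -1 if the vseg is not found.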
1626 | /////////////////////////////////////////// |
---|
1627 | error_t vmm_get_vseg( process_t * process, |
---|
1628 | intptr_t vaddr, |
---|
1629 | vseg_t ** found_vseg ) |
---|
1630 | { |
---|
1631 | xptr_t vseg_xp; |
---|
1632 | vseg_t * vseg; |
---|
1633 | vmm_t * vmm; |
---|
1634 | error_t error; |
---|
1635 | |
---|
1636 | // get pointer on local VMM |
---|
1637 | vmm = &process->vmm; |
---|
1638 | |
---|
1639 | // try to get vseg from local VMM |
---|
1640 | vseg = vmm_vseg_from_vaddr( vmm , vaddr ); |
---|
1641 | |
---|
1642 | if( vseg == NULL ) // vseg not found in local cluster => try to get it from ref |
---|
1643 | { |
---|
1644 | // get extended pointer on reference process |
---|
1645 | xptr_t ref_xp = process->ref_xp; |
---|
1646 | |
---|
1647 | // get cluster and local pointer on reference process |
---|
1648 | cxy_t ref_cxy = GET_CXY( ref_xp ); |
---|
1649 | process_t * ref_ptr = GET_PTR( ref_xp ); |
---|
1650 | |
---|
1651 | if( local_cxy == ref_cxy ) return -1; // local cluster is the reference |
---|
1652 | |
---|
1653 | // get extended pointer on reference vseg |
---|
1654 | rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); |
---|
1655 | |
---|
1656 | if( error ) return -1; // vseg not found => illegal user vaddr |
---|
1657 | |
---|
1658 | // allocate a vseg in local cluster |
---|
1659 | vseg = vseg_alloc(); |
---|
1660 | |
---|
1661 | if( vseg == NULL ) return -1; // cannot allocate a local vseg |
---|
1662 | |
---|
1663 | // initialise local vseg from reference |
---|
1664 | vseg_init_from_ref( vseg , vseg_xp ); |
---|
1665 | |
---|
1666 | // build extended pointer on VSL lock |
---|
1667 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); |
---|
1668 | |
---|
1669 | // take the VSL lock in write mode |
---|
1670 | remote_rwlock_wr_acquire( lock_xp ); |
---|
1671 | |
---|
1672 | // register local vseg in local VSL |
---|
1673 | vmm_attach_vseg_to_vsl( vmm , vseg ); |
---|
1674 | |
---|
1675 | // release the VSL lock |
---|
1676 | remote_rwlock_wr_release( lock_xp ); |
---|
1677 | } |
---|
1678 | |
---|
1679 | // success |
---|
1680 | *found_vseg = vseg; |
---|
1681 | return 0; |
---|
1682 | |
---|
1683 | } // end vmm_get_vseg() |
---|
1684 | |
---|
1685 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
1686 | // This static function computes the target cluster to allocate a physical page |
---|
1687 | // for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required) |
---|
1688 | // and returns an extended pointer on the allocated page descriptor. |
---|
1689 | // It can be called by a thread running in any cluster. |
---|
1690 | // The vseg cannot have the FILE type. |
---|
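// @ vseg : [in] pointer on the vseg containing the missing VPN.
// @ vpn  : [in] missing virtual page number.
// @ return extended pointer on the allocated page descriptor / XPTR_NULL if no memory.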
1691 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
1692 | static xptr_t vmm_page_allocate( vseg_t * vseg, |
---|
1693 | vpn_t vpn ) |
---|
1694 | { |
---|
1695 | |
---|
1696 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
1697 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1698 | thread_t * this = CURRENT_THREAD; |
---|
1699 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
1700 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
1701 | __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle ); |
---|
1702 | #endif |
---|
1703 | |
---|
1704 | page_t * page_ptr; |
---|
1705 | cxy_t page_cxy; |
---|
1706 | kmem_req_t req; |
---|
1707 | uint32_t index; |
---|
1708 | |
---|
1709 | uint32_t type = vseg->type; |
---|
1710 | uint32_t flags = vseg->flags; |
---|
1711 | uint32_t x_size = LOCAL_CLUSTER->x_size; |
---|
1712 | uint32_t y_size = LOCAL_CLUSTER->y_size; |
---|
1713 | |
---|
1714 | // check vseg type |
---|
1715 | assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" ); |
---|
1716 | |
---|
1717 | if( flags & VSEG_DISTRIB ) // distributed => cxy depends on vpn LSB |
---|
1718 | { |
---|
1719 | index = vpn & ((x_size * y_size) - 1); |
---|
1720 | page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) ); |
---|
1721 | |
---|
1722 | // if the cluster selected from the VPN LSBs is inactive, select one randomly |
---|
1723 | if ( cluster_is_active( page_cxy ) == false ) |
---|
1724 | { |
---|
1725 | page_cxy = cluster_random_select(); |
---|
1726 | } |
---|
1727 | } |
---|
1728 | else // other cases => cxy specified in vseg |
---|
1729 | { |
---|
1730 | page_cxy = vseg->cxy; |
---|
1731 | } |
---|
1732 | |
---|
1733 | // allocate a physical page from target cluster |
---|
1734 | if( page_cxy == local_cxy ) // target cluster is the local cluster |
---|
1735 | { |
---|
1736 | req.type = KMEM_PAGE; |
---|
1737 | req.size = 0; |
---|
1738 | req.flags = AF_NONE; |
---|
1739 | page_ptr = (page_t *)kmem_alloc( &req ); |
---|
1740 | } |
---|
1741 | else // target cluster is not the local cluster |
---|
1742 | { |
---|
1743 | rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr ); |
---|
1744 | } |
---|
1745 | |
---|
1746 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
1747 | cycle = (uint32_t)hal_get_cycles(); |
---|
1748 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
1749 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
1750 | __FUNCTION__ , this->process->pid, this->trdid, vpn, |
---|
1751 | ppm_page2ppn( XPTR( page_cxy , page_ptr ) ), cycle ); |
---|
1752 | #endif |
---|
1753 | |
---|
1754 | if( page_ptr == NULL ) return XPTR_NULL; |
---|
1755 | else return XPTR( page_cxy , page_ptr ); |
---|
1756 | |
---|
1757 | } // end vmm_page_allocate() |
---|
1758 | |
---|
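////////////////////////////////////////////////////////////////////////////////////////////
// This function returns in the <ppn> argument the PPN of the physical page to be mapped
// for the missing <vpn> in the <vseg>. For a FILE vseg the page is obtained from the file
// mapper. For other types a physical page is allocated by vmm_page_allocate(), and for
// CODE and DATA vsegs its content is initialised from the .elf file mapper and/or zeroed
// for the BSS part.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vseg : [in]  pointer on the vseg containing the missing VPN.
// @ vpn  : [in]  missing virtual page number.
// @ ppn  : [out] physical page number.
// @ return 0 if success / return EINVAL or ENOMEM if error.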
1759 | //////////////////////////////////////// |
---|
1760 | error_t vmm_get_one_ppn( vseg_t * vseg, |
---|
1761 | vpn_t vpn, |
---|
1762 | ppn_t * ppn ) |
---|
1763 | { |
---|
1764 | error_t error; |
---|
1765 | xptr_t page_xp; // extended pointer on physical page descriptor |
---|
1766 | uint32_t page_id; // missing page index in vseg mapper |
---|
1767 | uint32_t type; // vseg type; |
---|
1768 | |
---|
1769 | type = vseg->type; |
---|
1770 | page_id = vpn - vseg->vpn_base; |
---|
1771 | |
---|
1772 | #if DEBUG_VMM_GET_ONE_PPN |
---|
1773 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1774 | thread_t * this = CURRENT_THREAD; |
---|
1775 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
1776 | printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", |
---|
1777 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); |
---|
1778 | #endif |
---|
1779 | |
---|
1780 | // FILE type : get the physical page from the file mapper |
---|
1781 | if( type == VSEG_TYPE_FILE ) |
---|
1782 | { |
---|
1783 | // get extended pointer on mapper |
---|
1784 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
1785 | |
---|
1786 | assert( (mapper_xp != XPTR_NULL), |
---|
1787 | "mapper not defined for a FILE vseg\n" ); |
---|
1788 | |
---|
1789 | // get extended pointer on page descriptor |
---|
1790 | page_xp = mapper_remote_get_page( mapper_xp , page_id ); |
---|
1791 | |
---|
1792 | if ( page_xp == XPTR_NULL ) return EINVAL; |
---|
1793 | } |
---|
1794 | |
---|
1795 | // Other types : allocate a physical page from target cluster, |
---|
1796 | // as defined by vseg type and vpn value |
---|
1797 | else |
---|
1798 | { |
---|
1799 | // allocate one physical page |
---|
1800 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
1801 | |
---|
1802 | if( page_xp == XPTR_NULL ) return ENOMEM; |
---|
1803 | |
---|
1804 | // initialise missing page from .elf file mapper for DATA and CODE types |
---|
1805 | // the vseg->mapper_xp field is an extended pointer on the .elf file mapper |
---|
1806 | if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) |
---|
1807 | { |
---|
1808 | // get extended pointer on mapper |
---|
1809 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
1810 | |
---|
1811 | assert( (mapper_xp != XPTR_NULL), |
---|
1812 | "mapper not defined for a CODE or DATA vseg\n" ); |
---|
1813 | |
---|
1814 | // compute missing page offset in vseg |
---|
1815 | uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT; |
---|
1816 | |
---|
1817 | // compute missing page offset in .elf file |
---|
1818 | uint32_t elf_offset = vseg->file_offset + offset; |
---|
1819 | |
---|
1820 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1821 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1822 | printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", |
---|
1823 | __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); |
---|
1824 | #endif |
---|
1825 | // compute extended pointer on page base |
---|
1826 | xptr_t base_xp = ppm_page2base( page_xp ); |
---|
1827 | |
---|
1828 | // file_size (in .elf mapper) can be smaller than vseg_size (BSS) |
---|
1829 | uint32_t file_size = vseg->file_size; |
---|
1830 | |
---|
1831 | if( file_size < offset ) // missing page fully in BSS |
---|
1832 | { |
---|
1833 | |
---|
1834 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1835 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1836 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", |
---|
1837 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
1838 | #endif |
---|
1839 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
1840 | { |
---|
1841 | memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
1842 | } |
---|
1843 | else |
---|
1844 | { |
---|
1845 | hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
1846 | } |
---|
1847 | } |
---|
1848 | else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper |
---|
1849 | { |
---|
1850 | |
---|
1851 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1852 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1853 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", |
---|
1854 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
1855 | #endif |
---|
1856 | error = mapper_move_kernel( mapper_xp, |
---|
1857 | true, // to_buffer |
---|
1858 | elf_offset, |
---|
1859 | base_xp, |
---|
1860 | CONFIG_PPM_PAGE_SIZE ); |
---|
1861 | if( error ) return EINVAL; |
---|
1862 | } |
---|
1863 | else // both in mapper and in BSS : |
---|
1864 | // - (file_size - offset) bytes from mapper |
---|
1865 | // - (page_size + offset - file_size) bytes from BSS |
---|
1866 | { |
---|
1867 | |
---|
1868 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1869 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1870 | printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" |
---|
1871 | " %d bytes from mapper / %d bytes from BSS\n", |
---|
1872 | __FUNCTION__, this->process->pid, this->trdid, vpn, |
---|
1873 | file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
1874 | #endif |
---|
1875 | // initialize mapper part |
---|
1876 | error = mapper_move_kernel( mapper_xp, |
---|
1877 | true, // to buffer |
---|
1878 | elf_offset, |
---|
1879 | base_xp, |
---|
1880 | file_size - offset ); |
---|
1881 | if( error ) return EINVAL; |
---|
1882 | |
---|
1883 | // initialize BSS part |
---|
1884 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
1885 | { |
---|
1886 | memset( GET_PTR( base_xp ) + file_size - offset , 0 , |
---|
1887 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
1888 | } |
---|
1889 | else |
---|
1890 | { |
---|
1891 | hal_remote_memset( base_xp + file_size - offset , 0 , |
---|
1892 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
1893 | } |
---|
1894 | } |
---|
1895 | } // end initialisation for CODE or DATA types |
---|
1896 | } |
---|
1897 | |
---|
1898 | // return ppn |
---|
1899 | *ppn = ppm_page2ppn( page_xp ); |
---|
1900 | |
---|
1901 | #if DEBUG_VMM_GET_ONE_PPN |
---|
1902 | cycle = (uint32_t)hal_get_cycles(); |
---|
1903 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
1904 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
1905 | __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); |
---|
1906 | #endif |
---|
1907 | |
---|
1908 | return 0; |
---|
1909 | |
---|
1910 | } // end vmm_get_one_ppn() |
---|
1911 | |
---|
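////////////////////////////////////////////////////////////////////////////////////////////
// This function handles a page fault on the <vpn> virtual page for the <process>.
// For a private vseg (STACK or CODE) only the local GPT is accessed: if the VPN is still
// unmapped, a physical page is allocated and the local PTE is set. For a public vseg the
// reference GPT is accessed first: a "false" page fault (VPN already mapped in the
// reference GPT) only updates the local GPT, while a "true" page fault allocates a
// physical page and updates both the reference GPT and the local GPT.
////////////////////////////////////////////////////////////////////////////////////////////
// @ process : [in] pointer on the local process descriptor.
// @ vpn     : [in] faulting virtual page number.
// @ return EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC.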
1912 | /////////////////////////////////////////////////// |
---|
1913 | error_t vmm_handle_page_fault( process_t * process, |
---|
1914 | vpn_t vpn ) |
---|
1915 | { |
---|
1916 | vseg_t * vseg; // vseg containing vpn |
---|
1917 | uint32_t new_attr; // new PTE_ATTR value |
---|
1918 | ppn_t new_ppn; // new PTE_PPN value |
---|
1919 | uint32_t ref_attr; // PTE_ATTR value in reference GPT |
---|
1920 | ppn_t ref_ppn; // PTE_PPN value in reference GPT |
---|
1921 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
1922 | process_t * ref_ptr; // reference process for missing vpn |
---|
1923 | xptr_t local_gpt_xp; // extended pointer on local GPT |
---|
1924 | xptr_t local_lock_xp; // extended pointer on local GPT lock |
---|
1925 | xptr_t ref_gpt_xp; // extended pointer on reference GPT |
---|
1926 | xptr_t ref_lock_xp; // extended pointer on reference GPT lock |
---|
1927 | error_t error; // value returned by called functions |
---|
1928 | |
---|
1929 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
1930 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1931 | thread_t * this = CURRENT_THREAD; |
---|
1932 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
1933 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
1934 | __FUNCTION__, this->process->pid, this->trdid, vpn, cycle ); |
---|
1935 | hal_vmm_display( process , true ); |
---|
1936 | #endif |
---|
1937 | |
---|
1938 | // get local vseg (access to reference VSL can be required) |
---|
1939 | error = vmm_get_vseg( process, |
---|
1940 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
1941 | &vseg ); |
---|
1942 | if( error ) |
---|
1943 | { |
---|
1944 | printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n", |
---|
1945 | __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() ); |
---|
1946 | |
---|
1947 | return EXCP_USER_ERROR; |
---|
1948 | } |
---|
1949 | |
---|
1950 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
1951 | cycle = (uint32_t)hal_get_cycles(); |
---|
1952 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
1953 | printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n", |
---|
1954 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle ); |
---|
1955 | #endif |
---|
1956 | |
---|
1957 | //////////////// private vseg => access only the local GPT |
---|
1958 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
1959 | { |
---|
1960 | // build extended pointer on local GPT and local GPT lock |
---|
1961 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
1962 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
1963 | |
---|
1964 | // take local GPT lock in write mode |
---|
1965 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
1966 | |
---|
1967 | // check VPN still unmapped in local GPT |
---|
1968 | |
---|
1969 | // do nothing if VPN has been mapped by a concurrent page_fault |
---|
1970 | hal_gpt_get_pte( local_gpt_xp, |
---|
1971 | vpn, |
---|
1972 | &new_attr, |
---|
1973 | &new_ppn ); |
---|
1974 | |
---|
1975 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
1976 | { |
---|
1977 | // allocate and initialise a physical page depending on the vseg type |
---|
1978 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
1979 | |
---|
1980 | if( error ) |
---|
1981 | { |
---|
1982 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
1983 | __FUNCTION__ , process->pid , vpn ); |
---|
1984 | |
---|
1985 | // release local GPT lock in write mode |
---|
1986 | remote_rwlock_wr_release( local_lock_xp ); |
---|
1987 | |
---|
1988 | return EXCP_KERNEL_PANIC; |
---|
1989 | } |
---|
1990 | |
---|
1991 | // define new_attr from vseg flags |
---|
1992 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
1993 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
1994 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
1995 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
1996 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
1997 | |
---|
1998 | // set PTE (PPN & attribute) to local GPT |
---|
1999 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
2000 | vpn, |
---|
2001 | new_attr, |
---|
2002 | new_ppn ); |
---|
2003 | if ( error ) |
---|
2004 | { |
---|
2005 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n", |
---|
2006 | __FUNCTION__ , process->pid , vpn ); |
---|
2007 | |
---|
2008 | // release local GPT lock in write mode |
---|
2009 | remote_rwlock_wr_release( local_lock_xp ); |
---|
2010 | |
---|
2011 | return EXCP_KERNEL_PANIC; |
---|
2012 | } |
---|
2013 | } |
---|
2014 | |
---|
2015 | // release local GPT lock in write mode |
---|
2016 | remote_rwlock_wr_release( local_lock_xp ); |
---|
2017 | |
---|
2018 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
2019 | cycle = (uint32_t)hal_get_cycles(); |
---|
2020 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
2021 | printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
2022 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
2023 | #endif |
---|
2024 | return EXCP_NON_FATAL; |
---|
2025 | |
---|
2026 | } // end local GPT access |
---|
2027 | |
---|
2028 | //////////// public vseg => access reference GPT |
---|
2029 | else |
---|
2030 | { |
---|
2031 | // get reference process cluster and local pointer |
---|
2032 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
2033 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
2034 | |
---|
2035 | // build extended pointer on reference GPT and reference GPT lock |
---|
2036 | ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
2037 | ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
2038 | |
---|
2039 | // build extended pointer on local GPT and local GPT lock |
---|
2040 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
2041 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
2042 | |
---|
2043 | // take reference GPT lock in read mode |
---|
2044 | remote_rwlock_rd_acquire( ref_lock_xp ); |
---|
2045 | |
---|
2046 | // get directly PPN & attributes from reference GPT |
---|
2047 | // this can avoid a costly RPC for a false page fault |
---|
2048 | hal_gpt_get_pte( ref_gpt_xp, |
---|
2049 | vpn, |
---|
2050 | &ref_attr, |
---|
2051 | &ref_ppn ); |
---|
2052 | |
---|
2053 | // release reference GPT lock in read mode |
---|
2054 | remote_rwlock_rd_release( ref_lock_xp ); |
---|
2055 | |
---|
2056 | if( ref_attr & GPT_MAPPED ) // false page fault => update local GPT |
---|
2057 | { |
---|
2058 | // take local GPT lock in write mode |
---|
2059 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
2060 | |
---|
2061 | // check VPN still unmapped in local GPT |
---|
2062 | hal_gpt_get_pte( local_gpt_xp, |
---|
2063 | vpn, |
---|
2064 | &new_attr, |
---|
2065 | &new_ppn ); |
---|
2066 | |
---|
2067 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
2068 | { |
---|
2069 | // update local GPT from reference GPT |
---|
2070 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
2071 | vpn, |
---|
2072 | ref_attr, |
---|
2073 | ref_ppn ); |
---|
2074 | if( error ) |
---|
2075 | { |
---|
2076 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n", |
---|
2077 | __FUNCTION__ , process->pid , vpn ); |
---|
2078 | |
---|
2079 | // release local GPT lock in write mode |
---|
2080 | remote_rwlock_wr_release( local_lock_xp ); |
---|
2081 | |
---|
2082 | return EXCP_KERNEL_PANIC; |
---|
2083 | } |
---|
2084 | } |
---|
2085 | else // VPN has been mapped by a concurrent page_fault |
---|
2086 | { |
---|
2087 | // keep PTE from local GPT |
---|
2088 | ref_attr = new_attr; |
---|
2089 | ref_ppn = new_ppn; |
---|
2090 | } |
---|
2091 | |
---|
2092 | // release local GPT lock in write mode |
---|
2093 | remote_rwlock_wr_release( local_lock_xp ); |
---|
2094 | |
---|
2095 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
2096 | cycle = (uint32_t)hal_get_cycles(); |
---|
2097 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
2098 | printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
2099 | __FUNCTION__, vpn, ref_ppn, ref_attr, cycle ); |
---|
2100 | #endif |
---|
2101 | return EXCP_NON_FATAL; |
---|
2102 | } |
---|
2103 | else // true page fault => update reference GPT |
---|
2104 | { |
---|
2105 | // take reference GPT lock in write mode |
---|
2106 | remote_rwlock_wr_acquire( ref_lock_xp ); |
---|
2107 | |
---|
2108 | // check VPN still unmapped in reference GPT |
---|
2109 | // do nothing if VPN has been mapped by a concurrent page_fault |
---|
2110 | hal_gpt_get_pte( ref_gpt_xp, |
---|
2111 | vpn, |
---|
2112 | &ref_attr, |
---|
2113 | &ref_ppn ); |
---|
2114 | |
---|
2115 | if( (ref_attr & GPT_MAPPED) == 0 ) // VPN actually unmapped |
---|
2116 | { |
---|
2117 | // allocate and initialise a physical page depending on the vseg type |
---|
2118 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
2119 | |
---|
2120 | if( error ) |
---|
2121 | { |
---|
2122 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
2123 | __FUNCTION__ , process->pid , vpn ); |
---|
2124 | |
---|
2125 | // release reference GPT lock in write mode |
---|
2126 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
2127 | |
---|
2128 | return EXCP_KERNEL_PANIC; |
---|
2129 | } |
---|
2130 | |
---|
2131 | // define new_attr from vseg flags |
---|
2132 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
2133 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
2134 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
2135 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
2136 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
2137 | |
---|
2138 | // update reference GPT |
---|
2139 | error = hal_gpt_set_pte( ref_gpt_xp, |
---|
2140 | vpn, |
---|
2141 | new_attr, |
---|
2142 | new_ppn ); |
---|
2143 | |
---|
2144 | // update local GPT (protected by reference GPT lock) |
---|
2145 | error |= hal_gpt_set_pte( local_gpt_xp, |
---|
2146 | vpn, |
---|
2147 | new_attr, |
---|
2148 | new_ppn ); |
---|
2149 | |
---|
2150 | if( error ) |
---|
2151 | { |
---|
2152 | printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n", |
---|
2153 | __FUNCTION__ , process->pid , vpn ); |
---|
2154 | |
---|
2155 | // release reference GPT lock in write mode |
---|
2156 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
2157 | |
---|
2158 | return EXCP_KERNEL_PANIC; |
---|
2159 | } |
---|
2160 | } |
---|
2161 | |
---|
2162 | // release reference GPT lock in write mode |
---|
2163 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
2164 | |
---|
2165 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
2166 | cycle = (uint32_t)hal_get_cycles(); |
---|
2167 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
2168 | printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
2169 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
2170 | #endif |
---|
2171 | return EXCP_NON_FATAL; |
---|
2172 | } |
---|
2173 | } |
---|
2174 | } // end vmm_handle_page_fault() |
---|
2175 | |
---|
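////////////////////////////////////////////////////////////////////////////////////////////
// This function handles a copy-on-write exception on the <vpn> virtual page for the
// <process>. If the "forks" counter of the physical page is not zero, a new physical page
// is allocated and the old page content is copied to it; otherwise the existing page is
// kept. The PTE is then updated (COW flag reset, WRITABLE flag set) in the local GPT for
// a private vseg, or in all GPT copies for a public vseg.
////////////////////////////////////////////////////////////////////////////////////////////
// @ process : [in] pointer on the local process descriptor.
// @ vpn     : [in] virtual page number of the faulting PTE.
// @ return EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC.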
2176 | //////////////////////////////////////////// |
---|
2177 | error_t vmm_handle_cow( process_t * process, |
---|
2178 | vpn_t vpn ) |
---|
2179 | { |
---|
2180 | vseg_t * vseg; // vseg containing vpn |
---|
2181 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
2182 | process_t * ref_ptr; // reference process for missing vpn |
---|
2183 | xptr_t gpt_xp; // extended pointer on GPT |
---|
2184 | xptr_t gpt_lock_xp; // extended pointer on GPT lock |
---|
2185 | uint32_t old_attr; // current PTE_ATTR value |
---|
2186 | ppn_t old_ppn; // current PTE_PPN value |
---|
2187 | uint32_t new_attr; // new PTE_ATTR value |
---|
2188 | ppn_t new_ppn; // new PTE_PPN value |
---|
2189 | error_t error; |
---|
2190 | |
---|
2191 | thread_t * this = CURRENT_THREAD; |
---|
2192 | |
---|
2193 | #if DEBUG_VMM_HANDLE_COW |
---|
2194 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
2195 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2196 | printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n", |
---|
2197 | __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
2198 | hal_vmm_display( process , true ); |
---|
2199 | #endif |
---|
2200 | |
---|
2201 | // access local GPT to get GPT_COW flag |
---|
2202 | bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn ); |
---|
2203 | |
---|
2204 | if( cow == false ) return EXCP_USER_ERROR; |
---|
2205 | |
---|
2206 | // get local vseg |
---|
2207 | error = vmm_get_vseg( process, |
---|
2208 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
2209 | &vseg ); |
---|
2210 | if( error ) |
---|
2211 | { |
---|
2212 | printk("\n[PANIC] in %s vpn %x in thread[%x,%x] not in a registered vseg\n", |
---|
2213 | __FUNCTION__, vpn, process->pid, this->trdid ); |
---|
2214 | |
---|
2215 | return EXCP_KERNEL_PANIC; |
---|
2216 | } |
---|
2217 | |
---|
2218 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2219 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2220 | printk("\n[%s] thread[%x,%x] get vseg for vpn %x\n", |
---|
2221 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
2222 | #endif |
---|
2223 | |
---|
2224 | // get reference GPT cluster and local pointer |
---|
2225 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
2226 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
2227 | |
---|
2228 | // build extended pointers on the relevant GPT and GPT lock : |
---|
2229 | // - access local GPT for a private vseg |
---|
2230 | // - access reference GPT for a public vseg |
---|
2231 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
2232 | { |
---|
2233 | gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
2234 | gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
2235 | } |
---|
2236 | else |
---|
2237 | { |
---|
2238 | gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
2239 | gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
2240 | } |
---|
2241 | |
---|
2242 | // take GPT lock in write mode |
---|
2243 | remote_rwlock_wr_acquire( gpt_lock_xp ); |
---|
2244 | |
---|
2245 | // get current PTE from the relevant GPT |
---|
2246 | hal_gpt_get_pte( gpt_xp, |
---|
2247 | vpn, |
---|
2248 | &old_attr, |
---|
2249 | &old_ppn ); |
---|
2250 | |
---|
2251 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2252 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2253 | printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n", |
---|
2254 | __FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr ); |
---|
2255 | #endif |
---|
2256 | |
---|
2257 | // the PTE must be mapped for a COW |
---|
2258 | if( (old_attr & GPT_MAPPED) == 0 ) |
---|
2259 | { |
---|
2260 | printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n", |
---|
2261 | __FUNCTION__, vpn, process->pid ); |
---|
2262 | |
---|
2263 | // release GPT lock in write mode |
---|
2264 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
2265 | |
---|
2266 | return EXCP_KERNEL_PANIC; |
---|
2267 | } |
---|
2268 | |
---|
2269 | // get pointers on physical page descriptor |
---|
2270 | xptr_t page_xp = ppm_ppn2page( old_ppn ); |
---|
2271 | cxy_t page_cxy = GET_CXY( page_xp ); |
---|
2272 | page_t * page_ptr = GET_PTR( page_xp ); |
---|
2273 | |
---|
2274 | // get extended pointers on forks and lock field in page descriptor |
---|
2275 | xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
2276 | xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
2277 | |
---|
2278 | // take lock protecting "forks" counter |
---|
2279 | remote_busylock_acquire( forks_lock_xp ); |
---|
2280 | |
---|
2281 | // get number of pending forks from page descriptor |
---|
2282 | uint32_t forks = hal_remote_l32( forks_xp ); |
---|
2283 | |
---|
2284 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2285 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2286 | printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n", |
---|
2287 | __FUNCTION__, this->process->pid, this->trdid, forks, vpn ); |
---|
2288 | #endif |
---|
2289 | |
---|
2290 | if( forks ) // pending fork => allocate a new page, and copy old to new |
---|
2291 | { |
---|
2292 | // decrement pending forks counter in page descriptor |
---|
2293 | hal_remote_atomic_add( forks_xp , -1 ); |
---|
2294 | |
---|
2295 | // release lock protecting "forks" counter |
---|
2296 | remote_busylock_release( forks_lock_xp ); |
---|
2297 | |
---|
2298 | // allocate a new page |
---|
2299 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
2300 | |
---|
2301 | if( page_xp == XPTR_NULL ) |
---|
2302 | { |
---|
2303 | printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n", |
---|
2304 | __FUNCTION__ , vpn, process->pid ); |
---|
2305 | |
---|
2306 | // release GPT lock in write mode |
---|
2307 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
2308 | |
---|
2309 | return EXCP_KERNEL_PANIC; |
---|
2310 | } |
---|
2311 | |
---|
2312 | // compute allocated page PPN |
---|
2313 | new_ppn = ppm_page2ppn( page_xp ); |
---|
2314 | |
---|
2315 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2316 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2317 | printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n", |
---|
2318 | __FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn ); |
---|
2319 | #endif |
---|
2320 | |
---|
2321 | // copy old page content to new page |
---|
2322 | hal_remote_memcpy( ppm_ppn2base( new_ppn ), |
---|
2323 | ppm_ppn2base( old_ppn ), |
---|
2324 | CONFIG_PPM_PAGE_SIZE ); |
---|
2325 | |
---|
2326 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
2327 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2328 | printk("\n[%s] thread[%x,%x] copied old page to new page\n", |
---|
2329 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
2330 | #endif |
---|
2331 | |
---|
2332 | } |
---|
2333 | else // no pending fork => keep the existing page |
---|
2334 | { |
---|
2335 | // release lock protecting "forks" counter |
---|
2336 | remote_busylock_release( forks_lock_xp ); |
---|
2337 | |
---|
2338 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
2339 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2340 | printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n", |
---|
2341 | __FUNCTION__, this->process->pid, this->trdid, old_ppn ); |
---|
2342 | #endif |
---|
2343 | new_ppn = old_ppn; |
---|
2344 | } |
---|
2345 | |
---|
2346 | // build new_attr : reset COW and set WRITABLE |
---|
2347 | new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW); |
---|
2348 | |
---|
2349 | // update the relevant GPT |
---|
2350 | // - private vseg => update local GPT |
---|
2351 | // - public vseg => update all GPT copies |
---|
2352 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
2353 | { |
---|
2354 | hal_gpt_set_pte( gpt_xp, |
---|
2355 | vpn, |
---|
2356 | new_attr, |
---|
2357 | new_ppn ); |
---|
2358 | } |
---|
2359 | else |
---|
2360 | { |
---|
2361 | if( ref_cxy == local_cxy ) // reference cluster is local |
---|
2362 | { |
---|
2363 | vmm_global_update_pte( process, |
---|
2364 | vpn, |
---|
2365 | new_attr, |
---|
2366 | new_ppn ); |
---|
2367 | } |
---|
2368 | else // reference cluster is remote |
---|
2369 | { |
---|
2370 | rpc_vmm_global_update_pte_client( ref_cxy, |
---|
2371 | ref_ptr, |
---|
2372 | vpn, |
---|
2373 | new_attr, |
---|
2374 | new_ppn ); |
---|
2375 | } |
---|
2376 | } |
---|
2377 | |
---|
2378 | // release GPT lock in write mode |
---|
2379 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
2380 | |
---|
2381 | #if DEBUG_VMM_HANDLE_COW |
---|
2382 | cycle = (uint32_t)hal_get_cycles(); |
---|
2383 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2384 | printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n", |
---|
2385 | __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
2386 | #endif |
---|
2387 | |
---|
2388 | return EXCP_NON_FATAL; |
---|
2389 | |
---|
2390 | } // end vmm_handle_cow() |
---|
2391 | |
---|