1 | /* |
---|
2 | * vmm.c - virtual memory manager related operations definition. |
---|
3 | * |
---|
4 | * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) |
---|
5 | * Mohamed Lamine Karaoui (2015) |
---|
6 | * Alain Greiner (2016,2017,2018) |
---|
7 | * |
---|
8 | * Copyright (c) UPMC Sorbonne Universites |
---|
9 | * |
---|
10 | * This file is part of ALMOS-MKH. |
---|
11 | * |
---|
12 | * ALMOS-MKH is free software; you can redistribute it and/or modify it |
---|
13 | * under the terms of the GNU General Public License as published by |
---|
14 | * the Free Software Foundation; version 2.0 of the License. |
---|
15 | * |
---|
16 | * ALMOS-MKH is distributed in the hope that it will be useful, but |
---|
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
19 | * General Public License for more details. |
---|
20 | * |
---|
21 | * You should have received a copy of the GNU General Public License |
---|
22 | * along with ALMOS-MKH; if not, write to the Free Software Foundation, |
---|
23 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
---|
24 | */ |
---|
25 | |
---|
26 | #include <kernel_config.h> |
---|
27 | #include <hal_kernel_types.h> |
---|
28 | #include <hal_special.h> |
---|
29 | #include <hal_gpt.h> |
---|
30 | #include <hal_vmm.h> |
---|
31 | #include <hal_macros.h> |
---|
32 | #include <printk.h> |
---|
33 | #include <memcpy.h> |
---|
34 | #include <remote_rwlock.h> |
---|
35 | #include <remote_queuelock.h> |
---|
36 | #include <list.h> |
---|
37 | #include <xlist.h> |
---|
38 | #include <bits.h> |
---|
39 | #include <process.h> |
---|
40 | #include <thread.h> |
---|
41 | #include <vseg.h> |
---|
42 | #include <cluster.h> |
---|
43 | #include <scheduler.h> |
---|
44 | #include <vfs.h> |
---|
45 | #include <mapper.h> |
---|
46 | #include <page.h> |
---|
47 | #include <kmem.h> |
---|
48 | #include <vmm.h> |
---|
49 | #include <hal_exception.h> |
---|
50 | |
---|
51 | ////////////////////////////////////////////////////////////////////////////////// |
---|
52 | // Extern global variables |
---|
53 | ////////////////////////////////////////////////////////////////////////////////// |
---|
54 | |
---|
55 | extern process_t process_zero; // allocated in cluster.c |
---|
56 | |
---|
57 | /////////////////////////////////////// |
---|
58 | error_t vmm_init( process_t * process ) |
---|
59 | { |
---|
60 | error_t error; |
---|
61 | vseg_t * vseg_kentry; |
---|
62 | vseg_t * vseg_args; |
---|
63 | vseg_t * vseg_envs; |
---|
64 | intptr_t base; |
---|
65 | intptr_t size; |
---|
66 | |
---|
67 | #if DEBUG_VMM_INIT |
---|
68 | thread_t * this = CURRENT_THREAD; |
---|
69 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
70 | if( DEBUG_VMM_INIT ) |
---|
71 | printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", |
---|
72 | __FUNCTION__ , this->process->pid, this->trdid, process->pid , cycle ); |
---|
73 | #endif |
---|
74 | |
---|
75 | // get pointer on VMM |
---|
76 | vmm_t * vmm = &process->vmm; |
---|
77 | |
---|
78 | // initialize local list of vsegs |
---|
79 | vmm->vsegs_nr = 0; |
---|
80 | xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); |
---|
81 | remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL ); |
---|
82 | |
---|
83 | assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) |
---|
84 | <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" ); |
---|
85 | |
---|
86 | assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) , |
---|
87 | "no more than 32 threads per cluster for a single process\n"); |
---|
88 | |
---|
89 | assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <= |
---|
90 | (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , |
---|
91 | "STACK zone too small\n"); |
---|
92 | |
---|
93 | // register kentry vseg in VSL |
---|
94 | base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT; |
---|
95 | size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT; |
---|
96 | |
---|
97 | vseg_kentry = vmm_create_vseg( process, |
---|
98 | VSEG_TYPE_CODE, |
---|
99 | base, |
---|
100 | size, |
---|
101 | 0, // file_offset unused |
---|
102 | 0, // file_size unused |
---|
103 | XPTR_NULL, // mapper_xp unused |
---|
104 | local_cxy ); |
---|
105 | |
---|
106 | if( vseg_kentry == NULL ) |
---|
107 | { |
---|
108 | printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ ); |
---|
109 | return -1; |
---|
110 | } |
---|
111 | |
---|
112 | vmm->kent_vpn_base = base; |
---|
113 | |
---|
114 | // register args vseg in VSL |
---|
115 | base = (CONFIG_VMM_KENTRY_BASE + |
---|
116 | CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT; |
---|
117 | size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT; |
---|
118 | |
---|
119 | vseg_args = vmm_create_vseg( process, |
---|
120 | VSEG_TYPE_DATA, |
---|
121 | base, |
---|
122 | size, |
---|
123 | 0, // file_offset unused |
---|
124 | 0, // file_size unused |
---|
125 | XPTR_NULL, // mapper_xp unused |
---|
126 | local_cxy ); |
---|
127 | |
---|
128 | if( vseg_args == NULL ) |
---|
129 | { |
---|
130 | printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ ); |
---|
131 | return -1; |
---|
132 | } |
---|
133 | |
---|
134 | vmm->args_vpn_base = base; |
---|
135 | |
---|
136 | // register the envs vseg in VSL |
---|
137 | base = (CONFIG_VMM_KENTRY_BASE + |
---|
138 | CONFIG_VMM_KENTRY_SIZE + |
---|
139 | CONFIG_VMM_ARGS_SIZE ) << CONFIG_PPM_PAGE_SHIFT; |
---|
140 | size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT; |
---|
141 | |
---|
142 | vseg_envs = vmm_create_vseg( process, |
---|
143 | VSEG_TYPE_DATA, |
---|
144 | base, |
---|
145 | size, |
---|
146 | 0, // file_offset unused |
---|
147 | 0, // file_size unused |
---|
148 | XPTR_NULL, // mapper_xp unused |
---|
149 | local_cxy ); |
---|
150 | |
---|
151 | if( vseg_envs == NULL ) |
---|
152 | { |
---|
153 | printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ ); |
---|
154 | return -1; |
---|
155 | } |
---|
156 | |
---|
157 | vmm->envs_vpn_base = base; |
---|
158 | |
---|
159 | // create GPT (empty) |
---|
160 | error = hal_gpt_create( &vmm->gpt ); |
---|
161 | |
---|
162 | if( error ) |
---|
163 | printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); |
---|
164 | |
---|
165 | // initialize GPT lock |
---|
166 | remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT ); |
---|
167 | |
---|
168 | // architecture specic GPT initialisation |
---|
169 | // (For TSAR, identity map the kentry_vseg) |
---|
170 | error = hal_vmm_init( vmm ); |
---|
171 | |
---|
172 | if( error ) |
---|
173 | printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ ); |
---|
174 | |
---|
175 | // initialize STACK allocator |
---|
176 | vmm->stack_mgr.bitmap = 0; |
---|
177 | vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; |
---|
178 | busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK ); |
---|
179 | |
---|
180 | // initialize MMAP allocator |
---|
181 | vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; |
---|
182 | vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; |
---|
183 | vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; |
---|
184 | busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP ); |
---|
185 | |
---|
186 | uint32_t i; |
---|
187 | for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] ); |
---|
188 | |
---|
189 | // initialize instrumentation counters |
---|
190 | vmm->pgfault_nr = 0; |
---|
191 | |
---|
192 | hal_fence(); |
---|
193 | |
---|
194 | #if DEBUG_VMM_INIT |
---|
195 | cycle = (uint32_t)hal_get_cycles(); |
---|
196 | if( DEBUG_VMM_INIT ) |
---|
197 | printk("\n[%s] thread[%x,%x] exit / process %x / entry_point %x / cycle %d\n", |
---|
198 | __FUNCTION__, this->process->pid, this->trdid, process->pid, process->vmm.entry_point, cycle ); |
---|
199 | #endif |
---|
200 | |
---|
201 | return 0; |
---|
202 | |
---|
203 | } // end vmm_init() |
---|
204 | |
---|
205 | ////////////////////////////////////// |
---|
206 | void vmm_display( process_t * process, |
---|
207 | bool_t mapping ) |
---|
208 | { |
---|
209 | vmm_t * vmm = &process->vmm; |
---|
210 | gpt_t * gpt = &vmm->gpt; |
---|
211 | |
---|
212 | printk("\n***** VSL and GPT(%x) for process %x in cluster %x\n\n", |
---|
213 | process->vmm.gpt.ptr , process->pid , local_cxy ); |
---|
214 | |
---|
215 | // get lock protecting the VSL and the GPT |
---|
216 | remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) ); |
---|
217 | remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) ); |
---|
218 | |
---|
219 | // scan the list of vsegs |
---|
220 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
221 | xptr_t iter_xp; |
---|
222 | xptr_t vseg_xp; |
---|
223 | vseg_t * vseg; |
---|
224 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
225 | { |
---|
226 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
227 | vseg = GET_PTR( vseg_xp ); |
---|
228 | |
---|
229 | printk(" - %s : base = %X / size = %X / npages = %d\n", |
---|
230 | vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size ); |
---|
231 | |
---|
232 | if( mapping ) |
---|
233 | { |
---|
234 | vpn_t vpn; |
---|
235 | ppn_t ppn; |
---|
236 | uint32_t attr; |
---|
237 | vpn_t base = vseg->vpn_base; |
---|
238 | vpn_t size = vseg->vpn_size; |
---|
239 | for( vpn = base ; vpn < (base+size) ; vpn++ ) |
---|
240 | { |
---|
241 | hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn ); |
---|
242 | if( attr & GPT_MAPPED ) |
---|
243 | { |
---|
244 | printk(" . vpn = %X / attr = %X / ppn = %X\n", vpn , attr , ppn ); |
---|
245 | } |
---|
246 | } |
---|
247 | } |
---|
248 | } |
---|
249 | |
---|
250 | // release the locks |
---|
251 | remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) ); |
---|
252 | remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) ); |
---|
253 | |
---|
254 | } // vmm_display() |
---|
255 | |
---|
256 | ////////////////////////////////////////// |
---|
257 | void vmm_attach_vseg_to_vsl( vmm_t * vmm, |
---|
258 | vseg_t * vseg ) |
---|
259 | { |
---|
260 | // build extended pointer on rwlock protecting VSL |
---|
261 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
262 | |
---|
263 | // get rwlock in write mode |
---|
264 | remote_rwlock_wr_acquire( lock_xp ); |
---|
265 | |
---|
266 | // update vseg descriptor |
---|
267 | vseg->vmm = vmm; |
---|
268 | |
---|
269 | // add vseg in vmm list |
---|
270 | xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ), |
---|
271 | XPTR( local_cxy , &vseg->xlist ) ); |
---|
272 | |
---|
273 | // release rwlock in write mode |
---|
274 | remote_rwlock_wr_release( lock_xp ); |
---|
275 | } |
---|
276 | |
---|
277 | //////////////////////////////////////////// |
---|
278 | void vmm_detach_vseg_from_vsl( vmm_t * vmm, |
---|
279 | vseg_t * vseg ) |
---|
280 | { |
---|
281 | // get vseg type |
---|
282 | uint32_t type = vseg->type; |
---|
283 | |
---|
284 | // build extended pointer on rwlock protecting VSL |
---|
285 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
286 | |
---|
287 | // get rwlock in write mode |
---|
288 | remote_rwlock_wr_acquire( lock_xp ); |
---|
289 | |
---|
290 | // update vseg descriptor |
---|
291 | vseg->vmm = NULL; |
---|
292 | |
---|
293 | // remove vseg from VSL |
---|
294 | xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); |
---|
295 | |
---|
296 | // release rwlock in write mode |
---|
297 | remote_rwlock_wr_release( lock_xp ); |
---|
298 | |
---|
299 | // release the stack slot to VMM stack allocator if STACK type |
---|
300 | if( type == VSEG_TYPE_STACK ) |
---|
301 | { |
---|
302 | // get pointer on stack allocator |
---|
303 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
304 | |
---|
305 | // compute slot index |
---|
306 | uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE); |
---|
307 | |
---|
308 | // update stacks_bitmap |
---|
309 | busylock_acquire( &mgr->lock ); |
---|
310 | bitmap_clear( &mgr->bitmap , index ); |
---|
311 | busylock_release( &mgr->lock ); |
---|
312 | } |
---|
313 | |
---|
314 | // release the vseg to VMM mmap allocator if MMAP type |
---|
315 | if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) ) |
---|
316 | { |
---|
317 | // get pointer on mmap allocator |
---|
318 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
319 | |
---|
320 | // compute zombi_list index |
---|
321 | uint32_t index = bits_log2( vseg->vpn_size ); |
---|
322 | |
---|
323 | // update zombi_list |
---|
324 | busylock_acquire( &mgr->lock ); |
---|
325 | list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); |
---|
326 | busylock_release( &mgr->lock ); |
---|
327 | } |
---|
328 | |
---|
329 | // release physical memory allocated for vseg descriptor if no MMAP type |
---|
330 | if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) ) |
---|
331 | { |
---|
332 | vseg_free( vseg ); |
---|
333 | } |
---|
334 | |
---|
335 | } // end vmm_remove_vseg_from_vsl() |
---|
336 | |
---|
337 | //////////////////////////////////////////////// |
---|
338 | void vmm_global_update_pte( process_t * process, |
---|
339 | vpn_t vpn, |
---|
340 | uint32_t attr, |
---|
341 | ppn_t ppn ) |
---|
342 | { |
---|
343 | xlist_entry_t * process_root_ptr; |
---|
344 | xptr_t process_root_xp; |
---|
345 | xptr_t process_iter_xp; |
---|
346 | |
---|
347 | xptr_t remote_process_xp; |
---|
348 | cxy_t remote_process_cxy; |
---|
349 | process_t * remote_process_ptr; |
---|
350 | xptr_t remote_gpt_xp; |
---|
351 | |
---|
352 | pid_t pid; |
---|
353 | cxy_t owner_cxy; |
---|
354 | lpid_t owner_lpid; |
---|
355 | |
---|
356 | #if DEBUG_VMM_UPDATE_PTE |
---|
357 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
358 | thread_t * this = CURRENT_THREAD; |
---|
359 | if( DEBUG_VMM_UPDATE_PTE < cycle ) |
---|
360 | printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n", |
---|
361 | __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); |
---|
362 | #endif |
---|
363 | |
---|
364 | // check cluster is reference |
---|
365 | assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n"); |
---|
366 | |
---|
367 | // get extended pointer on root of process copies xlist in owner cluster |
---|
368 | pid = process->pid; |
---|
369 | owner_cxy = CXY_FROM_PID( pid ); |
---|
370 | owner_lpid = LPID_FROM_PID( pid ); |
---|
371 | process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; |
---|
372 | process_root_xp = XPTR( owner_cxy , process_root_ptr ); |
---|
373 | |
---|
374 | // loop on destination process copies |
---|
375 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
376 | { |
---|
377 | // get cluster and local pointer on remote process |
---|
378 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
379 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
380 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
381 | |
---|
382 | #if (DEBUG_VMM_UPDATE_PTE & 0x1) |
---|
383 | if( DEBUG_VMM_UPDATE_PTE < cycle ) |
---|
384 | printk("\n[%s] threadr[%x,%x] handling vpn %x for process %x in cluster %x\n", |
---|
385 | __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); |
---|
386 | #endif |
---|
387 | |
---|
388 | // get extended pointer on remote gpt |
---|
389 | remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); |
---|
390 | |
---|
391 | // update remote GPT |
---|
392 | hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn ); |
---|
393 | } |
---|
394 | |
---|
395 | #if DEBUG_VMM_UPDATE_PTE |
---|
396 | cycle = (uint32_t)hal_get_cycles(); |
---|
397 | if( DEBUG_VMM_UPDATE_PTE < cycle ) |
---|
398 | printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n", |
---|
399 | __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); |
---|
400 | #endif |
---|
401 | |
---|
402 | } // end vmm_global_update_pte() |
---|
403 | |
---|
404 | /////////////////////////////////////// |
---|
405 | void vmm_set_cow( process_t * process ) |
---|
406 | { |
---|
407 | vmm_t * vmm; |
---|
408 | |
---|
409 | xlist_entry_t * process_root_ptr; |
---|
410 | xptr_t process_root_xp; |
---|
411 | xptr_t process_iter_xp; |
---|
412 | |
---|
413 | xptr_t remote_process_xp; |
---|
414 | cxy_t remote_process_cxy; |
---|
415 | process_t * remote_process_ptr; |
---|
416 | xptr_t remote_gpt_xp; |
---|
417 | |
---|
418 | xptr_t vseg_root_xp; |
---|
419 | xptr_t vseg_iter_xp; |
---|
420 | |
---|
421 | xptr_t vseg_xp; |
---|
422 | vseg_t * vseg; |
---|
423 | |
---|
424 | pid_t pid; |
---|
425 | cxy_t owner_cxy; |
---|
426 | lpid_t owner_lpid; |
---|
427 | |
---|
428 | #if DEBUG_VMM_SET_COW |
---|
429 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
430 | thread_t * this = CURRENT_THREAD; |
---|
431 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
432 | printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", |
---|
433 | __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); |
---|
434 | #endif |
---|
435 | |
---|
436 | // check cluster is reference |
---|
437 | assert( (GET_CXY( process->ref_xp ) == local_cxy) , |
---|
438 | "local cluster is not process reference cluster\n"); |
---|
439 | |
---|
440 | // get pointer on reference VMM |
---|
441 | vmm = &process->vmm; |
---|
442 | |
---|
443 | // get extended pointer on root of process copies xlist in owner cluster |
---|
444 | pid = process->pid; |
---|
445 | owner_cxy = CXY_FROM_PID( pid ); |
---|
446 | owner_lpid = LPID_FROM_PID( pid ); |
---|
447 | process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; |
---|
448 | process_root_xp = XPTR( owner_cxy , process_root_ptr ); |
---|
449 | |
---|
450 | // get extended pointer on root of vsegs xlist from reference VMM |
---|
451 | vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
452 | |
---|
453 | // loop on destination process copies |
---|
454 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
455 | { |
---|
456 | // get cluster and local pointer on remote process |
---|
457 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
458 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
459 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
460 | |
---|
461 | #if (DEBUG_VMM_SET_COW & 1) |
---|
462 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
463 | printk("\n[%s] thread[%x,%x] handling process %x in cluster %x\n", |
---|
464 | __FUNCTION__, this->process->pid, this->trdid, process->pid , remote_process_cxy ); |
---|
465 | #endif |
---|
466 | |
---|
467 | // get extended pointer on remote gpt |
---|
468 | remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); |
---|
469 | |
---|
470 | // loop on vsegs in (local) reference process VSL |
---|
471 | XLIST_FOREACH( vseg_root_xp , vseg_iter_xp ) |
---|
472 | { |
---|
473 | // get pointer on vseg |
---|
474 | vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist ); |
---|
475 | vseg = GET_PTR( vseg_xp ); |
---|
476 | |
---|
477 | assert( (GET_CXY( vseg_xp ) == local_cxy) , |
---|
478 | "all vsegs in reference VSL must be local\n" ); |
---|
479 | |
---|
480 | // get vseg type, base and size |
---|
481 | uint32_t type = vseg->type; |
---|
482 | vpn_t vpn_base = vseg->vpn_base; |
---|
483 | vpn_t vpn_size = vseg->vpn_size; |
---|
484 | |
---|
485 | #if (DEBUG_VMM_SET_COW & 1) |
---|
486 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
487 | printk("\n[%s] thread[%x,%x] handling vseg %s / vpn_base = %x / vpn_size = %x\n", |
---|
488 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); |
---|
489 | #endif |
---|
490 | // only DATA, ANON and REMOTE vsegs |
---|
491 | if( (type == VSEG_TYPE_DATA) || |
---|
492 | (type == VSEG_TYPE_ANON) || |
---|
493 | (type == VSEG_TYPE_REMOTE) ) |
---|
494 | { |
---|
495 | vpn_t vpn; |
---|
496 | uint32_t attr; |
---|
497 | ppn_t ppn; |
---|
498 | xptr_t page_xp; |
---|
499 | cxy_t page_cxy; |
---|
500 | page_t * page_ptr; |
---|
501 | xptr_t forks_xp; |
---|
502 | xptr_t lock_xp; |
---|
503 | |
---|
504 | // update flags in remote GPT |
---|
505 | hal_gpt_set_cow( remote_gpt_xp, |
---|
506 | vpn_base, |
---|
507 | vpn_size ); |
---|
508 | |
---|
509 | // atomically increment pending forks counter in physical pages, |
---|
510 | // for all vseg pages that are mapped in reference cluster |
---|
511 | if( remote_process_cxy == local_cxy ) |
---|
512 | { |
---|
513 | // scan all pages in vseg |
---|
514 | for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) |
---|
515 | { |
---|
516 | // get page attributes and PPN from reference GPT |
---|
517 | hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn ); |
---|
518 | |
---|
519 | // atomically update pending forks counter if page is mapped |
---|
520 | if( attr & GPT_MAPPED ) |
---|
521 | { |
---|
522 | // get pointers and cluster on page descriptor |
---|
523 | page_xp = ppm_ppn2page( ppn ); |
---|
524 | page_cxy = GET_CXY( page_xp ); |
---|
525 | page_ptr = GET_PTR( page_xp ); |
---|
526 | |
---|
527 | // get extended pointers on "forks" and "lock" |
---|
528 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
529 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
530 | |
---|
531 | // take lock protecting "forks" counter |
---|
532 | remote_busylock_acquire( lock_xp ); |
---|
533 | |
---|
534 | // increment "forks" |
---|
535 | hal_remote_atomic_add( forks_xp , 1 ); |
---|
536 | |
---|
537 | // release lock protecting "forks" counter |
---|
538 | remote_busylock_release( lock_xp ); |
---|
539 | } |
---|
540 | } // end loop on vpn |
---|
541 | } // end if local |
---|
542 | } // end if vseg type |
---|
543 | } // end loop on vsegs |
---|
544 | } // end loop on process copies |
---|
545 | |
---|
546 | #if DEBUG_VMM_SET_COW |
---|
547 | cycle = (uint32_t)hal_get_cycles(); |
---|
548 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
549 | printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", |
---|
550 | __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); |
---|
551 | #endif |
---|
552 | |
---|
553 | } // end vmm_set-cow() |
---|
554 | |
---|
555 | ///////////////////////////////////////////////// |
---|
556 | error_t vmm_fork_copy( process_t * child_process, |
---|
557 | xptr_t parent_process_xp ) |
---|
558 | { |
---|
559 | error_t error; |
---|
560 | cxy_t parent_cxy; |
---|
561 | process_t * parent_process; |
---|
562 | vmm_t * parent_vmm; |
---|
563 | xptr_t parent_lock_xp; |
---|
564 | vmm_t * child_vmm; |
---|
565 | xptr_t iter_xp; |
---|
566 | xptr_t parent_vseg_xp; |
---|
567 | vseg_t * parent_vseg; |
---|
568 | vseg_t * child_vseg; |
---|
569 | uint32_t type; |
---|
570 | bool_t cow; |
---|
571 | vpn_t vpn; |
---|
572 | vpn_t vpn_base; |
---|
573 | vpn_t vpn_size; |
---|
574 | xptr_t page_xp; // extended pointer on page descriptor |
---|
575 | page_t * page_ptr; |
---|
576 | cxy_t page_cxy; |
---|
577 | xptr_t forks_xp; // extended pointer on forks counter in page descriptor |
---|
578 | xptr_t lock_xp; // extended pointer on lock protecting the forks counter |
---|
579 | xptr_t parent_root_xp; |
---|
580 | bool_t mapped; |
---|
581 | ppn_t ppn; |
---|
582 | |
---|
583 | #if DEBUG_VMM_FORK_COPY |
---|
584 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
585 | thread_t * this = CURRENT_THREAD; |
---|
586 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
587 | printk("\n[%s] thread %x enter / cycle %d\n", |
---|
588 | __FUNCTION__ , this->process->pid, this->trdid, cycle ); |
---|
589 | #endif |
---|
590 | |
---|
591 | // get parent process cluster and local pointer |
---|
592 | parent_cxy = GET_CXY( parent_process_xp ); |
---|
593 | parent_process = GET_PTR( parent_process_xp ); |
---|
594 | |
---|
595 | // get local pointers on parent and child VMM |
---|
596 | parent_vmm = &parent_process->vmm; |
---|
597 | child_vmm = &child_process->vmm; |
---|
598 | |
---|
599 | // get extended pointer on lock protecting the parent VSL |
---|
600 | parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock ); |
---|
601 | |
---|
602 | // initialize the lock protecting the child VSL |
---|
603 | remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK ); |
---|
604 | |
---|
605 | // initialize the child VSL as empty |
---|
606 | xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) ); |
---|
607 | child_vmm->vsegs_nr = 0; |
---|
608 | |
---|
609 | // create child GPT |
---|
610 | error = hal_gpt_create( &child_vmm->gpt ); |
---|
611 | |
---|
612 | if( error ) |
---|
613 | { |
---|
614 | printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); |
---|
615 | return -1; |
---|
616 | } |
---|
617 | |
---|
618 | // build extended pointer on parent VSL |
---|
619 | parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root ); |
---|
620 | |
---|
621 | // take the lock protecting the parent VSL in read mode |
---|
622 | remote_rwlock_rd_acquire( parent_lock_xp ); |
---|
623 | |
---|
624 | // loop on parent VSL xlist |
---|
625 | XLIST_FOREACH( parent_root_xp , iter_xp ) |
---|
626 | { |
---|
627 | // get local and extended pointers on current parent vseg |
---|
628 | parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
629 | parent_vseg = GET_PTR( parent_vseg_xp ); |
---|
630 | |
---|
631 | // get vseg type |
---|
632 | type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) ); |
---|
633 | |
---|
634 | #if DEBUG_VMM_FORK_COPY |
---|
635 | cycle = (uint32_t)hal_get_cycles(); |
---|
636 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
637 | printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n", |
---|
638 | __FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type), |
---|
639 | hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); |
---|
640 | #endif |
---|
641 | |
---|
642 | // all parent vsegs - but STACK - must be copied in child VSL |
---|
643 | if( type != VSEG_TYPE_STACK ) |
---|
644 | { |
---|
645 | // allocate memory for a new child vseg |
---|
646 | child_vseg = vseg_alloc(); |
---|
647 | if( child_vseg == NULL ) // release all allocated vsegs |
---|
648 | { |
---|
649 | vmm_destroy( child_process ); |
---|
650 | printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ ); |
---|
651 | return -1; |
---|
652 | } |
---|
653 | |
---|
654 | // copy parent vseg to child vseg |
---|
655 | vseg_init_from_ref( child_vseg , parent_vseg_xp ); |
---|
656 | |
---|
657 | // register child vseg in child VSL |
---|
658 | vmm_attach_vseg_to_vsl( child_vmm , child_vseg ); |
---|
659 | |
---|
660 | #if DEBUG_VMM_FORK_COPY |
---|
661 | cycle = (uint32_t)hal_get_cycles(); |
---|
662 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
663 | printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n", |
---|
664 | __FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type), |
---|
665 | hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); |
---|
666 | #endif |
---|
667 | |
---|
668 | // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT |
---|
669 | if( type != VSEG_TYPE_CODE ) |
---|
670 | { |
---|
671 | // activate the COW for DATA, MMAP, REMOTE vsegs only |
---|
672 | cow = ( type != VSEG_TYPE_FILE ); |
---|
673 | |
---|
674 | vpn_base = child_vseg->vpn_base; |
---|
675 | vpn_size = child_vseg->vpn_size; |
---|
676 | |
---|
677 | // scan pages in parent vseg |
---|
678 | for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) |
---|
679 | { |
---|
680 | error = hal_gpt_pte_copy( &child_vmm->gpt, |
---|
681 | XPTR( parent_cxy , &parent_vmm->gpt ), |
---|
682 | vpn, |
---|
683 | cow, |
---|
684 | &ppn, |
---|
685 | &mapped ); |
---|
686 | if( error ) |
---|
687 | { |
---|
688 | vmm_destroy( child_process ); |
---|
689 | printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ ); |
---|
690 | return -1; |
---|
691 | } |
---|
692 | |
---|
693 | // increment pending forks counter in page if mapped |
---|
694 | if( mapped ) |
---|
695 | { |
---|
696 | // get pointers and cluster on page descriptor |
---|
697 | page_xp = ppm_ppn2page( ppn ); |
---|
698 | page_cxy = GET_CXY( page_xp ); |
---|
699 | page_ptr = GET_PTR( page_xp ); |
---|
700 | |
---|
701 | // get extended pointers on "forks" and "lock" |
---|
702 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
703 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
704 | |
---|
705 | // get lock protecting "forks" counter |
---|
706 | remote_busylock_acquire( lock_xp ); |
---|
707 | |
---|
708 | // increment "forks" |
---|
709 | hal_remote_atomic_add( forks_xp , 1 ); |
---|
710 | |
---|
711 | // release lock protecting "forks" counter |
---|
712 | remote_busylock_release( lock_xp ); |
---|
713 | |
---|
714 | #if DEBUG_VMM_FORK_COPY |
---|
715 | cycle = (uint32_t)hal_get_cycles(); |
---|
716 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
717 | printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n", |
---|
718 | __FUNCTION__ , this->process->pid, this->trdid , vpn , cycle ); |
---|
719 | #endif |
---|
720 | } |
---|
721 | } |
---|
722 | } // end if no code & no stack |
---|
723 | } // end if no stack |
---|
724 | } // end loop on vsegs |
---|
725 | |
---|
726 | // release the parent VSL lock in read mode |
---|
727 | remote_rwlock_rd_release( parent_lock_xp ); |
---|
728 | |
---|
729 | // initialize child GPT (architecture specic) |
---|
730 | // => For TSAR, identity map the kentry_vseg |
---|
731 | error = hal_vmm_init( child_vmm ); |
---|
732 | |
---|
733 | if( error ) |
---|
734 | { |
---|
735 | printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); |
---|
736 | return -1; |
---|
737 | } |
---|
738 | |
---|
739 | // initialize the child VMM STACK allocator |
---|
740 | child_vmm->stack_mgr.bitmap = 0; |
---|
741 | child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; |
---|
742 | |
---|
743 | // initialize the child VMM MMAP allocator |
---|
744 | uint32_t i; |
---|
745 | child_vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; |
---|
746 | child_vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; |
---|
747 | child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; |
---|
748 | for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] ); |
---|
749 | |
---|
750 | // initialize instrumentation counters |
---|
751 | child_vmm->pgfault_nr = 0; |
---|
752 | |
---|
753 | // copy base addresses from parent VMM to child VMM |
---|
754 | child_vmm->kent_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->kent_vpn_base)); |
---|
755 | child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); |
---|
756 | child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base)); |
---|
757 | child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base)); |
---|
758 | child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base)); |
---|
759 | child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base)); |
---|
760 | |
---|
761 | child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point)); |
---|
762 | |
---|
763 | hal_fence(); |
---|
764 | |
---|
765 | #if DEBUG_VMM_FORK_COPY |
---|
766 | cycle = (uint32_t)hal_get_cycles(); |
---|
767 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
768 | printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n", |
---|
769 | __FUNCTION__ , this->process->pid, this->trdid , cycle ); |
---|
770 | #endif |
---|
771 | |
---|
772 | return 0; |
---|
773 | |
---|
774 | } // vmm_fork_copy() |
---|
775 | |
---|
776 | /////////////////////////////////////// |
---|
777 | void vmm_destroy( process_t * process ) |
---|
778 | { |
---|
779 | xptr_t vseg_xp; |
---|
780 | vseg_t * vseg; |
---|
781 | |
---|
782 | #if DEBUG_VMM_DESTROY |
---|
783 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
784 | thread_t * this = CURRENT_THREAD; |
---|
785 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
786 | printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", |
---|
787 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
788 | #endif |
---|
789 | |
---|
790 | #if (DEBUG_VMM_DESTROY & 1 ) |
---|
791 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
792 | vmm_display( process , true ); |
---|
793 | #endif |
---|
794 | |
---|
795 | // get pointer on local VMM |
---|
796 | vmm_t * vmm = &process->vmm; |
---|
797 | |
---|
798 | // get extended pointer on VSL root and VSL lock |
---|
799 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
800 | |
---|
801 | // scan the VSL to delete all registered vsegs |
---|
802 | // (don't use a FOREACH for item deletion in xlist) |
---|
803 | while( !xlist_is_empty( root_xp ) ) |
---|
804 | { |
---|
805 | // get pointer on first vseg in VSL |
---|
806 | vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); |
---|
807 | vseg = GET_PTR( vseg_xp ); |
---|
808 | |
---|
809 | // delete vseg and release physical pages |
---|
810 | vmm_delete_vseg( process->pid , vseg->min ); |
---|
811 | |
---|
812 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
813 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
814 | printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n", |
---|
815 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
816 | #endif |
---|
817 | |
---|
818 | } |
---|
819 | |
---|
820 | // remove all vsegs from zombi_lists in MMAP allocator |
---|
821 | uint32_t i; |
---|
822 | for( i = 0 ; i<32 ; i++ ) |
---|
823 | { |
---|
824 | while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) ) |
---|
825 | { |
---|
826 | vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist ); |
---|
827 | |
---|
828 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
829 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
830 | printk("\n[%s] found zombi vseg / vpn_base %x / vpn_size %d\n", |
---|
831 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
832 | #endif |
---|
833 | // clean vseg descriptor |
---|
834 | vseg->vmm = NULL; |
---|
835 | |
---|
836 | // remove vseg from xlist |
---|
837 | xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); |
---|
838 | |
---|
839 | // release vseg descriptor |
---|
840 | vseg_free( vseg ); |
---|
841 | |
---|
842 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
843 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
844 | printk("\n[%s] zombi vseg released / vpn_base %x / vpn_size %d\n", |
---|
845 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
846 | #endif |
---|
847 | } |
---|
848 | } |
---|
849 | |
---|
850 | // release memory allocated to the GPT itself |
---|
851 | hal_gpt_destroy( &vmm->gpt ); |
---|
852 | |
---|
853 | #if DEBUG_VMM_DESTROY |
---|
854 | cycle = (uint32_t)hal_get_cycles(); |
---|
855 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
856 | printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", |
---|
857 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle ); |
---|
858 | #endif |
---|
859 | |
---|
860 | } // end vmm_destroy() |
---|
861 | |
---|
862 | ///////////////////////////////////////////////// |
---|
863 | vseg_t * vmm_check_conflict( process_t * process, |
---|
864 | vpn_t vpn_base, |
---|
865 | vpn_t vpn_size ) |
---|
866 | { |
---|
867 | vmm_t * vmm = &process->vmm; |
---|
868 | |
---|
869 | // scan the VSL |
---|
870 | vseg_t * vseg; |
---|
871 | xptr_t iter_xp; |
---|
872 | xptr_t vseg_xp; |
---|
873 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
874 | |
---|
875 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
876 | { |
---|
877 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
878 | vseg = GET_PTR( vseg_xp ); |
---|
879 | |
---|
880 | if( ((vpn_base + vpn_size) > vseg->vpn_base) && |
---|
881 | (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg; |
---|
882 | } |
---|
883 | return NULL; |
---|
884 | |
---|
885 | } // end vmm_check_conflict() |
---|
886 | |
---|
887 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
888 | // This static function is called by the vmm_create_vseg() function, and implements |
---|
889 | // the VMM stack_vseg specific allocator. |
---|
890 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
891 | // @ vmm : pointer on VMM. |
---|
892 | // @ vpn_base : (return value) first allocated page |
---|
893 | // @ vpn_size : (return value) number of allocated pages |
---|
894 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
895 | static error_t vmm_stack_alloc( vmm_t * vmm, |
---|
896 | vpn_t * vpn_base, |
---|
897 | vpn_t * vpn_size ) |
---|
898 | { |
---|
899 | // get stack allocator pointer |
---|
900 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
901 | |
---|
902 | // get lock on stack allocator |
---|
903 | busylock_acquire( &mgr->lock ); |
---|
904 | |
---|
905 | // get first free slot index in bitmap |
---|
906 | int32_t index = bitmap_ffc( &mgr->bitmap , 4 ); |
---|
907 | if( (index < 0) || (index > 31) ) |
---|
908 | { |
---|
909 | busylock_release( &mgr->lock ); |
---|
910 | return 0xFFFFFFFF; |
---|
911 | } |
---|
912 | |
---|
913 | // update bitmap |
---|
914 | bitmap_set( &mgr->bitmap , index ); |
---|
915 | |
---|
916 | // release lock on stack allocator |
---|
917 | busylock_release( &mgr->lock ); |
---|
918 | |
---|
919 | // returns vpn_base, vpn_size (one page non allocated) |
---|
920 | *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1; |
---|
921 | *vpn_size = CONFIG_VMM_STACK_SIZE - 1; |
---|
922 | return 0; |
---|
923 | |
---|
924 | } // end vmm_stack_alloc() |
---|
925 | |
---|
926 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
927 | // This static function is called by the vmm_create_vseg() function, and implements |
---|
928 | // the VMM MMAP specific allocator. |
---|
929 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
930 | // @ vmm : [in] pointer on VMM. |
---|
931 | // @ npages : [in] requested number of pages. |
---|
932 | // @ vpn_base : [out] first allocated page. |
---|
933 | // @ vpn_size : [out] actual number of allocated pages. |
---|
934 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
935 | static error_t vmm_mmap_alloc( vmm_t * vmm, |
---|
936 | vpn_t npages, |
---|
937 | vpn_t * vpn_base, |
---|
938 | vpn_t * vpn_size ) |
---|
939 | { |
---|
940 | uint32_t index; |
---|
941 | vseg_t * vseg; |
---|
942 | vpn_t base; |
---|
943 | vpn_t size; |
---|
944 | vpn_t free; |
---|
945 | |
---|
946 | // mmap vseg size must be power of 2 |
---|
947 | // compute actual size and index in zombi_list array |
---|
948 | size = POW2_ROUNDUP( npages ); |
---|
949 | index = bits_log2( size ); |
---|
950 | |
---|
951 | // get mmap allocator pointer |
---|
952 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
953 | |
---|
954 | // get lock on mmap allocator |
---|
955 | busylock_acquire( &mgr->lock ); |
---|
956 | |
---|
957 | // get vseg from zombi_list or from mmap zone |
---|
958 | if( list_is_empty( &mgr->zombi_list[index] ) ) // from mmap zone |
---|
959 | { |
---|
960 | // check overflow |
---|
961 | free = mgr->first_free_vpn; |
---|
962 | if( (free + size) > mgr->vpn_size ) return ENOMEM; |
---|
963 | |
---|
964 | // update STACK allocator |
---|
965 | mgr->first_free_vpn += size; |
---|
966 | |
---|
967 | // compute base |
---|
968 | base = free; |
---|
969 | } |
---|
970 | else // from zombi_list |
---|
971 | { |
---|
972 | // get pointer on zombi vseg from zombi_list |
---|
973 | vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist ); |
---|
974 | |
---|
975 | // remove vseg from free-list |
---|
976 | list_unlink( &vseg->zlist ); |
---|
977 | |
---|
978 | // compute base |
---|
979 | base = vseg->vpn_base; |
---|
980 | } |
---|
981 | |
---|
982 | // release lock on mmap allocator |
---|
983 | busylock_release( &mgr->lock ); |
---|
984 | |
---|
985 | // returns vpn_base, vpn_size |
---|
986 | *vpn_base = base; |
---|
987 | *vpn_size = size; |
---|
988 | return 0; |
---|
989 | |
---|
990 | } // end vmm_mmap_alloc() |
---|
991 | |
---|
992 | //////////////////////////////////////////////// |
---|
993 | vseg_t * vmm_create_vseg( process_t * process, |
---|
994 | vseg_type_t type, |
---|
995 | intptr_t base, |
---|
996 | uint32_t size, |
---|
997 | uint32_t file_offset, |
---|
998 | uint32_t file_size, |
---|
999 | xptr_t mapper_xp, |
---|
1000 | cxy_t cxy ) |
---|
1001 | { |
---|
1002 | vseg_t * vseg; // created vseg pointer |
---|
1003 | vpn_t vpn_base; // first page index |
---|
1004 | vpn_t vpn_size; // number of pages covered by vseg |
---|
1005 | error_t error; |
---|
1006 | |
---|
1007 | #if DEBUG_VMM_CREATE_VSEG |
---|
1008 | thread_t * this = CURRENT_THREAD; |
---|
1009 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1010 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
1011 | printk("\n[%s] thread[%x,%x] enter / %s / cxy %x / cycle %d\n", |
---|
1012 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle ); |
---|
1013 | #endif |
---|
1014 | |
---|
1015 | // get pointer on VMM |
---|
1016 | vmm_t * vmm = &process->vmm; |
---|
1017 | |
---|
1018 | // compute base, size, vpn_base, vpn_size, depending on vseg type |
---|
1019 | // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs |
---|
1020 | |
---|
1021 | if( type == VSEG_TYPE_STACK ) |
---|
1022 | { |
---|
1023 | // get vpn_base and vpn_size from STACK allocator |
---|
1024 | error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size ); |
---|
1025 | if( error ) |
---|
1026 | { |
---|
1027 | printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n", |
---|
1028 | __FUNCTION__ , process->pid , local_cxy ); |
---|
1029 | return NULL; |
---|
1030 | } |
---|
1031 | |
---|
1032 | // compute vseg base and size from vpn_base and vpn_size |
---|
1033 | base = vpn_base << CONFIG_PPM_PAGE_SHIFT; |
---|
1034 | size = vpn_size << CONFIG_PPM_PAGE_SHIFT; |
---|
1035 | } |
---|
1036 | else if( type == VSEG_TYPE_FILE ) |
---|
1037 | { |
---|
1038 | // compute page index (in mapper) for first byte |
---|
1039 | vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT; |
---|
1040 | |
---|
1041 | // compute page index (in mapper) for last byte |
---|
1042 | vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1043 | |
---|
1044 | // compute offset in first page |
---|
1045 | uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK; |
---|
1046 | |
---|
1047 | // compute number of pages required in virtual space |
---|
1048 | vpn_t npages = vpn_max - vpn_min + 1; |
---|
1049 | |
---|
1050 | // get vpn_base and vpn_size from MMAP allocator |
---|
1051 | error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size ); |
---|
1052 | if( error ) |
---|
1053 | { |
---|
1054 | printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n", |
---|
1055 | __FUNCTION__ , process->pid , local_cxy ); |
---|
1056 | return NULL; |
---|
1057 | } |
---|
1058 | |
---|
1059 | // set the vseg base (not always aligned for FILE) |
---|
1060 | base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; |
---|
1061 | } |
---|
1062 | else if( (type == VSEG_TYPE_ANON) || |
---|
1063 | (type == VSEG_TYPE_REMOTE) ) |
---|
1064 | { |
---|
1065 | // compute number of required pages in virtual space |
---|
1066 | vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT; |
---|
1067 | if( size & CONFIG_PPM_PAGE_MASK) npages++; |
---|
1068 | |
---|
1069 | // get vpn_base and vpn_size from MMAP allocator |
---|
1070 | error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size ); |
---|
1071 | if( error ) |
---|
1072 | { |
---|
1073 | printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n", |
---|
1074 | __FUNCTION__ , process->pid , local_cxy ); |
---|
1075 | return NULL; |
---|
1076 | } |
---|
1077 | |
---|
1078 | // set vseg base (always aligned for ANON or REMOTE) |
---|
1079 | base = vpn_base << CONFIG_PPM_PAGE_SHIFT; |
---|
1080 | } |
---|
1081 | else // VSEG_TYPE_DATA or VSEG_TYPE_CODE |
---|
1082 | { |
---|
1083 | uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT; |
---|
1084 | uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1085 | |
---|
1086 | vpn_base = vpn_min; |
---|
1087 | vpn_size = vpn_max - vpn_min + 1; |
---|
1088 | } |
---|
1089 | |
---|
1090 | // check collisions |
---|
1091 | vseg = vmm_check_conflict( process , vpn_base , vpn_size ); |
---|
1092 | if( vseg != NULL ) |
---|
1093 | { |
---|
1094 | printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n" |
---|
1095 | " overlap existing vseg [vpn_base = %x / vpn_size = %x]\n", |
---|
1096 | __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size ); |
---|
1097 | return NULL; |
---|
1098 | } |
---|
1099 | |
---|
1100 | // allocate physical memory for vseg descriptor |
---|
1101 | vseg = vseg_alloc(); |
---|
1102 | if( vseg == NULL ) |
---|
1103 | { |
---|
1104 | printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n", |
---|
1105 | __FUNCTION__ , process->pid ); |
---|
1106 | return NULL; |
---|
1107 | } |
---|
1108 | |
---|
1109 | // initialize vseg descriptor |
---|
1110 | vseg_init( vseg, |
---|
1111 | type, |
---|
1112 | base, |
---|
1113 | size, |
---|
1114 | vpn_base, |
---|
1115 | vpn_size, |
---|
1116 | file_offset, |
---|
1117 | file_size, |
---|
1118 | mapper_xp, |
---|
1119 | cxy ); |
---|
1120 | |
---|
1121 | // attach vseg to VSL |
---|
1122 | vmm_attach_vseg_to_vsl( vmm , vseg ); |
---|
1123 | |
---|
1124 | #if DEBUG_VMM_CREATE_VSEG |
---|
1125 | cycle = (uint32_t)hal_get_cycles(); |
---|
1126 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
1127 | printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n", |
---|
1128 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle ); |
---|
1129 | #endif |
---|
1130 | |
---|
1131 | return vseg; |
---|
1132 | |
---|
1133 | } // vmm_create_vseg() |
---|
1134 | |
---|
1135 | /////////////////////////////////// |
---|
1136 | void vmm_delete_vseg( pid_t pid, |
---|
1137 | intptr_t vaddr ) |
---|
1138 | { |
---|
1139 | process_t * process; // local pointer on local process |
---|
1140 | vmm_t * vmm; // local pointer on local process VMM |
---|
1141 | vseg_t * vseg; // local pointer on local vseg containing vaddr |
---|
1142 | gpt_t * gpt; // local pointer on local process GPT |
---|
1143 | vpn_t vpn; // VPN of current PTE |
---|
1144 | vpn_t vpn_min; // VPN of first PTE |
---|
1145 | vpn_t vpn_max; // VPN of last PTE (excluded) |
---|
1146 | ppn_t ppn; // current PTE ppn value |
---|
1147 | uint32_t attr; // current PTE attributes |
---|
1148 | kmem_req_t req; // request to release memory |
---|
1149 | xptr_t page_xp; // extended pointer on page descriptor |
---|
1150 | cxy_t page_cxy; // page descriptor cluster |
---|
1151 | page_t * page_ptr; // page descriptor pointer |
---|
1152 | xptr_t forks_xp; // extended pointer on pending forks counter |
---|
1153 | xptr_t lock_xp; // extended pointer on lock protecting forks counter |
---|
1154 | uint32_t forks; // actual number of pendinf forks |
---|
1155 | |
---|
1156 | #if DEBUG_VMM_DELETE_VSEG |
---|
1157 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1158 | thread_t * this = CURRENT_THREAD; |
---|
1159 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1160 | printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n", |
---|
1161 | __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle ); |
---|
1162 | #endif |
---|
1163 | |
---|
1164 | // get local pointer on local process descriptor |
---|
1165 | process = cluster_get_local_process_from_pid( pid ); |
---|
1166 | |
---|
1167 | if( process == NULL ) return; |
---|
1168 | |
---|
1169 | // get pointers on local process VMM an GPT |
---|
1170 | vmm = &process->vmm; |
---|
1171 | gpt = &process->vmm.gpt; |
---|
1172 | |
---|
1173 | // get local pointer on vseg containing vaddr |
---|
1174 | vseg = vmm_vseg_from_vaddr( vmm , vaddr ); |
---|
1175 | |
---|
1176 | if( vseg == NULL ) return; |
---|
1177 | |
---|
1178 | // loop to invalidate all vseg PTEs in GPT |
---|
1179 | vpn_min = vseg->vpn_base; |
---|
1180 | vpn_max = vpn_min + vseg->vpn_size; |
---|
1181 | for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) |
---|
1182 | { |
---|
1183 | // get GPT entry |
---|
1184 | hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn ); |
---|
1185 | |
---|
1186 | if( attr & GPT_MAPPED ) // entry is mapped |
---|
1187 | { |
---|
1188 | |
---|
1189 | #if( DEBUG_VMM_DELETE_VSEG & 1 ) |
---|
1190 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1191 | printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) ); |
---|
1192 | #endif |
---|
1193 | |
---|
1194 | // check small page |
---|
1195 | assert( (attr & GPT_SMALL) , "an user vseg must use small pages" ); |
---|
1196 | |
---|
1197 | // unmap GPT entry in local GPT |
---|
1198 | hal_gpt_reset_pte( gpt , vpn ); |
---|
1199 | |
---|
1200 | // handle pending forks counter if |
---|
1201 | // 1) not identity mapped |
---|
1202 | // 2) reference cluster |
---|
1203 | if( ((vseg->flags & VSEG_IDENT) == 0) && |
---|
1204 | (GET_CXY( process->ref_xp ) == local_cxy) ) |
---|
1205 | { |
---|
1206 | // get extended pointer on physical page descriptor |
---|
1207 | page_xp = ppm_ppn2page( ppn ); |
---|
1208 | page_cxy = GET_CXY( page_xp ); |
---|
1209 | page_ptr = GET_PTR( page_xp ); |
---|
1210 | |
---|
1211 | // get extended pointers on forks and lock fields |
---|
1212 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
1213 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
1214 | |
---|
1215 | // get pending forks counter |
---|
1216 | forks = hal_remote_l32( forks_xp ); |
---|
1217 | |
---|
1218 | if( forks ) // decrement pending forks counter |
---|
1219 | { |
---|
1220 | hal_remote_atomic_add( forks_xp , -1 ); |
---|
1221 | } |
---|
1222 | else // release physical page to relevant cluster |
---|
1223 | { |
---|
1224 | if( page_cxy == local_cxy ) // local cluster |
---|
1225 | { |
---|
1226 | req.type = KMEM_PAGE; |
---|
1227 | req.ptr = page_ptr; |
---|
1228 | kmem_free( &req ); |
---|
1229 | } |
---|
1230 | else // remote cluster |
---|
1231 | { |
---|
1232 | rpc_pmem_release_pages_client( page_cxy , page_ptr ); |
---|
1233 | } |
---|
1234 | |
---|
1235 | #if( DEBUG_VMM_DELETE_VSEG & 1 ) |
---|
1236 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1237 | printk("- release ppn %x\n", ppn ); |
---|
1238 | #endif |
---|
1239 | } |
---|
1240 | } |
---|
1241 | } |
---|
1242 | } |
---|
1243 | |
---|
1244 | // remove vseg from VSL and release vseg descriptor (if not MMAP) |
---|
1245 | vmm_detach_vseg_from_vsl( vmm , vseg ); |
---|
1246 | |
---|
1247 | #if DEBUG_VMM_DELETE_VSEG |
---|
1248 | cycle = (uint32_t)hal_get_cycles(); |
---|
1249 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1250 | printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n", |
---|
1251 | __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle ); |
---|
1252 | #endif |
---|
1253 | |
---|
1254 | } // end vmm_delete_vseg() |
---|
1255 | |
---|
1256 | ///////////////////////////////////////////// |
---|
1257 | vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, |
---|
1258 | intptr_t vaddr ) |
---|
1259 | { |
---|
1260 | xptr_t iter_xp; |
---|
1261 | xptr_t vseg_xp; |
---|
1262 | vseg_t * vseg; |
---|
1263 | |
---|
1264 | // get extended pointers on VSL lock and root |
---|
1265 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
1266 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
1267 | |
---|
1268 | // get lock protecting the VSL |
---|
1269 | remote_rwlock_rd_acquire( lock_xp ); |
---|
1270 | |
---|
1271 | // scan the list of vsegs in VSL |
---|
1272 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
1273 | { |
---|
1274 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
1275 | vseg = GET_PTR( vseg_xp ); |
---|
1276 | |
---|
1277 | if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) |
---|
1278 | { |
---|
1279 | // return success |
---|
1280 | remote_rwlock_rd_release( lock_xp ); |
---|
1281 | return vseg; |
---|
1282 | } |
---|
1283 | } |
---|
1284 | |
---|
1285 | // return failure |
---|
1286 | remote_rwlock_rd_release( lock_xp ); |
---|
1287 | return NULL; |
---|
1288 | |
---|
1289 | } // end vmm_vseg_from_vaddr() |
---|
1290 | |
---|
1291 | ///////////////////////////////////////////// |
---|
1292 | error_t vmm_resize_vseg( process_t * process, |
---|
1293 | intptr_t base, |
---|
1294 | intptr_t size ) |
---|
1295 | { |
---|
1296 | error_t error; |
---|
1297 | vseg_t * new; |
---|
1298 | vpn_t vpn_min; |
---|
1299 | vpn_t vpn_max; |
---|
1300 | |
---|
1301 | // get pointer on process VMM |
---|
1302 | vmm_t * vmm = &process->vmm; |
---|
1303 | |
---|
1304 | intptr_t addr_min = base; |
---|
1305 | intptr_t addr_max = base + size; |
---|
1306 | |
---|
1307 | // get pointer on vseg |
---|
1308 | vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); |
---|
1309 | |
---|
1310 | if( vseg == NULL) return EINVAL; |
---|
1311 | |
---|
1312 | // get extended pointer on VSL lock |
---|
1313 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
1314 | |
---|
1315 | // get lock protecting VSL |
---|
1316 | remote_rwlock_wr_acquire( lock_xp ); |
---|
1317 | |
---|
1318 | if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg |
---|
1319 | { |
---|
1320 | error = -1; |
---|
1321 | } |
---|
1322 | else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted |
---|
1323 | { |
---|
1324 | vmm_delete_vseg( process->pid , vseg->min ); |
---|
1325 | error = 0; |
---|
1326 | } |
---|
1327 | else if( vseg->min == addr_min ) // vseg must be resized |
---|
1328 | { |
---|
1329 | // update vseg base address |
---|
1330 | vseg->min = addr_max; |
---|
1331 | |
---|
1332 | // update vpn_base and vpn_size |
---|
1333 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1334 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1335 | vseg->vpn_base = vpn_min; |
---|
1336 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1337 | error = 0; |
---|
1338 | } |
---|
1339 | else if( vseg->max == addr_max ) // vseg must be resized |
---|
1340 | { |
---|
1341 | // update vseg max address |
---|
1342 | vseg->max = addr_min; |
---|
1343 | |
---|
1344 | // update vpn_base and vpn_size |
---|
1345 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1346 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1347 | vseg->vpn_base = vpn_min; |
---|
1348 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1349 | error = 0; |
---|
1350 | } |
---|
1351 | else // vseg cut in three regions |
---|
1352 | { |
---|
1353 | // resize existing vseg |
---|
1354 | vseg->max = addr_min; |
---|
1355 | |
---|
1356 | // update vpn_base and vpn_size |
---|
1357 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1358 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1359 | vseg->vpn_base = vpn_min; |
---|
1360 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1361 | |
---|
1362 | // create new vseg |
---|
1363 | new = vmm_create_vseg( process, |
---|
1364 | vseg->type, |
---|
1365 | addr_min, |
---|
1366 | (vseg->max - addr_max), |
---|
1367 | vseg->file_offset, |
---|
1368 | vseg->file_size, |
---|
1369 | vseg->mapper_xp, |
---|
1370 | vseg->cxy ); |
---|
1371 | |
---|
1372 | if( new == NULL ) error = EINVAL; |
---|
1373 | else error = 0; |
---|
1374 | } |
---|
1375 | |
---|
1376 | // release VMM lock |
---|
1377 | remote_rwlock_wr_release( lock_xp ); |
---|
1378 | |
---|
1379 | return error; |
---|
1380 | |
---|
1381 | } // vmm_resize_vseg() |

///////////////////////////////////////////
error_t vmm_get_vseg( process_t * process,
                      intptr_t    vaddr,
                      vseg_t   ** found_vseg )
{
    xptr_t   vseg_xp;
    vseg_t * vseg;
    vmm_t  * vmm;
    error_t  error;

    // get pointer on local VMM
    vmm = &process->vmm;

    // try to get vseg from local VMM
    vseg = vmm_vseg_from_vaddr( vmm , vaddr );

    if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
    {
        // get extended pointer on reference process
        xptr_t ref_xp = process->ref_xp;

        // get cluster and local pointer on reference process
        cxy_t       ref_cxy = GET_CXY( ref_xp );
        process_t * ref_ptr = GET_PTR( ref_xp );

        if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference

        // get extended pointer on reference vseg
        rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );

        if( error )  return -1;                  // vseg not found => illegal user vaddr

        // allocate a vseg in local cluster
        vseg = vseg_alloc();

        if( vseg == NULL )  return -1;           // cannot allocate a local vseg

        // initialise local vseg from reference
        vseg_init_from_ref( vseg , vseg_xp );

        // register local vseg in local VSL
        vmm_attach_vseg_to_vsl( vmm , vseg );
    }

    // success
    *found_vseg = vseg;
    return 0;

}  // end vmm_get_vseg()
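
// Typical use (illustrative sketch) : a fault handler translates the faulting
// virtual address to its vseg before accessing the GPT, for instance :
//
//     vseg_t * vseg;
//     if( vmm_get_vseg( process , (intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT , &vseg ) )
//         printk("\n[ERROR] vaddr not in a registered vseg\n");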

//////////////////////////////////////////////////////////////////////////////////////
// This static function computes the target cluster to allocate a physical page
// for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required)
// and returns an extended pointer on the allocated page descriptor.
// It can be called by a thread running in any cluster.
// The vseg cannot have the FILE type.
//////////////////////////////////////////////////////////////////////////////////////
static xptr_t vmm_page_allocate( vseg_t * vseg,
                                 vpn_t    vpn )
{

#if DEBUG_VMM_ALLOCATE_PAGE
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_ALLOCATE_PAGE < cycle )
printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
#endif

    page_t   * page_ptr;
    cxy_t      page_cxy;
    kmem_req_t req;
    uint32_t   index;

    uint32_t   type   = vseg->type;
    uint32_t   flags  = vseg->flags;
    uint32_t   x_size = LOCAL_CLUSTER->x_size;
    uint32_t   y_size = LOCAL_CLUSTER->y_size;

    // check vseg type
    assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );

    if( flags & VSEG_DISTRIB )   // distributed => cxy depends on vpn LSB
    {
        index    = vpn & ((x_size * y_size) - 1);
        page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) );

        // if the cluster selected from the VPN's LSBs is not active, select one randomly
        if ( cluster_is_active( page_cxy ) == false )
        {
            page_cxy = cluster_random_select();
        }
    }
    else                         // other cases => cxy specified in vseg
    {
        page_cxy = vseg->cxy;
    }
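
    // Example : on a 2x2 mesh (x_size = y_size = 2), vpn = 0x105 gives
    // index = 0x105 & 3 = 1, i.e. cluster (x,y) = (0,1) : successive pages of a
    // distributed vseg are spread round-robin over the four clusters.
    // Note : this masking assumes that (x_size * y_size) is a power of two.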

    // allocate a physical page from target cluster
    if( page_cxy == local_cxy )  // target cluster is the local cluster
    {
        req.type  = KMEM_PAGE;
        req.size  = 0;
        req.flags = AF_NONE;
        page_ptr  = (page_t *)kmem_alloc( &req );
    }
    else                         // target cluster is not the local cluster
    {
        rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr );
    }

#if DEBUG_VMM_ALLOCATE_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_ALLOCATE_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vpn,
ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) , cycle );
#endif

    if( page_ptr == NULL ) return XPTR_NULL;
    else                   return XPTR( page_cxy , page_ptr );

}  // end vmm_page_allocate()
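
// Note : both paths request a single small page (order 0 : req.size = 0 for the
// local kmem allocator, order argument 0 for the remote RPC) ; the only difference
// is whether the target cluster PPM is accessed directly or through an RPC.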

////////////////////////////////////////
error_t vmm_get_one_ppn( vseg_t * vseg,
                         vpn_t    vpn,
                         ppn_t  * ppn )
{
    error_t    error;
    xptr_t     page_xp;   // extended pointer on physical page descriptor
    uint32_t   page_id;   // missing page index in vseg mapper
    uint32_t   type;      // vseg type

    type    = vseg->type;
    page_id = vpn - vseg->vpn_base;

#if DEBUG_VMM_GET_ONE_PPN
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
#endif

    // FILE type : get the physical page from the file mapper
    if( type == VSEG_TYPE_FILE )
    {
        // get extended pointer on mapper
        xptr_t mapper_xp = vseg->mapper_xp;

        assert( (mapper_xp != XPTR_NULL),
        "mapper not defined for a FILE vseg\n" );

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return EINVAL;
    }

    // Other types : allocate a physical page from target cluster,
    // as defined by vseg type and vpn value
    else
    {
        // allocate one physical page
        page_xp = vmm_page_allocate( vseg , vpn );

        if( page_xp == XPTR_NULL ) return ENOMEM;

        // initialise missing page from .elf file mapper for DATA and CODE types
        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
        {
            // get extended pointer on mapper
            xptr_t mapper_xp = vseg->mapper_xp;

            assert( (mapper_xp != XPTR_NULL),
            "mapper not defined for a CODE or DATA vseg\n" );

            // compute missing page offset in vseg
            uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;

            // compute missing page offset in .elf file
            uint32_t elf_offset = vseg->file_offset + offset;

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
#endif
            // compute extended pointer on page base
            xptr_t base_xp = ppm_page2base( page_xp );

            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
            uint32_t file_size = vseg->file_size;
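
            // Example with 4 Kbytes pages and file_size = 0x1800 :
            // - page 0 (offset 0x0000) is fully copied from the mapper,
            // - page 1 (offset 0x1000) gets 0x800 bytes from the mapper and 0x800 zeroed bytes,
            // - page 2 (offset 0x2000) is fully zeroed (pure BSS),
            // matching the three cases handled below.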

            if( file_size < offset )   // missing page fully in BSS
            {

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n",
__FUNCTION__, this->process->pid, this->trdid, vpn );
#endif
                if( GET_CXY( page_xp ) == local_cxy )
                {
                    memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE );
                }
                else
                {
                    hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
                }
            }
            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )   // fully in mapper
            {

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n",
__FUNCTION__, this->process->pid, this->trdid, vpn );
#endif
                error = mapper_move_kernel( mapper_xp,
                                            true,          // to_buffer
                                            elf_offset,
                                            base_xp,
                                            CONFIG_PPM_PAGE_SIZE );
                if( error ) return EINVAL;
            }
            else        // both in mapper and in BSS :
                        // - (file_size - offset) bytes from mapper
                        // - (page_size + offset - file_size) bytes from BSS
            {

#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n"
"     %d bytes from mapper / %d bytes from BSS\n",
__FUNCTION__, this->process->pid, this->trdid, vpn,
file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size );
#endif
                // initialize mapper part
                error = mapper_move_kernel( mapper_xp,
                                            true,          // to_buffer
                                            elf_offset,
                                            base_xp,
                                            file_size - offset );
                if( error ) return EINVAL;

                // initialize BSS part
                if( GET_CXY( page_xp ) == local_cxy )
                {
                    memset( GET_PTR( base_xp ) + file_size - offset , 0 ,
                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
                }
                else
                {
                    hal_remote_memset( base_xp + file_size - offset , 0 ,
                                       offset + CONFIG_PPM_PAGE_SIZE - file_size );
                }
            }
        }   // end initialisation for CODE or DATA types
    }

    // return ppn
    *ppn = ppm_page2ppn( page_xp );

#if DEBUG_VMM_GET_ONE_PPN
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_GET_ONE_PPN < cycle )
printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
#endif

    return 0;

}  // end vmm_get_one_ppn()

///////////////////////////////////////////////////
error_t vmm_handle_page_fault( process_t * process,
                               vpn_t       vpn )
{
    vseg_t    * vseg;            // vseg containing vpn
    uint32_t    new_attr;        // new PTE_ATTR value
    ppn_t       new_ppn;         // new PTE_PPN value
    uint32_t    ref_attr;        // PTE_ATTR value in reference GPT
    ppn_t       ref_ppn;         // PTE_PPN value in reference GPT
    cxy_t       ref_cxy;         // reference cluster for missing vpn
    process_t * ref_ptr;         // reference process for missing vpn
    xptr_t      local_gpt_xp;    // extended pointer on local GPT
    xptr_t      local_lock_xp;   // extended pointer on local GPT lock
    xptr_t      ref_gpt_xp;      // extended pointer on reference GPT
    xptr_t      ref_lock_xp;     // extended pointer on reference GPT lock
    error_t     error;           // value returned by called functions

    // get local vseg (access to reference VSL can be required)
    error = vmm_get_vseg( process,
                          (intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT,
                          &vseg );
    if( error )
    {
        printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
        __FUNCTION__ , vpn , process->pid );

        return EXCP_USER_ERROR;
    }

#if DEBUG_VMM_HANDLE_PAGE_FAULT
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
printk("\n[%s] thread[%x,%x] enter for vpn %x / %s / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
#endif

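    // The handling strategy depends on the vseg type :
    // - for a private vseg (STACK or CODE) only the local GPT is accessed ;
    // - for a public vseg the reference GPT is checked first : if the PTE is already
    //   mapped there, this is a false page fault and the local GPT is simply updated
    //   from the reference GPT ; otherwise it is a true page fault, a physical page
    //   is allocated, and both the reference and the local GPT are updated.
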
    //////////////// private vseg => access only the local GPT
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        // build extended pointer on local GPT and local GPT lock
        local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
        local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );

        // take local GPT lock in write mode
        remote_rwlock_wr_acquire( local_lock_xp );

        // check VPN still unmapped in local GPT
        // do nothing if VPN has been mapped by a concurrent page_fault
        hal_gpt_get_pte( local_gpt_xp,
                         vpn,
                         &new_attr,
                         &new_ppn );

        if( (new_attr & GPT_MAPPED) == 0 )   // VPN still unmapped
        {
            // allocate and initialise a physical page depending on the vseg type
            error = vmm_get_one_ppn( vseg , vpn , &new_ppn );

            if( error )
            {
                printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
                __FUNCTION__ , process->pid , vpn );

                // release local GPT lock in write mode
                remote_rwlock_wr_release( local_lock_xp );

                return EXCP_KERNEL_PANIC;
            }

            // define new_attr from vseg flags
            new_attr = GPT_MAPPED | GPT_SMALL;
            if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
            if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
            if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
            if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;

            // set PTE (PPN & attribute) to local GPT
            error = hal_gpt_set_pte( local_gpt_xp,
                                     vpn,
                                     new_attr,
                                     new_ppn );
            if ( error )
            {
                printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n",
                __FUNCTION__ , process->pid , vpn );

                // release local GPT lock in write mode
                remote_rwlock_wr_release( local_lock_xp );

                return EXCP_KERNEL_PANIC;
            }
        }

        // release local GPT lock in write mode
        remote_rwlock_wr_release( local_lock_xp );

#if DEBUG_VMM_HANDLE_PAGE_FAULT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, vpn, new_ppn, new_attr, cycle );
#endif
        return EXCP_NON_FATAL;

    }   // end local GPT access

    //////////// public vseg => access reference GPT
    else
    {
        // get reference process cluster and local pointer
        ref_cxy = GET_CXY( process->ref_xp );
        ref_ptr = GET_PTR( process->ref_xp );

        // build extended pointer on reference GPT and reference GPT lock
        ref_gpt_xp  = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
        ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );

        // build extended pointer on local GPT and local GPT lock
        local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
        local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );

        // take reference GPT lock in read mode
        remote_rwlock_rd_acquire( ref_lock_xp );

        // get directly PPN & attributes from reference GPT :
        // this can avoid a costly RPC for a false page fault
        hal_gpt_get_pte( ref_gpt_xp,
                         vpn,
                         &ref_attr,
                         &ref_ppn );

        // release reference GPT lock in read mode
        remote_rwlock_rd_release( ref_lock_xp );

        if( ref_attr & GPT_MAPPED )        // false page fault => update local GPT
        {
            // take local GPT lock in write mode
            remote_rwlock_wr_acquire( local_lock_xp );

            // check VPN still unmapped in local GPT
            hal_gpt_get_pte( local_gpt_xp,
                             vpn,
                             &new_attr,
                             &new_ppn );

            if( (new_attr & GPT_MAPPED) == 0 )   // VPN still unmapped
            {
                // update local GPT from reference GPT
                error = hal_gpt_set_pte( local_gpt_xp,
                                         vpn,
                                         ref_attr,
                                         ref_ppn );
                if( error )
                {
                    printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n",
                    __FUNCTION__ , process->pid , vpn );

                    // release local GPT lock in write mode
                    remote_rwlock_wr_release( local_lock_xp );

                    return EXCP_KERNEL_PANIC;
                }
            }
            else                                 // VPN has been mapped by a concurrent page_fault
            {
                // keep PTE from local GPT
                ref_attr = new_attr;
                ref_ppn  = new_ppn;
            }

            // release local GPT lock in write mode
            remote_rwlock_wr_release( local_lock_xp );

#if DEBUG_VMM_HANDLE_PAGE_FAULT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, vpn, ref_ppn, ref_attr, cycle );
#endif
            return EXCP_NON_FATAL;
        }
        else                               // true page fault => update reference GPT
        {
            // take reference GPT lock in write mode
            remote_rwlock_wr_acquire( ref_lock_xp );

            // check VPN still unmapped in reference GPT
            // do nothing if VPN has been mapped by a concurrent page_fault
            hal_gpt_get_pte( ref_gpt_xp,
                             vpn,
                             &ref_attr,
                             &ref_ppn );

            if( (ref_attr & GPT_MAPPED) == 0 )   // VPN actually unmapped
            {
                // allocate and initialise a physical page depending on the vseg type
                error = vmm_get_one_ppn( vseg , vpn , &new_ppn );

                if( error )
                {
                    printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
                    __FUNCTION__ , process->pid , vpn );

                    // release reference GPT lock in write mode
                    remote_rwlock_wr_release( ref_lock_xp );

                    return EXCP_KERNEL_PANIC;
                }

                // define new_attr from vseg flags
                new_attr = GPT_MAPPED | GPT_SMALL;
                if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
                if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
                if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
                if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;

                // update reference GPT
                error = hal_gpt_set_pte( ref_gpt_xp,
                                         vpn,
                                         new_attr,
                                         new_ppn );

                // update local GPT (protected by reference GPT lock)
                error |= hal_gpt_set_pte( local_gpt_xp,
                                          vpn,
                                          new_attr,
                                          new_ppn );

                if( error )
                {
                    printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n",
                    __FUNCTION__ , process->pid , vpn );

                    // release reference GPT lock in write mode
                    remote_rwlock_wr_release( ref_lock_xp );

                    return EXCP_KERNEL_PANIC;
                }
            }

            // release reference GPT lock in write mode
            remote_rwlock_wr_release( ref_lock_xp );

#if DEBUG_VMM_HANDLE_PAGE_FAULT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
__FUNCTION__, vpn, new_ppn, new_attr, cycle );
#endif
            return EXCP_NON_FATAL;
        }
    }
}   // end vmm_handle_page_fault()

////////////////////////////////////////////
error_t vmm_handle_cow( process_t * process,
                        vpn_t       vpn )
{
    vseg_t    * vseg;            // vseg containing vpn
    cxy_t       ref_cxy;         // reference cluster for missing vpn
    process_t * ref_ptr;         // reference process for missing vpn
    xptr_t      gpt_xp;          // extended pointer on GPT
    xptr_t      gpt_lock_xp;     // extended pointer on GPT lock
    uint32_t    old_attr;        // current PTE_ATTR value
    ppn_t       old_ppn;         // current PTE_PPN value
    uint32_t    new_attr;        // new PTE_ATTR value
    ppn_t       new_ppn;         // new PTE_PPN value
    error_t     error;

#if DEBUG_VMM_HANDLE_COW
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_HANDLE_COW < cycle )
printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
#endif

    // access local GPT to get GPT_COW flag
    bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn );

    if( cow == false ) return EXCP_USER_ERROR;

    // get local vseg
    error = vmm_get_vseg( process,
                          (intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT,
                          &vseg );
    if( error )
    {
        printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
        __FUNCTION__, vpn, process->pid );

        return EXCP_KERNEL_PANIC;
    }

    // get reference GPT cluster and local pointer
    ref_cxy = GET_CXY( process->ref_xp );
    ref_ptr = GET_PTR( process->ref_xp );

    // build extended pointers on the relevant GPT and GPT lock :
    // - access local GPT for a private vseg
    // - access reference GPT for a public vseg
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        gpt_xp      = XPTR( local_cxy , &process->vmm.gpt );
        gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
    }
    else
    {
        gpt_xp      = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
        gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
    }

    // take GPT lock in write mode
    remote_rwlock_wr_acquire( gpt_lock_xp );

    // get current PTE from the relevant GPT
    hal_gpt_get_pte( gpt_xp,
                     vpn,
                     &old_attr,
                     &old_ppn );

    // the PTE must be mapped for a COW
    if( (old_attr & GPT_MAPPED) == 0 )
    {
        printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n",
        __FUNCTION__, vpn, process->pid );

        // release GPT lock in write mode
        remote_rwlock_wr_release( gpt_lock_xp );

        return EXCP_KERNEL_PANIC;
    }

    // get extended pointer, cluster and local pointer on physical page descriptor
    xptr_t   page_xp  = ppm_ppn2page( old_ppn );
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    // get extended pointers on forks and lock field in page descriptor
    xptr_t forks_xp      = XPTR( page_cxy , &page_ptr->forks );
    xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock );

    // take lock protecting "forks" counter
    remote_busylock_acquire( forks_lock_xp );

    // get number of pending forks from page descriptor
    uint32_t forks = hal_remote_l32( forks_xp );
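
    // The "forks" counter counts the pending fork copies still sharing this
    // physical page : while it is non-zero the page must be kept intact and a
    // private copy allocated for the writer ; when it is zero the faulting
    // process is the only user and the existing page can simply be made writable.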

    if( forks )        // pending fork => allocate a new page, and copy old to new
    {
        // allocate a new physical page
        page_xp = vmm_page_allocate( vseg , vpn );
        if( page_xp == XPTR_NULL )
        {
            printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
            __FUNCTION__ , vpn, process->pid );

            // release GPT lock in write mode
            remote_rwlock_wr_release( gpt_lock_xp );

            // release lock protecting "forks" counter
            remote_busylock_release( forks_lock_xp );

            return EXCP_KERNEL_PANIC;
        }

        // compute allocated page PPN
        new_ppn = ppm_page2ppn( page_xp );

        // copy old page content to new page
        // (the old and new pages can belong to different clusters)
        xptr_t old_base_xp = ppm_ppn2base( old_ppn );
        xptr_t new_base_xp = ppm_ppn2base( new_ppn );
        hal_remote_memcpy( new_base_xp,
                           old_base_xp,
                           CONFIG_PPM_PAGE_SIZE );

        // decrement pending forks counter in page descriptor
        hal_remote_atomic_add( forks_xp , -1 );

#if(DEBUG_VMM_HANDLE_COW & 1)
if( DEBUG_VMM_HANDLE_COW < cycle )
printk("\n[%s] thread[%x,%x] : pending forks => allocate a new PPN %x\n",
__FUNCTION__, process->pid, this->trdid, new_ppn );
#endif

    }
    else               // no pending fork => keep the existing page
    {
        new_ppn = old_ppn;

#if(DEBUG_VMM_HANDLE_COW & 1)
if( DEBUG_VMM_HANDLE_COW < cycle )
printk("\n[%s] thread[%x,%x] no pending forks => keep existing PPN %x\n",
__FUNCTION__, process->pid, this->trdid, new_ppn );
#endif

    }

    // release lock protecting "forks" counter
    remote_busylock_release( forks_lock_xp );

    // build new_attr : reset COW and set WRITABLE
    new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);

    // update the relevant GPT :
    // - private vseg => update local GPT
    // - public vseg  => update all GPT copies
    if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
    {
        hal_gpt_set_pte( gpt_xp,
                         vpn,
                         new_attr,
                         new_ppn );
    }
    else
    {
        if( ref_cxy == local_cxy )   // reference cluster is local
        {
            vmm_global_update_pte( process,
                                   vpn,
                                   new_attr,
                                   new_ppn );
        }
        else                         // reference cluster is remote
        {
            rpc_vmm_global_update_pte_client( ref_cxy,
                                              ref_ptr,
                                              vpn,
                                              new_attr,
                                              new_ppn );
        }
    }

    // release GPT lock in write mode
    remote_rwlock_wr_release( gpt_lock_xp );

#if DEBUG_VMM_HANDLE_COW
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_HANDLE_COW < cycle )
printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
#endif

    return EXCP_NON_FATAL;

}  // end vmm_handle_cow()
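
// Both vmm_handle_page_fault() and vmm_handle_cow() are expected to be called from
// the architecture-specific exception handler : an access to an unmapped page goes
// to vmm_handle_page_fault(), while a write access to a mapped page marked COW goes
// to vmm_handle_cow() ; both return EXCP_NON_FATAL when the fault has been repaired,
// and EXCP_USER_ERROR or EXCP_KERNEL_PANIC otherwise.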