1 | /* |
---|
2 | * vmm.c - virtual memory manager related operations definition. |
---|
3 | * |
---|
4 | * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) |
---|
5 | * Mohamed Lamine Karaoui (2015) |
---|
6 | * Alain Greiner (2016,2017,2018) |
---|
7 | * |
---|
8 | * Copyright (c) UPMC Sorbonne Universites |
---|
9 | * |
---|
10 | * This file is part of ALMOS-MKH. |
---|
11 | * |
---|
12 | * ALMOS-MKH is free software; you can redistribute it and/or modify it |
---|
13 | * under the terms of the GNU General Public License as published by |
---|
14 | * the Free Software Foundation; version 2.0 of the License. |
---|
15 | * |
---|
16 | * ALMOS-MKH is distributed in the hope that it will be useful, but |
---|
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
19 | * General Public License for more details. |
---|
20 | * |
---|
21 | * You should have received a copy of the GNU General Public License |
---|
22 | * along with ALMOS-MKH; if not, write to the Free Software Foundation, |
---|
23 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
---|
24 | */ |
---|
25 | |
---|
26 | #include <kernel_config.h> |
---|
27 | #include <hal_kernel_types.h> |
---|
28 | #include <hal_special.h> |
---|
29 | #include <hal_gpt.h> |
---|
30 | #include <hal_vmm.h> |
---|
31 | #include <hal_macros.h> |
---|
32 | #include <printk.h> |
---|
33 | #include <memcpy.h> |
---|
34 | #include <remote_rwlock.h> |
---|
35 | #include <remote_queuelock.h> |
---|
36 | #include <list.h> |
---|
37 | #include <xlist.h> |
---|
38 | #include <bits.h> |
---|
39 | #include <process.h> |
---|
40 | #include <thread.h> |
---|
41 | #include <vseg.h> |
---|
42 | #include <cluster.h> |
---|
43 | #include <scheduler.h> |
---|
44 | #include <vfs.h> |
---|
45 | #include <mapper.h> |
---|
46 | #include <page.h> |
---|
47 | #include <kmem.h> |
---|
48 | #include <vmm.h> |
---|
49 | #include <hal_exception.h> |
---|
50 | |
---|
51 | ////////////////////////////////////////////////////////////////////////////////// |
---|
52 | // Extern global variables |
---|
53 | ////////////////////////////////////////////////////////////////////////////////// |
---|
54 | |
---|
55 | extern process_t process_zero; // allocated in cluster.c |
---|
56 | |
---|
57 | /////////////////////////////////////// |
---|
58 | error_t vmm_init( process_t * process ) |
---|
59 | { |
---|
60 | error_t error; |
---|
61 | vseg_t * vseg_args; |
---|
62 | vseg_t * vseg_envs; |
---|
63 | intptr_t base; |
---|
64 | intptr_t size; |
---|
65 | uint32_t i; |
---|
66 | |
---|
67 | #if DEBUG_VMM_INIT |
---|
68 | thread_t * this = CURRENT_THREAD; |
---|
69 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
70 | if( DEBUG_VMM_INIT ) |
---|
71 | printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", |
---|
72 | __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
73 | #endif |
---|
74 | |
---|
75 | // get pointer on VMM |
---|
76 | vmm_t * vmm = &process->vmm; |
---|
77 | |
---|
78 | // initialize VSL (empty) |
---|
79 | vmm->vsegs_nr = 0; |
---|
80 | xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) ); |
---|
81 | remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL ); |
---|
82 | |
---|
83 | assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <= |
---|
84 | (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) , |
---|
85 | "UTILS zone too small\n" ); |
---|
86 | |
---|
87 | assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <= |
---|
88 | (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , |
---|
89 | "STACK zone too small\n"); |
---|
90 | |
---|
91 | // register args vseg in VSL |
---|
92 | base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT; |
---|
93 | size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT; |
---|
94 | |
---|
95 | vseg_args = vmm_create_vseg( process, |
---|
96 | VSEG_TYPE_DATA, |
---|
97 | base, |
---|
98 | size, |
---|
99 | 0, // file_offset unused |
---|
100 | 0, // file_size unused |
---|
101 | XPTR_NULL, // mapper_xp unused |
---|
102 | local_cxy ); |
---|
103 | |
---|
104 | if( vseg_args == NULL ) |
---|
105 | { |
---|
106 | printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ ); |
---|
107 | return -1; |
---|
108 | } |
---|
109 | |
---|
    vmm->args_vpn_base = CONFIG_VMM_UTILS_BASE;        // the *_vpn_base fields hold page indexes
---|
111 | |
---|
112 | // register the envs vseg in VSL |
---|
113 | base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT; |
---|
114 | size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT; |
---|
115 | |
---|
116 | vseg_envs = vmm_create_vseg( process, |
---|
117 | VSEG_TYPE_DATA, |
---|
118 | base, |
---|
119 | size, |
---|
120 | 0, // file_offset unused |
---|
121 | 0, // file_size unused |
---|
122 | XPTR_NULL, // mapper_xp unused |
---|
123 | local_cxy ); |
---|
124 | |
---|
125 | if( vseg_envs == NULL ) |
---|
126 | { |
---|
127 | printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ ); |
---|
128 | return -1; |
---|
129 | } |
---|
130 | |
---|
    vmm->envs_vpn_base = CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE;   // page index, not byte address
---|
132 | |
---|
133 | // create GPT (empty) |
---|
134 | error = hal_gpt_create( &vmm->gpt ); |
---|
135 | |
---|
136 | if( error ) |
---|
137 | { |
---|
138 | printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); |
---|
139 | return -1; |
---|
140 | } |
---|
141 | |
---|
142 | // initialize GPT lock |
---|
143 | remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT ); |
---|
144 | |
---|
145 | // update process VMM with kernel vsegs as required by the hardware architecture |
---|
146 | error = hal_vmm_kernel_update( process ); |
---|
147 | |
---|
148 | if( error ) |
---|
149 | { |
---|
150 | printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ ); |
---|
151 | return -1; |
---|
152 | } |
---|
153 | |
---|
154 | // initialize STACK allocator |
---|
155 | vmm->stack_mgr.bitmap = 0; |
---|
156 | vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; |
---|
157 | busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK ); |
---|
158 | |
---|
159 | // initialize MMAP allocator |
---|
160 | vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; |
---|
161 | vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; |
---|
162 | vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; |
---|
163 | busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP ); |
---|
164 | for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] ); |
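    // (each zombi_list[i] collects the released mmap vsegs of size 2^i pages,
    //  to be recycled later by the MMAP allocator)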
---|
165 | |
---|
166 | // initialize instrumentation counters |
---|
167 | vmm->pgfault_nr = 0; |
---|
168 | |
---|
169 | hal_fence(); |
---|
170 | |
---|
171 | #if DEBUG_VMM_INIT |
---|
172 | cycle = (uint32_t)hal_get_cycles(); |
---|
173 | if( DEBUG_VMM_INIT ) |
---|
174 | printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", |
---|
175 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
176 | #endif |
---|
177 | |
---|
178 | return 0; |
---|
179 | |
---|
180 | } // end vmm_init() |
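// Minimal usage sketch for vmm_init() - illustrative only: the surrounding error handling
// and the actual call site (in the process creation path) are assumptions, not part of
// this file.
//
//      process_t * new_process = ...;          // freshly allocated process descriptor
//      if( vmm_init( new_process ) )           // builds empty VSL/GPT + args/envs vsegs
//      {
//          // creation must be aborted: the VMM could not be initialized
//      }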
---|
181 | |
---|
182 | |
---|
183 | ////////////////////////////////////////// |
---|
184 | void vmm_attach_vseg_to_vsl( vmm_t * vmm, |
---|
185 | vseg_t * vseg ) |
---|
186 | { |
---|
187 | // build extended pointer on rwlock protecting VSL |
---|
188 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
189 | |
---|
190 | // get rwlock in write mode |
---|
191 | remote_rwlock_wr_acquire( lock_xp ); |
---|
192 | |
---|
193 | // update vseg descriptor |
---|
194 | vseg->vmm = vmm; |
---|
195 | |
---|
196 | // increment vsegs number |
---|
197 | vmm->vsegs_nr++; |
---|
198 | |
---|
199 | // add vseg in vmm list |
---|
200 | xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ), |
---|
201 | XPTR( local_cxy , &vseg->xlist ) ); |
---|
202 | |
---|
203 | // release rwlock in write mode |
---|
204 | remote_rwlock_wr_release( lock_xp ); |
---|
205 | } |
---|
206 | |
---|
207 | //////////////////////////////////////////// |
---|
208 | void vmm_detach_vseg_from_vsl( vmm_t * vmm, |
---|
209 | vseg_t * vseg ) |
---|
210 | { |
---|
211 | // get vseg type |
---|
212 | uint32_t type = vseg->type; |
---|
213 | |
---|
214 | // build extended pointer on rwlock protecting VSL |
---|
215 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
216 | |
---|
217 | // get rwlock in write mode |
---|
218 | remote_rwlock_wr_acquire( lock_xp ); |
---|
219 | |
---|
220 | // update vseg descriptor |
---|
221 | vseg->vmm = NULL; |
---|
222 | |
---|
223 | // remove vseg from VSL |
---|
224 | xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); |
---|
225 | |
---|
226 | // release rwlock in write mode |
---|
227 | remote_rwlock_wr_release( lock_xp ); |
---|
228 | |
---|
229 | // release the stack slot to VMM stack allocator if STACK type |
---|
230 | if( type == VSEG_TYPE_STACK ) |
---|
231 | { |
---|
232 | // get pointer on stack allocator |
---|
233 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
234 | |
---|
235 | // compute slot index |
---|
236 | uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE); |
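        // (the "- 1" mirrors vmm_stack_alloc(), which leaves the first page of each
        //  stack slot unallocated, presumably as a guard page)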
---|
237 | |
---|
238 | // update stacks_bitmap |
---|
239 | busylock_acquire( &mgr->lock ); |
---|
240 | bitmap_clear( &mgr->bitmap , index ); |
---|
241 | busylock_release( &mgr->lock ); |
---|
242 | } |
---|
243 | |
---|
244 | // release the vseg to VMM mmap allocator if MMAP type |
---|
245 | if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) ) |
---|
246 | { |
---|
247 | // get pointer on mmap allocator |
---|
248 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
249 | |
---|
250 | // compute zombi_list index |
---|
251 | uint32_t index = bits_log2( vseg->vpn_size ); |
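        // (mmap vseg sizes are powers of 2 by construction - see vmm_mmap_alloc() -
        //  so bits_log2() selects the exact free-list)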
---|
252 | |
---|
253 | // update zombi_list |
---|
254 | busylock_acquire( &mgr->lock ); |
---|
255 | list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); |
---|
256 | busylock_release( &mgr->lock ); |
---|
257 | } |
---|
258 | |
---|
259 | // release physical memory allocated for vseg if no MMAP and no kernel type |
---|
260 | if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) && |
---|
261 | (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) |
---|
262 | { |
---|
263 | vseg_free( vseg ); |
---|
264 | } |
---|
265 | |
---|
} // end vmm_detach_vseg_from_vsl()
---|
267 | |
---|
268 | //////////////////////////////////////////////// |
---|
269 | void vmm_global_update_pte( process_t * process, |
---|
270 | vpn_t vpn, |
---|
271 | uint32_t attr, |
---|
272 | ppn_t ppn ) |
---|
273 | { |
---|
274 | xlist_entry_t * process_root_ptr; |
---|
275 | xptr_t process_root_xp; |
---|
276 | xptr_t process_iter_xp; |
---|
277 | |
---|
278 | xptr_t remote_process_xp; |
---|
279 | cxy_t remote_process_cxy; |
---|
280 | process_t * remote_process_ptr; |
---|
281 | xptr_t remote_gpt_xp; |
---|
282 | |
---|
283 | pid_t pid; |
---|
284 | cxy_t owner_cxy; |
---|
285 | lpid_t owner_lpid; |
---|
286 | |
---|
287 | #if DEBUG_VMM_UPDATE_PTE |
---|
288 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
289 | thread_t * this = CURRENT_THREAD; |
---|
290 | if( DEBUG_VMM_UPDATE_PTE < cycle ) |
---|
291 | printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n", |
---|
292 | __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); |
---|
293 | #endif |
---|
294 | |
---|
295 | // check cluster is reference |
---|
296 | assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n"); |
---|
297 | |
---|
298 | // get extended pointer on root of process copies xlist in owner cluster |
---|
299 | pid = process->pid; |
---|
300 | owner_cxy = CXY_FROM_PID( pid ); |
---|
301 | owner_lpid = LPID_FROM_PID( pid ); |
---|
302 | process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; |
---|
303 | process_root_xp = XPTR( owner_cxy , process_root_ptr ); |
---|
304 | |
---|
305 | // loop on destination process copies |
---|
306 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
307 | { |
---|
308 | // get cluster and local pointer on remote process |
---|
309 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
310 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
311 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
312 | |
---|
313 | #if (DEBUG_VMM_UPDATE_PTE & 0x1) |
---|
314 | if( DEBUG_VMM_UPDATE_PTE < cycle ) |
---|
printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
---|
316 | __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); |
---|
317 | #endif |
---|
318 | |
---|
319 | // get extended pointer on remote gpt |
---|
320 | remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); |
---|
321 | |
---|
322 | // update remote GPT |
---|
323 | hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn ); |
---|
324 | } |
---|
325 | |
---|
326 | #if DEBUG_VMM_UPDATE_PTE |
---|
327 | cycle = (uint32_t)hal_get_cycles(); |
---|
328 | if( DEBUG_VMM_UPDATE_PTE < cycle ) |
---|
329 | printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n", |
---|
330 | __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); |
---|
331 | #endif |
---|
332 | |
---|
333 | } // end vmm_global_update_pte() |
---|
334 | |
---|
335 | /////////////////////////////////////// |
---|
336 | void vmm_set_cow( process_t * process ) |
---|
337 | { |
---|
338 | vmm_t * vmm; |
---|
339 | |
---|
340 | xlist_entry_t * process_root_ptr; |
---|
341 | xptr_t process_root_xp; |
---|
342 | xptr_t process_iter_xp; |
---|
343 | |
---|
344 | xptr_t remote_process_xp; |
---|
345 | cxy_t remote_process_cxy; |
---|
346 | process_t * remote_process_ptr; |
---|
347 | xptr_t remote_gpt_xp; |
---|
348 | |
---|
349 | xptr_t vseg_root_xp; |
---|
350 | xptr_t vseg_iter_xp; |
---|
351 | |
---|
352 | xptr_t vseg_xp; |
---|
353 | vseg_t * vseg; |
---|
354 | |
---|
355 | pid_t pid; |
---|
356 | cxy_t owner_cxy; |
---|
357 | lpid_t owner_lpid; |
---|
358 | |
---|
359 | #if DEBUG_VMM_SET_COW |
---|
360 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
361 | thread_t * this = CURRENT_THREAD; |
---|
362 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
363 | printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", |
---|
364 | __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); |
---|
365 | #endif |
---|
366 | |
---|
367 | // check cluster is reference |
---|
368 | assert( (GET_CXY( process->ref_xp ) == local_cxy) , |
---|
369 | "local cluster is not process reference cluster\n"); |
---|
370 | |
---|
371 | // get pointer on reference VMM |
---|
372 | vmm = &process->vmm; |
---|
373 | |
---|
374 | // get extended pointer on root of process copies xlist in owner cluster |
---|
375 | pid = process->pid; |
---|
376 | owner_cxy = CXY_FROM_PID( pid ); |
---|
377 | owner_lpid = LPID_FROM_PID( pid ); |
---|
378 | process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; |
---|
379 | process_root_xp = XPTR( owner_cxy , process_root_ptr ); |
---|
380 | |
---|
381 | // get extended pointer on root of vsegs xlist from reference VMM |
---|
382 | vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
383 | |
---|
384 | // loop on destination process copies |
---|
385 | XLIST_FOREACH( process_root_xp , process_iter_xp ) |
---|
386 | { |
---|
387 | // get cluster and local pointer on remote process |
---|
388 | remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); |
---|
389 | remote_process_ptr = GET_PTR( remote_process_xp ); |
---|
390 | remote_process_cxy = GET_CXY( remote_process_xp ); |
---|
391 | |
---|
392 | #if (DEBUG_VMM_SET_COW & 1) |
---|
393 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
394 | printk("\n[%s] thread[%x,%x] handling process %x in cluster %x\n", |
---|
395 | __FUNCTION__, this->process->pid, this->trdid, process->pid , remote_process_cxy ); |
---|
396 | #endif |
---|
397 | |
---|
398 | // get extended pointer on remote gpt |
---|
399 | remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt ); |
---|
400 | |
---|
401 | // loop on vsegs in (local) reference process VSL |
---|
402 | XLIST_FOREACH( vseg_root_xp , vseg_iter_xp ) |
---|
403 | { |
---|
404 | // get pointer on vseg |
---|
405 | vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist ); |
---|
406 | vseg = GET_PTR( vseg_xp ); |
---|
407 | |
---|
408 | assert( (GET_CXY( vseg_xp ) == local_cxy) , |
---|
409 | "all vsegs in reference VSL must be local\n" ); |
---|
410 | |
---|
411 | // get vseg type, base and size |
---|
412 | uint32_t type = vseg->type; |
---|
413 | vpn_t vpn_base = vseg->vpn_base; |
---|
414 | vpn_t vpn_size = vseg->vpn_size; |
---|
415 | |
---|
416 | #if (DEBUG_VMM_SET_COW & 1) |
---|
417 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
418 | printk("\n[%s] thread[%x,%x] handling vseg %s / vpn_base = %x / vpn_size = %x\n", |
---|
419 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); |
---|
420 | #endif |
---|
421 | // only DATA, ANON and REMOTE vsegs |
---|
422 | if( (type == VSEG_TYPE_DATA) || |
---|
423 | (type == VSEG_TYPE_ANON) || |
---|
424 | (type == VSEG_TYPE_REMOTE) ) |
---|
425 | { |
---|
426 | vpn_t vpn; |
---|
427 | uint32_t attr; |
---|
428 | ppn_t ppn; |
---|
429 | xptr_t page_xp; |
---|
430 | cxy_t page_cxy; |
---|
431 | page_t * page_ptr; |
---|
432 | xptr_t forks_xp; |
---|
433 | xptr_t lock_xp; |
---|
434 | |
---|
435 | // update flags in remote GPT |
---|
436 | hal_gpt_set_cow( remote_gpt_xp, |
---|
437 | vpn_base, |
---|
438 | vpn_size ); |
---|
439 | |
---|
440 | // atomically increment pending forks counter in physical pages, |
---|
441 | // for all vseg pages that are mapped in reference cluster |
---|
442 | if( remote_process_cxy == local_cxy ) |
---|
443 | { |
---|
444 | // scan all pages in vseg |
---|
445 | for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) |
---|
446 | { |
---|
447 | // get page attributes and PPN from reference GPT |
---|
448 | hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn ); |
---|
449 | |
---|
450 | // atomically update pending forks counter if page is mapped |
---|
451 | if( attr & GPT_MAPPED ) |
---|
452 | { |
---|
453 | // get pointers and cluster on page descriptor |
---|
454 | page_xp = ppm_ppn2page( ppn ); |
---|
455 | page_cxy = GET_CXY( page_xp ); |
---|
456 | page_ptr = GET_PTR( page_xp ); |
---|
457 | |
---|
458 | // get extended pointers on "forks" and "lock" |
---|
459 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
460 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
461 | |
---|
462 | // take lock protecting "forks" counter |
---|
463 | remote_busylock_acquire( lock_xp ); |
---|
464 | |
---|
465 | // increment "forks" |
---|
466 | hal_remote_atomic_add( forks_xp , 1 ); |
---|
467 | |
---|
468 | // release lock protecting "forks" counter |
---|
469 | remote_busylock_release( lock_xp ); |
---|
470 | } |
---|
471 | } // end loop on vpn |
---|
472 | } // end if local |
---|
473 | } // end if vseg type |
---|
474 | } // end loop on vsegs |
---|
475 | } // end loop on process copies |
---|
476 | |
---|
477 | #if DEBUG_VMM_SET_COW |
---|
478 | cycle = (uint32_t)hal_get_cycles(); |
---|
479 | if( DEBUG_VMM_SET_COW < cycle ) |
---|
480 | printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n", |
---|
481 | __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); |
---|
482 | #endif |
---|
483 | |
---|
} // end vmm_set_cow()
---|
485 | |
---|
486 | ///////////////////////////////////////////////// |
---|
487 | error_t vmm_fork_copy( process_t * child_process, |
---|
488 | xptr_t parent_process_xp ) |
---|
489 | { |
---|
490 | error_t error; |
---|
491 | cxy_t parent_cxy; |
---|
492 | process_t * parent_process; |
---|
493 | vmm_t * parent_vmm; |
---|
494 | xptr_t parent_lock_xp; |
---|
495 | vmm_t * child_vmm; |
---|
496 | xptr_t iter_xp; |
---|
497 | xptr_t parent_vseg_xp; |
---|
498 | vseg_t * parent_vseg; |
---|
499 | vseg_t * child_vseg; |
---|
500 | uint32_t type; |
---|
501 | bool_t cow; |
---|
502 | vpn_t vpn; |
---|
503 | vpn_t vpn_base; |
---|
504 | vpn_t vpn_size; |
---|
505 | xptr_t page_xp; // extended pointer on page descriptor |
---|
506 | page_t * page_ptr; |
---|
507 | cxy_t page_cxy; |
---|
508 | xptr_t forks_xp; // extended pointer on forks counter in page descriptor |
---|
509 | xptr_t lock_xp; // extended pointer on lock protecting the forks counter |
---|
510 | xptr_t parent_root_xp; |
---|
511 | bool_t mapped; |
---|
512 | ppn_t ppn; |
---|
513 | |
---|
514 | #if DEBUG_VMM_FORK_COPY |
---|
515 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
516 | thread_t * this = CURRENT_THREAD; |
---|
517 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
---|
519 | __FUNCTION__ , this->process->pid, this->trdid, cycle ); |
---|
520 | #endif |
---|
521 | |
---|
522 | // get parent process cluster and local pointer |
---|
523 | parent_cxy = GET_CXY( parent_process_xp ); |
---|
524 | parent_process = GET_PTR( parent_process_xp ); |
---|
525 | |
---|
526 | // get local pointers on parent and child VMM |
---|
527 | parent_vmm = &parent_process->vmm; |
---|
528 | child_vmm = &child_process->vmm; |
---|
529 | |
---|
530 | // get extended pointer on lock protecting the parent VSL |
---|
531 | parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock ); |
---|
532 | |
---|
533 | // initialize the lock protecting the child VSL |
---|
    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_VSL );
---|
535 | |
---|
536 | // initialize the child VSL as empty |
---|
537 | xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) ); |
---|
538 | child_vmm->vsegs_nr = 0; |
---|
539 | |
---|
540 | // create the child GPT |
---|
541 | error = hal_gpt_create( &child_vmm->gpt ); |
---|
542 | |
---|
543 | if( error ) |
---|
544 | { |
---|
545 | printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ ); |
---|
546 | return -1; |
---|
547 | } |
---|
548 | |
---|
549 | // build extended pointer on parent VSL |
---|
550 | parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root ); |
---|
551 | |
---|
552 | // take the lock protecting the parent VSL in read mode |
---|
553 | remote_rwlock_rd_acquire( parent_lock_xp ); |
---|
554 | |
---|
555 | // loop on parent VSL xlist |
---|
556 | XLIST_FOREACH( parent_root_xp , iter_xp ) |
---|
557 | { |
---|
558 | // get local and extended pointers on current parent vseg |
---|
559 | parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
560 | parent_vseg = GET_PTR( parent_vseg_xp ); |
---|
561 | |
---|
562 | // get vseg type |
---|
563 | type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) ); |
---|
564 | |
---|
565 | #if DEBUG_VMM_FORK_COPY |
---|
566 | cycle = (uint32_t)hal_get_cycles(); |
---|
567 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
568 | printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n", |
---|
569 | __FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type), |
---|
570 | hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); |
---|
571 | #endif |
---|
572 | |
---|
        // all parent vsegs - except the STACK and kernel vsegs - must be copied into the child VSL
---|
574 | if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) && |
---|
575 | (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) |
---|
576 | { |
---|
577 | // allocate memory for a new child vseg |
---|
578 | child_vseg = vseg_alloc(); |
---|
579 | if( child_vseg == NULL ) // release all allocated vsegs |
---|
580 | { |
---|
581 | vmm_destroy( child_process ); |
---|
582 | printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ ); |
---|
583 | return -1; |
---|
584 | } |
---|
585 | |
---|
586 | // copy parent vseg to child vseg |
---|
587 | vseg_init_from_ref( child_vseg , parent_vseg_xp ); |
---|
588 | |
---|
589 | // register child vseg in child VSL |
---|
590 | vmm_attach_vseg_to_vsl( child_vmm , child_vseg ); |
---|
591 | |
---|
592 | #if DEBUG_VMM_FORK_COPY |
---|
593 | cycle = (uint32_t)hal_get_cycles(); |
---|
594 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
595 | printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n", |
---|
596 | __FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type), |
---|
597 | hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle ); |
---|
598 | #endif |
---|
599 | |
---|
600 | // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT |
---|
601 | if( type != VSEG_TYPE_CODE ) |
---|
602 | { |
---|
603 | // activate the COW for DATA, MMAP, REMOTE vsegs only |
---|
604 | cow = ( type != VSEG_TYPE_FILE ); |
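                // (FILE vsegs are backed by the file mapper and stay shared,
                //  so they do not need copy-on-write)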
---|
605 | |
---|
606 | vpn_base = child_vseg->vpn_base; |
---|
607 | vpn_size = child_vseg->vpn_size; |
---|
608 | |
---|
609 | // scan pages in parent vseg |
---|
610 | for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ ) |
---|
611 | { |
---|
612 | error = hal_gpt_pte_copy( &child_vmm->gpt, |
---|
613 | XPTR( parent_cxy , &parent_vmm->gpt ), |
---|
614 | vpn, |
---|
615 | cow, |
---|
616 | &ppn, |
---|
617 | &mapped ); |
---|
618 | if( error ) |
---|
619 | { |
---|
620 | vmm_destroy( child_process ); |
---|
621 | printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ ); |
---|
622 | return -1; |
---|
623 | } |
---|
624 | |
---|
625 | // increment pending forks counter in page if mapped |
---|
626 | if( mapped ) |
---|
627 | { |
---|
628 | // get pointers and cluster on page descriptor |
---|
629 | page_xp = ppm_ppn2page( ppn ); |
---|
630 | page_cxy = GET_CXY( page_xp ); |
---|
631 | page_ptr = GET_PTR( page_xp ); |
---|
632 | |
---|
633 | // get extended pointers on "forks" and "lock" |
---|
634 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
635 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
636 | |
---|
637 | // get lock protecting "forks" counter |
---|
638 | remote_busylock_acquire( lock_xp ); |
---|
639 | |
---|
640 | // increment "forks" |
---|
641 | hal_remote_atomic_add( forks_xp , 1 ); |
---|
642 | |
---|
643 | // release lock protecting "forks" counter |
---|
644 | remote_busylock_release( lock_xp ); |
---|
645 | |
---|
646 | #if DEBUG_VMM_FORK_COPY |
---|
647 | cycle = (uint32_t)hal_get_cycles(); |
---|
648 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
649 | printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n", |
---|
650 | __FUNCTION__ , this->process->pid, this->trdid , vpn , cycle ); |
---|
651 | #endif |
---|
652 | } |
---|
653 | } |
---|
            } // end if type != CODE
        } // end if type != STACK and != kernel
---|
656 | } // end loop on vsegs |
---|
657 | |
---|
658 | // release the parent VSL lock in read mode |
---|
659 | remote_rwlock_rd_release( parent_lock_xp ); |
---|
660 | |
---|
661 | // update child VMM with kernel vsegs |
---|
662 | error = hal_vmm_kernel_update( child_process ); |
---|
663 | |
---|
664 | if( error ) |
---|
665 | { |
---|
666 | printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ ); |
---|
667 | return -1; |
---|
668 | } |
---|
669 | |
---|
670 | // initialize the child VMM STACK allocator |
---|
671 | child_vmm->stack_mgr.bitmap = 0; |
---|
672 | child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; |
---|
673 | |
---|
674 | // initialize the child VMM MMAP allocator |
---|
675 | uint32_t i; |
---|
676 | child_vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; |
---|
677 | child_vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; |
---|
678 | child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; |
---|
679 | for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] ); |
---|
680 | |
---|
681 | // initialize instrumentation counters |
---|
682 | child_vmm->pgfault_nr = 0; |
---|
683 | |
---|
684 | // copy base addresses from parent VMM to child VMM |
---|
685 | child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); |
---|
686 | child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base)); |
---|
687 | child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base)); |
---|
688 | child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base)); |
---|
689 | child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base)); |
---|
690 | |
---|
691 | child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point)); |
---|
692 | |
---|
693 | hal_fence(); |
---|
694 | |
---|
695 | #if DEBUG_VMM_FORK_COPY |
---|
696 | cycle = (uint32_t)hal_get_cycles(); |
---|
697 | if( DEBUG_VMM_FORK_COPY < cycle ) |
---|
698 | printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n", |
---|
699 | __FUNCTION__ , this->process->pid, this->trdid , cycle ); |
---|
700 | #endif |
---|
701 | |
---|
702 | return 0; |
---|
703 | |
---|
704 | } // vmm_fork_copy() |
---|
705 | |
---|
706 | /////////////////////////////////////// |
---|
707 | void vmm_destroy( process_t * process ) |
---|
708 | { |
---|
709 | xptr_t vseg_xp; |
---|
710 | vseg_t * vseg; |
---|
711 | |
---|
712 | #if DEBUG_VMM_DESTROY |
---|
713 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
714 | thread_t * this = CURRENT_THREAD; |
---|
715 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
716 | printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", |
---|
717 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); |
---|
718 | #endif |
---|
719 | |
---|
720 | #if (DEBUG_VMM_DESTROY & 1 ) |
---|
721 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
722 | hal_vmm_display( process , true ); |
---|
723 | #endif |
---|
724 | |
---|
725 | // get pointer on local VMM |
---|
726 | vmm_t * vmm = &process->vmm; |
---|
727 | |
---|
728 | // get extended pointer on VSL root and VSL lock |
---|
729 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
730 | |
---|
731 | // scan the VSL to delete all registered vsegs |
---|
732 | // (don't use a FOREACH for item deletion in xlist) |
---|
733 | |
---|
734 | while( !xlist_is_empty( root_xp ) ) |
---|
735 | { |
---|
736 | // get pointer on first vseg in VSL |
---|
737 | vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); |
---|
738 | vseg = GET_PTR( vseg_xp ); |
---|
739 | |
---|
#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] deleting %s vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        // delete vseg and release physical pages
        // (the debug trace above is printed first, because this call
        //  may release the vseg descriptor itself)
        vmm_delete_vseg( process->pid , vseg->min );
---|
748 | |
---|
749 | } |
---|
750 | |
---|
751 | // remove all vsegs from zombi_lists in MMAP allocator |
---|
752 | uint32_t i; |
---|
753 | for( i = 0 ; i<32 ; i++ ) |
---|
754 | { |
---|
755 | while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) ) |
---|
756 | { |
---|
757 | vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist ); |
---|
758 | |
---|
759 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
760 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
printk("\n[%s] found %s zombi vseg / vpn_base %x / vpn_size %d\n",
---|
762 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
763 | #endif |
---|
764 | // clean vseg descriptor |
---|
765 | vseg->vmm = NULL; |
---|
766 | |
---|
767 | // remove vseg from xlist |
---|
768 | xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); |
---|
769 | |
---|
770 | // release vseg descriptor |
---|
771 | vseg_free( vseg ); |
---|
772 | |
---|
773 | #if( DEBUG_VMM_DESTROY & 1 ) |
---|
774 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
printk("\n[%s] %s zombi vseg released / vpn_base %x / vpn_size %d\n",
---|
776 | __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); |
---|
777 | #endif |
---|
778 | } |
---|
779 | } |
---|
780 | |
---|
781 | // release memory allocated to the GPT itself |
---|
782 | hal_gpt_destroy( &vmm->gpt ); |
---|
783 | |
---|
784 | #if DEBUG_VMM_DESTROY |
---|
785 | cycle = (uint32_t)hal_get_cycles(); |
---|
786 | if( DEBUG_VMM_DESTROY < cycle ) |
---|
787 | printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", |
---|
788 | __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle ); |
---|
789 | #endif |
---|
790 | |
---|
791 | } // end vmm_destroy() |
---|
792 | |
---|
793 | ///////////////////////////////////////////////// |
---|
794 | vseg_t * vmm_check_conflict( process_t * process, |
---|
795 | vpn_t vpn_base, |
---|
796 | vpn_t vpn_size ) |
---|
797 | { |
---|
798 | vmm_t * vmm = &process->vmm; |
---|
799 | |
---|
800 | // scan the VSL |
---|
801 | vseg_t * vseg; |
---|
802 | xptr_t iter_xp; |
---|
803 | xptr_t vseg_xp; |
---|
804 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
805 | |
---|
806 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
807 | { |
---|
808 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
809 | vseg = GET_PTR( vseg_xp ); |
---|
810 | |
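        // two page intervals overlap iff each one starts before the other one ends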
---|
811 | if( ((vpn_base + vpn_size) > vseg->vpn_base) && |
---|
812 | (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg; |
---|
813 | } |
---|
814 | return NULL; |
---|
815 | |
---|
816 | } // end vmm_check_conflict() |
---|
817 | |
---|
818 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
819 | // This static function is called by the vmm_create_vseg() function, and implements |
---|
820 | // the VMM stack_vseg specific allocator. |
---|
821 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
822 | // @ vmm : pointer on VMM. |
---|
823 | // @ vpn_base : (return value) first allocated page |
---|
824 | // @ vpn_size : (return value) number of allocated pages |
---|
825 | //////////////////////////////////////////////////////////////////////////////////////////// |
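// The user stack zone is divided into fixed-size slots of CONFIG_VMM_STACK_SIZE pages,
// tracked by a one-word bitmap (at most 32 slots). The first page of each slot is left
// unallocated, presumably to act as a guard page against stack overflow.
////////////////////////////////////////////////////////////////////////////////////////////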
---|
826 | static error_t vmm_stack_alloc( vmm_t * vmm, |
---|
827 | vpn_t * vpn_base, |
---|
828 | vpn_t * vpn_size ) |
---|
829 | { |
---|
830 | // get stack allocator pointer |
---|
831 | stack_mgr_t * mgr = &vmm->stack_mgr; |
---|
832 | |
---|
833 | // get lock on stack allocator |
---|
834 | busylock_acquire( &mgr->lock ); |
---|
835 | |
---|
836 | // get first free slot index in bitmap |
---|
837 | int32_t index = bitmap_ffc( &mgr->bitmap , 4 ); |
---|
838 | if( (index < 0) || (index > 31) ) |
---|
839 | { |
---|
840 | busylock_release( &mgr->lock ); |
---|
841 | return 0xFFFFFFFF; |
---|
842 | } |
---|
843 | |
---|
844 | // update bitmap |
---|
845 | bitmap_set( &mgr->bitmap , index ); |
---|
846 | |
---|
847 | // release lock on stack allocator |
---|
848 | busylock_release( &mgr->lock ); |
---|
849 | |
---|
    // return vpn_base and vpn_size (the first page of the slot is left unallocated)
---|
851 | *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1; |
---|
852 | *vpn_size = CONFIG_VMM_STACK_SIZE - 1; |
---|
853 | return 0; |
---|
854 | |
---|
855 | } // end vmm_stack_alloc() |
---|
856 | |
---|
857 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
858 | // This static function is called by the vmm_create_vseg() function, and implements |
---|
859 | // the VMM MMAP specific allocator. |
---|
860 | //////////////////////////////////////////////////////////////////////////////////////////// |
---|
861 | // @ vmm : [in] pointer on VMM. |
---|
862 | // @ npages : [in] requested number of pages. |
---|
863 | // @ vpn_base : [out] first allocated page. |
---|
864 | // @ vpn_size : [out] actual number of allocated pages. |
---|
865 | //////////////////////////////////////////////////////////////////////////////////////////// |
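// Requested sizes are rounded up to a power of 2, so that released vsegs can be recycled
// from the zombi_list[] free-lists indexed by log2(size). When no suitable zombi vseg
// exists, new pages are taken from the top of the mmap zone, which only grows: virtual
// space is never returned to the zone itself, only to the zombi lists.
////////////////////////////////////////////////////////////////////////////////////////////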
---|
866 | static error_t vmm_mmap_alloc( vmm_t * vmm, |
---|
867 | vpn_t npages, |
---|
868 | vpn_t * vpn_base, |
---|
869 | vpn_t * vpn_size ) |
---|
870 | { |
---|
871 | uint32_t index; |
---|
872 | vseg_t * vseg; |
---|
873 | vpn_t base; |
---|
874 | vpn_t size; |
---|
875 | vpn_t free; |
---|
876 | |
---|
877 | #if DEBUG_VMM_MMAP_ALLOC |
---|
878 | thread_t * this = CURRENT_THREAD; |
---|
879 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
880 | if( DEBUG_VMM_MMAP_ALLOC < cycle ) |
---|
881 | printk("\n[%s] thread[%x,%x] enter / cycle %d\n", |
---|
882 | __FUNCTION__, this->process->pid, this->trdid, cycle ); |
---|
883 | #endif |
---|
884 | |
---|
885 | // vseg size must be power of 2 |
---|
886 | // compute actual size and index in zombi_list array |
---|
887 | size = POW2_ROUNDUP( npages ); |
---|
888 | index = bits_log2( size ); |
---|
889 | |
---|
890 | // get mmap allocator pointer |
---|
891 | mmap_mgr_t * mgr = &vmm->mmap_mgr; |
---|
892 | |
---|
893 | // get lock on mmap allocator |
---|
894 | busylock_acquire( &mgr->lock ); |
---|
895 | |
---|
896 | // get vseg from zombi_list or from mmap zone |
---|
897 | if( list_is_empty( &mgr->zombi_list[index] ) ) // from mmap zone |
---|
898 | { |
---|
    // check overflow
    free = mgr->first_free_vpn;
    if( (free + size) > mgr->vpn_size )
    {
        // release lock before reporting the error
        busylock_release( &mgr->lock );
        return -1;
    }
---|
902 | |
---|
903 | // update MMAP allocator |
---|
904 | mgr->first_free_vpn += size; |
---|
905 | |
---|
906 | // compute base |
---|
907 | base = free; |
---|
908 | } |
---|
909 | else // from zombi_list |
---|
910 | { |
---|
911 | // get pointer on zombi vseg from zombi_list |
---|
912 | vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist ); |
---|
913 | |
---|
914 | // remove vseg from free-list |
---|
915 | list_unlink( &vseg->zlist ); |
---|
916 | |
---|
917 | // compute base |
---|
918 | base = vseg->vpn_base; |
---|
919 | } |
---|
920 | |
---|
921 | // release lock on mmap allocator |
---|
922 | busylock_release( &mgr->lock ); |
---|
923 | |
---|
924 | #if DEBUG_VMM_MMAP_ALLOC |
---|
925 | cycle = (uint32_t)hal_get_cycles(); |
---|
if( DEBUG_VMM_MMAP_ALLOC < cycle )
---|
927 | printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n", |
---|
928 | __FUNCTION__, this->process->pid, this->trdid, base, size, cycle ); |
---|
929 | #endif |
---|
930 | |
---|
931 | // returns vpn_base, vpn_size |
---|
932 | *vpn_base = base; |
---|
933 | *vpn_size = size; |
---|
934 | return 0; |
---|
935 | |
---|
936 | } // end vmm_mmap_alloc() |
---|
937 | |
---|
938 | //////////////////////////////////////////////// |
---|
939 | vseg_t * vmm_create_vseg( process_t * process, |
---|
940 | vseg_type_t type, |
---|
941 | intptr_t base, |
---|
942 | uint32_t size, |
---|
943 | uint32_t file_offset, |
---|
944 | uint32_t file_size, |
---|
945 | xptr_t mapper_xp, |
---|
946 | cxy_t cxy ) |
---|
947 | { |
---|
948 | vseg_t * vseg; // created vseg pointer |
---|
949 | vpn_t vpn_base; // first page index |
---|
950 | vpn_t vpn_size; // number of pages covered by vseg |
---|
951 | error_t error; |
---|
952 | |
---|
953 | #if DEBUG_VMM_CREATE_VSEG |
---|
954 | thread_t * this = CURRENT_THREAD; |
---|
955 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
956 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
957 | printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n", |
---|
958 | __FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle ); |
---|
959 | #endif |
---|
960 | |
---|
961 | // get pointer on VMM |
---|
962 | vmm_t * vmm = &process->vmm; |
---|
963 | |
---|
964 | // compute base, size, vpn_base, vpn_size, depending on vseg type |
---|
965 | // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs |
---|
966 | |
---|
967 | if( type == VSEG_TYPE_STACK ) |
---|
968 | { |
---|
969 | // get vpn_base and vpn_size from STACK allocator |
---|
970 | error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size ); |
---|
971 | if( error ) |
---|
972 | { |
---|
973 | printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n", |
---|
974 | __FUNCTION__ , process->pid , local_cxy ); |
---|
975 | return NULL; |
---|
976 | } |
---|
977 | |
---|
978 | // compute vseg base and size from vpn_base and vpn_size |
---|
979 | base = vpn_base << CONFIG_PPM_PAGE_SHIFT; |
---|
980 | size = vpn_size << CONFIG_PPM_PAGE_SHIFT; |
---|
981 | } |
---|
982 | else if( type == VSEG_TYPE_FILE ) |
---|
983 | { |
---|
984 | // compute page index (in mapper) for first byte |
---|
985 | vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT; |
---|
986 | |
---|
987 | // compute page index (in mapper) for last byte |
---|
988 | vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
989 | |
---|
990 | // compute offset in first page |
---|
991 | uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK; |
---|
992 | |
---|
993 | // compute number of pages required in virtual space |
---|
994 | vpn_t npages = vpn_max - vpn_min + 1; |
---|
995 | |
---|
996 | // get vpn_base and vpn_size from MMAP allocator |
---|
997 | error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size ); |
---|
998 | if( error ) |
---|
999 | { |
---|
1000 | printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n", |
---|
1001 | __FUNCTION__ , process->pid , local_cxy ); |
---|
1002 | return NULL; |
---|
1003 | } |
---|
1004 | |
---|
1005 | // set the vseg base (not always aligned for FILE) |
---|
1006 | base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; |
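        // (keeping the in-page offset of the first byte lets the virtual mapping mirror
        //  the page layout of the file in the mapper)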
---|
1007 | } |
---|
1008 | else if( (type == VSEG_TYPE_ANON) || |
---|
1009 | (type == VSEG_TYPE_REMOTE) ) |
---|
1010 | { |
---|
1011 | // compute number of required pages in virtual space |
---|
1012 | vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT; |
---|
1013 | if( size & CONFIG_PPM_PAGE_MASK) npages++; |
---|
1014 | |
---|
1015 | // get vpn_base and vpn_size from MMAP allocator |
---|
1016 | error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size ); |
---|
1017 | if( error ) |
---|
1018 | { |
---|
1019 | printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n", |
---|
1020 | __FUNCTION__ , process->pid , local_cxy ); |
---|
1021 | return NULL; |
---|
1022 | } |
---|
1023 | |
---|
1024 | // set vseg base (always aligned for ANON or REMOTE) |
---|
1025 | base = vpn_base << CONFIG_PPM_PAGE_SHIFT; |
---|
1026 | } |
---|
1027 | else // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg |
---|
1028 | { |
---|
1029 | uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT; |
---|
1030 | uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1031 | |
---|
1032 | vpn_base = vpn_min; |
---|
1033 | vpn_size = vpn_max - vpn_min + 1; |
---|
1034 | } |
---|
1035 | |
---|
1036 | // check collisions |
---|
1037 | vseg = vmm_check_conflict( process , vpn_base , vpn_size ); |
---|
1038 | |
---|
1039 | if( vseg != NULL ) |
---|
1040 | { |
---|
1041 | printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n" |
---|
           " overlaps existing vseg [vpn_base %x / vpn_size %x]\n",
---|
1043 | __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size ); |
---|
1044 | return NULL; |
---|
1045 | } |
---|
1046 | |
---|
1047 | // allocate physical memory for vseg descriptor |
---|
1048 | vseg = vseg_alloc(); |
---|
1049 | if( vseg == NULL ) |
---|
1050 | { |
---|
1051 | printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n", |
---|
1052 | __FUNCTION__ , process->pid ); |
---|
1053 | return NULL; |
---|
1054 | } |
---|
1055 | |
---|
1056 | #if DEBUG_VMM_CREATE_VSEG |
---|
1057 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
1058 | printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n", |
---|
1059 | __FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size ); |
---|
1060 | #endif |
---|
1061 | |
---|
1062 | // initialize vseg descriptor |
---|
1063 | vseg_init( vseg, |
---|
1064 | type, |
---|
1065 | base, |
---|
1066 | size, |
---|
1067 | vpn_base, |
---|
1068 | vpn_size, |
---|
1069 | file_offset, |
---|
1070 | file_size, |
---|
1071 | mapper_xp, |
---|
1072 | cxy ); |
---|
1073 | |
---|
1074 | // attach vseg to VSL |
---|
1075 | vmm_attach_vseg_to_vsl( vmm , vseg ); |
---|
1076 | |
---|
1077 | #if DEBUG_VMM_CREATE_VSEG |
---|
1078 | cycle = (uint32_t)hal_get_cycles(); |
---|
1079 | if( DEBUG_VMM_CREATE_VSEG < cycle ) |
---|
1080 | printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n", |
---|
1081 | __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle ); |
---|
1082 | #endif |
---|
1083 | |
---|
1084 | return vseg; |
---|
1085 | |
---|
1086 | } // vmm_create_vseg() |
---|
1087 | |
---|
1088 | /////////////////////////////////// |
---|
1089 | void vmm_delete_vseg( pid_t pid, |
---|
1090 | intptr_t vaddr ) |
---|
1091 | { |
---|
1092 | process_t * process; // local pointer on local process |
---|
1093 | vmm_t * vmm; // local pointer on local process VMM |
---|
1094 | vseg_t * vseg; // local pointer on local vseg containing vaddr |
---|
1095 | gpt_t * gpt; // local pointer on local process GPT |
---|
1096 | vpn_t vpn; // VPN of current PTE |
---|
1097 | vpn_t vpn_min; // VPN of first PTE |
---|
1098 | vpn_t vpn_max; // VPN of last PTE (excluded) |
---|
1099 | ppn_t ppn; // current PTE ppn value |
---|
1100 | uint32_t attr; // current PTE attributes |
---|
1101 | kmem_req_t req; // request to release memory |
---|
1102 | xptr_t page_xp; // extended pointer on page descriptor |
---|
1103 | cxy_t page_cxy; // page descriptor cluster |
---|
1104 | page_t * page_ptr; // page descriptor pointer |
---|
1105 | xptr_t forks_xp; // extended pointer on pending forks counter |
---|
1106 | xptr_t lock_xp; // extended pointer on lock protecting forks counter |
---|
    uint32_t      forks;       // actual number of pending forks
---|
1108 | uint32_t vseg_type; // vseg type |
---|
1109 | |
---|
1110 | #if DEBUG_VMM_DELETE_VSEG |
---|
1111 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1112 | thread_t * this = CURRENT_THREAD; |
---|
1113 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1114 | printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n", |
---|
1115 | __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle ); |
---|
1116 | #endif |
---|
1117 | |
---|
1118 | // get local pointer on local process descriptor |
---|
1119 | process = cluster_get_local_process_from_pid( pid ); |
---|
1120 | |
---|
1121 | if( process == NULL ) |
---|
1122 | { |
---|
        printk("\n[ERROR] in %s : cannot get local process descriptor\n",
---|
1124 | __FUNCTION__ ); |
---|
1125 | return; |
---|
1126 | } |
---|
1127 | |
---|
    // get pointers on local process VMM and GPT
---|
1129 | vmm = &process->vmm; |
---|
1130 | gpt = &process->vmm.gpt; |
---|
1131 | |
---|
1132 | // get local pointer on vseg containing vaddr |
---|
1133 | vseg = vmm_vseg_from_vaddr( vmm , vaddr ); |
---|
1134 | |
---|
1135 | if( vseg == NULL ) |
---|
1136 | { |
---|
        printk("\n[ERROR] in %s : cannot get vseg descriptor\n",
---|
1138 | __FUNCTION__ ); |
---|
1139 | return; |
---|
1140 | } |
---|
1141 | |
---|
1142 | // get relevant vseg infos |
---|
1143 | vseg_type = vseg->type; |
---|
1144 | vpn_min = vseg->vpn_base; |
---|
1145 | vpn_max = vpn_min + vseg->vpn_size; |
---|
1146 | |
---|
1147 | // loop to invalidate all vseg PTEs in GPT |
---|
1148 | for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) |
---|
1149 | { |
---|
1150 | // get ppn and attr from GPT entry |
---|
1151 | hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn ); |
---|
1152 | |
---|
1153 | if( attr & GPT_MAPPED ) // entry is mapped |
---|
1154 | { |
---|
1155 | |
---|
1156 | #if( DEBUG_VMM_DELETE_VSEG & 1 ) |
---|
1157 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1158 | printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) ); |
---|
1159 | #endif |
---|
1160 | // unmap GPT entry in local GPT |
---|
1161 | hal_gpt_reset_pte( gpt , vpn ); |
---|
1162 | |
---|
            // the allocated page is not released for kernel vsegs
---|
1164 | if( (vseg_type != VSEG_TYPE_KCODE) && |
---|
1165 | (vseg_type != VSEG_TYPE_KDATA) && |
---|
1166 | (vseg_type != VSEG_TYPE_KDEV ) ) |
---|
1167 | { |
---|
1168 | // get extended pointer on physical page descriptor |
---|
1169 | page_xp = ppm_ppn2page( ppn ); |
---|
1170 | page_cxy = GET_CXY( page_xp ); |
---|
1171 | page_ptr = GET_PTR( page_xp ); |
---|
1172 | |
---|
1173 | // FIXME This code must be re-written, as the actual release depends on vseg type, |
---|
1174 | // the reference cluster, the page refcount and/or the forks counter... |
---|
1175 | |
---|
1176 | // get extended pointers on forks and lock fields |
---|
1177 | forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
1178 | lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
1179 | |
---|
1180 | // get the lock protecting the page |
---|
1181 | remote_busylock_acquire( lock_xp ); |
---|
1182 | |
---|
1183 | // get pending forks counter |
---|
1184 | forks = hal_remote_l32( forks_xp ); |
---|
1185 | |
---|
1186 | if( forks ) // decrement pending forks counter |
---|
1187 | { |
---|
1188 | // update forks counter |
---|
1189 | hal_remote_atomic_add( forks_xp , -1 ); |
---|
1190 | |
---|
1191 | // release the lock protecting the page |
---|
1192 | remote_busylock_release( lock_xp ); |
---|
1193 | } |
---|
1194 | else // release physical page to relevant cluster |
---|
1195 | { |
---|
1196 | // release the lock protecting the page |
---|
1197 | remote_busylock_release( lock_xp ); |
---|
1198 | |
---|
1199 | // release the page to kmem |
---|
1200 | if( page_cxy == local_cxy ) // local cluster |
---|
1201 | { |
---|
1202 | req.type = KMEM_PAGE; |
---|
1203 | req.ptr = page_ptr; |
---|
1204 | kmem_free( &req ); |
---|
1205 | } |
---|
1206 | else // remote cluster |
---|
1207 | { |
---|
1208 | rpc_pmem_release_pages_client( page_cxy , page_ptr ); |
---|
1209 | } |
---|
1210 | |
---|
1211 | #if( DEBUG_VMM_DELETE_VSEG & 1 ) |
---|
1212 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1213 | printk("- release ppn %x\n", ppn ); |
---|
1214 | #endif |
---|
1215 | } |
---|
1216 | |
---|
1217 | } |
---|
1218 | } |
---|
1219 | } |
---|
1220 | |
---|
1221 | // remove vseg from VSL and release vseg descriptor (if not MMAP) |
---|
1222 | vmm_detach_vseg_from_vsl( vmm , vseg ); |
---|
1223 | |
---|
1224 | #if DEBUG_VMM_DELETE_VSEG |
---|
1225 | cycle = (uint32_t)hal_get_cycles(); |
---|
1226 | if( DEBUG_VMM_DELETE_VSEG < cycle ) |
---|
1227 | printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n", |
---|
__FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg_type), vaddr, cycle );
---|
1229 | #endif |
---|
1230 | |
---|
1231 | } // end vmm_delete_vseg() |
---|
1232 | |
---|
1233 | ///////////////////////////////////////////// |
---|
1234 | vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, |
---|
1235 | intptr_t vaddr ) |
---|
1236 | { |
---|
1237 | xptr_t iter_xp; |
---|
1238 | xptr_t vseg_xp; |
---|
1239 | vseg_t * vseg; |
---|
1240 | |
---|
1241 | // get extended pointers on VSL lock and root |
---|
1242 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
1243 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
1244 | |
---|
1245 | // get lock protecting the VSL |
---|
1246 | remote_rwlock_rd_acquire( lock_xp ); |
---|
1247 | |
---|
1248 | // scan the list of vsegs in VSL |
---|
1249 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
1250 | { |
---|
1251 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
1252 | vseg = GET_PTR( vseg_xp ); |
---|
1253 | |
---|
1254 | if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) |
---|
1255 | { |
---|
1256 | // return success |
---|
1257 | remote_rwlock_rd_release( lock_xp ); |
---|
1258 | return vseg; |
---|
1259 | } |
---|
1260 | } |
---|
1261 | |
---|
1262 | // return failure |
---|
1263 | remote_rwlock_rd_release( lock_xp ); |
---|
1264 | |
---|
1265 | return NULL; |
---|
1266 | |
---|
1267 | } // end vmm_vseg_from_vaddr() |
---|
1268 | |
---|
1269 | ///////////////////////////////////////////// |
---|
1270 | error_t vmm_resize_vseg( process_t * process, |
---|
1271 | intptr_t base, |
---|
1272 | intptr_t size ) |
---|
1273 | { |
---|
1274 | error_t error; |
---|
1275 | vseg_t * new; |
---|
1276 | vpn_t vpn_min; |
---|
1277 | vpn_t vpn_max; |
---|
1278 | |
---|
1279 | #if DEBUG_VMM_RESIZE_VSEG |
---|
1280 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1281 | thread_t * this = CURRENT_THREAD; |
---|
1282 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1283 | printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n", |
---|
1284 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); |
---|
1285 | #endif |
---|
1286 | |
---|
1287 | // get pointer on process VMM |
---|
1288 | vmm_t * vmm = &process->vmm; |
---|
1289 | |
---|
1290 | intptr_t addr_min = base; |
---|
1291 | intptr_t addr_max = base + size; |
---|
1292 | |
---|
1293 | // get pointer on vseg |
---|
1294 | vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); |
---|
1295 | |
---|
1296 | if( vseg == NULL) |
---|
1297 | { |
---|
1298 | printk("\n[ERROR] in %s : vseg(%x,%d) not found\n", |
---|
1299 | __FUNCTION__, base , size ); |
---|
1300 | return -1; |
---|
1301 | } |
---|
1302 | |
---|
1303 | // resize depends on unmapped region base and size |
---|
1304 | if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg |
---|
1305 | { |
---|
1306 | printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n", |
---|
1307 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1308 | |
---|
1309 | error = -1; |
---|
1310 | } |
---|
1311 | else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted |
---|
1312 | { |
---|
1313 | |
---|
1314 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1315 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1316 | printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n", |
---|
1317 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1318 | #endif |
---|
1319 | vmm_delete_vseg( process->pid , vseg->min ); |
---|
1320 | |
---|
1321 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1322 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1323 | printk("\n[%s] thread[%x,%x] deleted vseg\n", |
---|
1324 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1325 | #endif |
---|
1326 | error = 0; |
---|
1327 | } |
---|
1328 | else if( vseg->min == addr_min ) // vseg must be resized |
---|
1329 | { |
---|
1330 | |
---|
1331 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1332 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1333 | printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", |
---|
1334 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1335 | #endif |
---|
1336 | // update vseg min address |
---|
1337 | vseg->min = addr_max; |
---|
1338 | |
---|
1339 | // update vpn_base and vpn_size |
---|
1340 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1341 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1342 | vseg->vpn_base = vpn_min; |
---|
1343 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1344 | |
---|
1345 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1346 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1347 | printk("\n[%s] thread[%x,%x] changed vseg_min\n", |
---|
1348 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1349 | #endif |
---|
1350 | error = 0; |
---|
1351 | } |
---|
1352 | else if( vseg->max == addr_max ) // vseg must be resized |
---|
1353 | { |
---|
1354 | |
---|
1355 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1356 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1357 | printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", |
---|
1358 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1359 | #endif |
---|
1360 | // update vseg max address |
---|
1361 | vseg->max = addr_min; |
---|
1362 | |
---|
1363 | // update vpn_base and vpn_size |
---|
1364 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
1365 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
1366 | vseg->vpn_base = vpn_min; |
---|
1367 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
1368 | |
---|
1369 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1370 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1371 | printk("\n[%s] thread[%x,%x] changed vseg_max\n", |
---|
1372 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1373 | #endif |
---|
1374 | error = 0; |
---|
1375 | |
---|
1376 | } |
---|
1377 | else // vseg cut in three regions |
---|
1378 | { |
---|
1379 | |
---|
1380 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1381 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1382 | printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", |
---|
1383 | __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); |
---|
1384 | #endif |
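        // The unmapped region is strictly inside the vseg : the existing vseg is
        // truncated to [vseg->min , addr_min[ , and a new vseg is created to cover
        // the remaining [addr_max , old vseg->max[ region.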
---|
1385 |         // resize existing vseg, saving its initial max address
---|
1386 |         intptr_t old_max = vseg->max;
---|
1387 |         vseg->max        = addr_min;
---|
1388 |         // update vpn_base and vpn_size
---|
1389 |         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
---|
1390 |         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
---|
1391 |         vseg->vpn_base = vpn_min;
---|
1392 |         vseg->vpn_size = vpn_max - vpn_min + 1;
---|
1393 | 
---|
1394 |         // create new vseg covering the [addr_max , old_max[ region
---|
1395 |         new = vmm_create_vseg( process,
---|
1396 |                                vseg->type,
---|
1397 |                                addr_max,
---|
1398 |                                (old_max - addr_max),
---|
1399 |                                vseg->file_offset,
---|
1400 |                                vseg->file_size,
---|
1401 |                                vseg->mapper_xp,
---|
1402 |                                vseg->cxy );
---|
1403 | |
---|
1404 | #if( DEBUG_VMM_RESIZE_VSEG & 1 ) |
---|
1405 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1406 | printk("\n[%s] thread[%x,%x] replaced vseg by two smaller vsegs\n",
---|
1407 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
1408 | #endif |
---|
1409 | |
---|
1410 | if( new == NULL ) error = -1; |
---|
1411 | else error = 0; |
---|
1412 | } |
---|
1413 | |
---|
1414 | #if DEBUG_VMM_RESIZE_VSEG |
---|
1415 | if( DEBUG_VMM_RESIZE_VSEG < cycle ) |
---|
1416 | printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n", |
---|
1417 | __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); |
---|
1418 | #endif |
---|
1419 | |
---|
1420 | return error; |
---|
1421 | |
---|
1422 | } // vmm_resize_vseg() |
---|
1423 | |
---|
1424 | /////////////////////////////////////////// |
---|
1425 | error_t vmm_get_vseg( process_t * process, |
---|
1426 | intptr_t vaddr, |
---|
1427 | vseg_t ** found_vseg ) |
---|
1428 | { |
---|
1429 | xptr_t vseg_xp; |
---|
1430 | vseg_t * vseg; |
---|
1431 | vmm_t * vmm; |
---|
1432 | error_t error; |
---|
1433 | |
---|
1434 | // get pointer on local VMM |
---|
1435 | vmm = &process->vmm; |
---|
1436 | |
---|
1437 | // try to get vseg from local VMM |
---|
1438 | vseg = vmm_vseg_from_vaddr( vmm , vaddr ); |
---|
1439 | |
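    // The VSL is lazily replicated : when the vseg is not registered in the local VSL,
    // it is fetched from the reference cluster with an RPC, and a local copy is created
    // and registered in the local VSL, so that further accesses are purely local.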
---|
1440 | if( vseg == NULL ) // vseg not found in local cluster => try to get it from ref |
---|
1441 | { |
---|
1442 | // get extended pointer on reference process |
---|
1443 | xptr_t ref_xp = process->ref_xp; |
---|
1444 | |
---|
1445 | // get cluster and local pointer on reference process |
---|
1446 | cxy_t ref_cxy = GET_CXY( ref_xp ); |
---|
1447 | process_t * ref_ptr = GET_PTR( ref_xp ); |
---|
1448 | |
---|
1449 | if( local_cxy == ref_cxy ) return -1; // local cluster is the reference |
---|
1450 | |
---|
1451 | // get extended pointer on reference vseg |
---|
1452 | rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); |
---|
1453 | |
---|
1454 | if( error ) return -1; // vseg not found => illegal user vaddr |
---|
1455 | |
---|
1456 | // allocate a vseg in local cluster |
---|
1457 | vseg = vseg_alloc(); |
---|
1458 | |
---|
1459 | if( vseg == NULL ) return -1; // cannot allocate a local vseg |
---|
1460 | |
---|
1461 | // initialise local vseg from reference |
---|
1462 | vseg_init_from_ref( vseg , vseg_xp ); |
---|
1463 | |
---|
1464 | // register local vseg in local VSL |
---|
1465 | vmm_attach_vseg_to_vsl( vmm , vseg ); |
---|
1466 | } |
---|
1467 | |
---|
1468 | // success |
---|
1469 | *found_vseg = vseg; |
---|
1470 | return 0; |
---|
1471 | |
---|
1472 | } // end vmm_get_vseg() |
---|
1473 | |
---|
1474 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
1475 | // This static function computes the target cluster to allocate a physical page
---|
1476 | // for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required) |
---|
1477 | // and returns an extended pointer on the allocated page descriptor. |
---|
1478 | // It can be called by a thread running in any cluster. |
---|
1479 | // The vseg cannot have the FILE type. |
---|
1480 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
1481 | static xptr_t vmm_page_allocate( vseg_t * vseg, |
---|
1482 | vpn_t vpn ) |
---|
1483 | { |
---|
1484 | |
---|
1485 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
1486 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1487 | thread_t * this = CURRENT_THREAD; |
---|
1488 | xptr_t this_xp = XPTR( local_cxy , this ); |
---|
1489 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
1490 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
1491 | __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle ); |
---|
1492 | #endif |
---|
1493 | |
---|
1494 | page_t * page_ptr; |
---|
1495 | cxy_t page_cxy; |
---|
1496 | kmem_req_t req; |
---|
1497 | uint32_t index; |
---|
1498 | |
---|
1499 | uint32_t type = vseg->type; |
---|
1500 | uint32_t flags = vseg->flags; |
---|
1501 | uint32_t x_size = LOCAL_CLUSTER->x_size; |
---|
1502 | uint32_t y_size = LOCAL_CLUSTER->y_size; |
---|
1503 | |
---|
1504 | // check vseg type |
---|
1505 | assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" ); |
---|
1506 | |
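    // target cluster selection policy :
    // - DISTRIB vseg : the cluster is computed from the VPN LSBs, so that successive
    //   pages are spread over the (x_size * y_size) clusters of the mesh ; for example,
    //   with a 2x2 mesh, VPN LSBs 0,1,2,3 select the clusters (0,0),(0,1),(1,0),(1,1).
    // - other vsegs  : all pages are allocated in the cluster registered in the vseg.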
---|
1507 | if( flags & VSEG_DISTRIB ) // distributed => cxy depends on vpn LSB |
---|
1508 | { |
---|
1509 | index = vpn & ((x_size * y_size) - 1); |
---|
1510 | page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) ); |
---|
1511 | |
---|
1512 |         // If the cluster selected from the VPN LSBs is not active, select one randomly
---|
1513 | if ( cluster_is_active( page_cxy ) == false ) |
---|
1514 | { |
---|
1515 | page_cxy = cluster_random_select(); |
---|
1516 | } |
---|
1517 | } |
---|
1518 | else // other cases => cxy specified in vseg |
---|
1519 | { |
---|
1520 | page_cxy = vseg->cxy; |
---|
1521 | } |
---|
1522 | |
---|
1523 | // allocate a physical page from target cluster |
---|
1524 | if( page_cxy == local_cxy ) // target cluster is the local cluster |
---|
1525 | { |
---|
1526 | req.type = KMEM_PAGE; |
---|
1527 | req.size = 0; |
---|
1528 | req.flags = AF_NONE; |
---|
1529 | page_ptr = (page_t *)kmem_alloc( &req ); |
---|
1530 | } |
---|
1531 | else // target cluster is not the local cluster |
---|
1532 | { |
---|
1533 | rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr ); |
---|
1534 | } |
---|
1535 | |
---|
1536 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
1537 | cycle = (uint32_t)hal_get_cycles(); |
---|
1538 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
1539 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
1540 | __FUNCTION__ , this->process->pid, this->trdid, vpn, |
---|
1541 | ppm_page2ppn( XPTR( page_cxy , page_ptr ) ), cycle );
---|
1542 | #endif |
---|
1543 | |
---|
1544 | if( page_ptr == NULL ) return XPTR_NULL; |
---|
1545 | else return XPTR( page_cxy , page_ptr ); |
---|
1546 | |
---|
1547 | } // end vmm_page_allocate() |
---|
1548 | |
---|
1549 | //////////////////////////////////////// |
---|
1550 | error_t vmm_get_one_ppn( vseg_t * vseg, |
---|
1551 | vpn_t vpn, |
---|
1552 | ppn_t * ppn ) |
---|
1553 | { |
---|
1554 | error_t error; |
---|
1555 | xptr_t page_xp; // extended pointer on physical page descriptor |
---|
1556 | uint32_t page_id; // missing page index in vseg mapper |
---|
1557 | uint32_t type; // vseg type; |
---|
1558 | |
---|
1559 | type = vseg->type; |
---|
1560 | page_id = vpn - vseg->vpn_base; |
---|
1561 | |
---|
1562 | #if DEBUG_VMM_GET_ONE_PPN |
---|
1563 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1564 | thread_t * this = CURRENT_THREAD; |
---|
1565 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
1566 | printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", |
---|
1567 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); |
---|
1568 | #endif |
---|
1569 | |
---|
1570 | // FILE type : get the physical page from the file mapper |
---|
1571 | if( type == VSEG_TYPE_FILE ) |
---|
1572 | { |
---|
1573 | // get extended pointer on mapper |
---|
1574 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
1575 | |
---|
1576 | assert( (mapper_xp != XPTR_NULL), |
---|
1577 | "mapper not defined for a FILE vseg\n" ); |
---|
1578 | |
---|
1579 | // get extended pointer on page descriptor |
---|
1580 | page_xp = mapper_remote_get_page( mapper_xp , page_id ); |
---|
1581 | |
---|
1582 | if ( page_xp == XPTR_NULL ) return EINVAL; |
---|
1583 | } |
---|
1584 | |
---|
1585 | // Other types : allocate a physical page from target cluster, |
---|
1586 | // as defined by vseg type and vpn value |
---|
1587 | else |
---|
1588 | { |
---|
1589 | // allocate one physical page |
---|
1590 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
1591 | |
---|
1592 | if( page_xp == XPTR_NULL ) return ENOMEM; |
---|
1593 | |
---|
1594 | // initialise missing page from .elf file mapper for DATA and CODE types |
---|
1595 | // the vseg->mapper_xp field is an extended pointer on the .elf file mapper |
---|
1596 | if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) |
---|
1597 | { |
---|
1598 | // get extended pointer on mapper |
---|
1599 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
1600 | |
---|
1601 | assert( (mapper_xp != XPTR_NULL), |
---|
1602 | "mapper not defined for a CODE or DATA vseg\n" ); |
---|
1603 | |
---|
1604 | // compute missing page offset in vseg |
---|
1605 | uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT; |
---|
1606 | |
---|
1607 | // compute missing page offset in .elf file |
---|
1608 | uint32_t elf_offset = vseg->file_offset + offset; |
---|
1609 | |
---|
1610 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1611 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1612 | printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", |
---|
1613 | __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); |
---|
1614 | #endif |
---|
1615 | // compute extended pointer on page base |
---|
1616 | xptr_t base_xp = ppm_page2base( page_xp ); |
---|
1617 | |
---|
1618 | // file_size (in .elf mapper) can be smaller than vseg_size (BSS) |
---|
1619 | uint32_t file_size = vseg->file_size; |
---|
1620 | |
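            // three cases, depending on the position of the missing page relative
            // to the end of the useful data in the .elf mapper (file_size) :
            // - file_size <  offset             : page fully in BSS    => zero the page
            // - file_size >= offset + PAGE_SIZE : page fully in mapper => copy from mapper
            // - otherwise : copy (file_size - offset) bytes from the mapper,
            //               and zero the last (offset + PAGE_SIZE - file_size) bytes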
---|
1621 | if( file_size < offset ) // missing page fully in BSS |
---|
1622 | { |
---|
1623 | |
---|
1624 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1625 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1626 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", |
---|
1627 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
1628 | #endif |
---|
1629 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
1630 | { |
---|
1631 | memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
1632 | } |
---|
1633 | else |
---|
1634 | { |
---|
1635 | hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
1636 | } |
---|
1637 | } |
---|
1638 | else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper |
---|
1639 | { |
---|
1640 | |
---|
1641 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1642 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1643 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", |
---|
1644 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
1645 | #endif |
---|
1646 | error = mapper_move_kernel( mapper_xp, |
---|
1647 | true, // to_buffer |
---|
1648 | elf_offset, |
---|
1649 | base_xp, |
---|
1650 | CONFIG_PPM_PAGE_SIZE ); |
---|
1651 | if( error ) return EINVAL; |
---|
1652 | } |
---|
1653 | else // both in mapper and in BSS : |
---|
1654 | // - (file_size - offset) bytes from mapper |
---|
1655 | // - (page_size + offset - file_size) bytes from BSS |
---|
1656 | { |
---|
1657 | |
---|
1658 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
1659 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
1660 | printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" |
---|
1661 | " %d bytes from mapper / %d bytes from BSS\n", |
---|
1662 | __FUNCTION__, this->process->pid, this->trdid, vpn, |
---|
1663 | file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
1664 | #endif |
---|
1665 | // initialize mapper part |
---|
1666 | error = mapper_move_kernel( mapper_xp, |
---|
1667 | true, // to buffer |
---|
1668 | elf_offset, |
---|
1669 | base_xp, |
---|
1670 | file_size - offset ); |
---|
1671 | if( error ) return EINVAL; |
---|
1672 | |
---|
1673 | // initialize BSS part |
---|
1674 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
1675 | { |
---|
1676 | memset( GET_PTR( base_xp ) + file_size - offset , 0 , |
---|
1677 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
1678 | } |
---|
1679 | else |
---|
1680 | { |
---|
1681 | hal_remote_memset( base_xp + file_size - offset , 0 , |
---|
1682 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
1683 | } |
---|
1684 | } |
---|
1685 | } // end initialisation for CODE or DATA types |
---|
1686 | } |
---|
1687 | |
---|
1688 | // return ppn |
---|
1689 | *ppn = ppm_page2ppn( page_xp ); |
---|
1690 | |
---|
1691 | #if DEBUG_VMM_GET_ONE_PPN |
---|
1692 | cycle = (uint32_t)hal_get_cycles(); |
---|
1693 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
1694 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
---|
1695 | __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); |
---|
1696 | #endif |
---|
1697 | |
---|
1698 | return 0; |
---|
1699 | |
---|
1700 | } // end vmm_get_one_ppn() |
---|
1701 | |
---|
1702 | /////////////////////////////////////////////////// |
---|
1703 | error_t vmm_handle_page_fault( process_t * process, |
---|
1704 | vpn_t vpn ) |
---|
1705 | { |
---|
1706 | vseg_t * vseg; // vseg containing vpn |
---|
1707 | uint32_t new_attr; // new PTE_ATTR value |
---|
1708 | ppn_t new_ppn; // new PTE_PPN value |
---|
1709 | uint32_t ref_attr; // PTE_ATTR value in reference GPT |
---|
1710 | ppn_t ref_ppn; // PTE_PPN value in reference GPT |
---|
1711 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
1712 | process_t * ref_ptr; // reference process for missing vpn |
---|
1713 | xptr_t local_gpt_xp; // extended pointer on local GPT |
---|
1714 | xptr_t local_lock_xp; // extended pointer on local GPT lock |
---|
1715 | xptr_t ref_gpt_xp; // extended pointer on reference GPT |
---|
1716 | xptr_t ref_lock_xp; // extended pointer on reference GPT lock |
---|
1717 | error_t error; // value returned by called functions |
---|
1718 | |
---|
1719 | // get local vseg (access to reference VSL can be required) |
---|
1720 | error = vmm_get_vseg( process, |
---|
1721 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
1722 | &vseg ); |
---|
1723 | if( error ) |
---|
1724 | { |
---|
1725 | printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n", |
---|
1726 | __FUNCTION__ , vpn , process->pid ); |
---|
1727 | |
---|
1728 | return EXCP_USER_ERROR; |
---|
1729 | } |
---|
1730 | |
---|
1731 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
1732 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1733 | thread_t * this = CURRENT_THREAD; |
---|
1734 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
1735 | printk("\n[%s] thread[%x,%x] enter for vpn %x / %s / cycle %d\n",
---|
1736 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle ); |
---|
1737 | #endif |
---|
1738 | |
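    // A STACK or CODE vseg is private : its PTEs exist only in the local GPT, and the
    // fault is handled with the local GPT lock only. Other vseg types are public : the
    // reference process GPT is the up-to-date copy, and is therefore checked first.
    // A "false" page fault (VPN already mapped in the reference GPT) only requires
    // copying the PTE into the local GPT ; a "true" page fault requires allocating and
    // initialising a physical page, then updating both the reference and local GPTs.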
---|
1739 | //////////////// private vseg => access only the local GPT |
---|
1740 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
1741 | { |
---|
1742 | // build extended pointer on local GPT and local GPT lock |
---|
1743 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
1744 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
1745 | |
---|
1746 | // take local GPT lock in write mode |
---|
1747 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
1748 | |
---|
1749 | // check VPN still unmapped in local GPT |
---|
1750 | |
---|
1751 |         // do nothing if VPN has been mapped by a concurrent page_fault
---|
1752 | hal_gpt_get_pte( local_gpt_xp, |
---|
1753 | vpn, |
---|
1754 | &new_attr, |
---|
1755 | &new_ppn ); |
---|
1756 | |
---|
1757 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
1758 | { |
---|
1759 | // allocate and initialise a physical page depending on the vseg type |
---|
1760 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
1761 | |
---|
1762 | if( error ) |
---|
1763 | { |
---|
1764 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
1765 | __FUNCTION__ , process->pid , vpn ); |
---|
1766 | |
---|
1767 | // release local GPT lock in write mode |
---|
1768 | remote_rwlock_wr_release( local_lock_xp ); |
---|
1769 | |
---|
1770 | return EXCP_KERNEL_PANIC; |
---|
1771 | } |
---|
1772 | |
---|
1773 | // define new_attr from vseg flags |
---|
1774 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
1775 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
1776 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
1777 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
1778 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
1779 | |
---|
1780 | // set PTE (PPN & attribute) to local GPT |
---|
1781 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
1782 | vpn, |
---|
1783 | new_attr, |
---|
1784 | new_ppn ); |
---|
1785 | if ( error ) |
---|
1786 | { |
---|
1787 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n", |
---|
1788 | __FUNCTION__ , process->pid , vpn ); |
---|
1789 | |
---|
1790 | // release local GPT lock in write mode |
---|
1791 | remote_rwlock_wr_release( local_lock_xp ); |
---|
1792 | |
---|
1793 | return EXCP_KERNEL_PANIC; |
---|
1794 | } |
---|
1795 | } |
---|
1796 | |
---|
1797 | // release local GPT lock in write mode |
---|
1798 | remote_rwlock_wr_release( local_lock_xp ); |
---|
1799 | |
---|
1800 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
1801 | cycle = (uint32_t)hal_get_cycles(); |
---|
1802 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
1803 | printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
1804 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
1805 | #endif |
---|
1806 | return EXCP_NON_FATAL; |
---|
1807 | |
---|
1808 | } // end local GPT access |
---|
1809 | |
---|
1810 | //////////// public vseg => access reference GPT |
---|
1811 | else |
---|
1812 | { |
---|
1813 | // get reference process cluster and local pointer |
---|
1814 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
1815 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
1816 | |
---|
1817 | // build extended pointer on reference GPT and reference GPT lock |
---|
1818 | ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
1819 | ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
1820 | |
---|
1821 | // build extended pointer on local GPT and local GPT lock |
---|
1822 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
1823 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
1824 | |
---|
1825 | // take reference GPT lock in read mode |
---|
1826 | remote_rwlock_rd_acquire( ref_lock_xp ); |
---|
1827 | |
---|
1828 | // get directly PPN & attributes from reference GPT |
---|
1829 |         // this can avoid a costly RPC for a false page fault
---|
1830 | hal_gpt_get_pte( ref_gpt_xp, |
---|
1831 | vpn, |
---|
1832 | &ref_attr, |
---|
1833 | &ref_ppn ); |
---|
1834 | |
---|
1835 | // release reference GPT lock in read mode |
---|
1836 | remote_rwlock_rd_release( ref_lock_xp ); |
---|
1837 | |
---|
1838 | if( ref_attr & GPT_MAPPED ) // false page fault => update local GPT |
---|
1839 | { |
---|
1840 | // take local GPT lock in write mode |
---|
1841 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
1842 | |
---|
1843 | // check VPN still unmapped in local GPT |
---|
1844 | hal_gpt_get_pte( local_gpt_xp, |
---|
1845 | vpn, |
---|
1846 | &new_attr, |
---|
1847 | &new_ppn ); |
---|
1848 | |
---|
1849 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
1850 | { |
---|
1851 | // update local GPT from reference GPT |
---|
1852 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
1853 | vpn, |
---|
1854 | ref_attr, |
---|
1855 | ref_ppn ); |
---|
1856 | if( error ) |
---|
1857 | { |
---|
1858 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n", |
---|
1859 | __FUNCTION__ , process->pid , vpn ); |
---|
1860 | |
---|
1861 | // release local GPT lock in write mode |
---|
1862 | remote_rwlock_wr_release( local_lock_xp ); |
---|
1863 | |
---|
1864 | return EXCP_KERNEL_PANIC; |
---|
1865 | } |
---|
1866 | } |
---|
1867 |         else                           // VPN has been mapped by a concurrent page_fault
---|
1868 | { |
---|
1869 | // keep PTE from local GPT |
---|
1870 | ref_attr = new_attr; |
---|
1871 | ref_ppn = new_ppn; |
---|
1872 | } |
---|
1873 | |
---|
1874 | // release local GPT lock in write mode |
---|
1875 | remote_rwlock_wr_release( local_lock_xp ); |
---|
1876 | |
---|
1877 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
1878 | cycle = (uint32_t)hal_get_cycles(); |
---|
1879 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
1880 | printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
1881 | __FUNCTION__, vpn, ref_ppn, ref_attr, cycle ); |
---|
1882 | #endif |
---|
1883 | return EXCP_NON_FATAL; |
---|
1884 | } |
---|
1885 | else // true page fault => update reference GPT |
---|
1886 | { |
---|
1887 | // take reference GPT lock in write mode |
---|
1888 | remote_rwlock_wr_acquire( ref_lock_xp ); |
---|
1889 | |
---|
1890 | // check VPN still unmapped in reference GPT |
---|
1891 |             // do nothing if VPN has been mapped by a concurrent page_fault
---|
1892 | hal_gpt_get_pte( ref_gpt_xp, |
---|
1893 | vpn, |
---|
1894 | &ref_attr, |
---|
1895 | &ref_ppn ); |
---|
1896 | |
---|
1897 | if( (ref_attr & GPT_MAPPED) == 0 ) // VPN actually unmapped |
---|
1898 | { |
---|
1899 | // allocate and initialise a physical page depending on the vseg type |
---|
1900 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
1901 | |
---|
1902 | if( error ) |
---|
1903 | { |
---|
1904 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
1905 | __FUNCTION__ , process->pid , vpn ); |
---|
1906 | |
---|
1907 | // release reference GPT lock in write mode |
---|
1908 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
1909 | |
---|
1910 | return EXCP_KERNEL_PANIC; |
---|
1911 | } |
---|
1912 | |
---|
1913 | // define new_attr from vseg flags |
---|
1914 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
1915 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
1916 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
1917 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
1918 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
1919 | |
---|
1920 | // update reference GPT |
---|
1921 | error = hal_gpt_set_pte( ref_gpt_xp, |
---|
1922 | vpn, |
---|
1923 | new_attr, |
---|
1924 | new_ppn ); |
---|
1925 | |
---|
1926 | // update local GPT (protected by reference GPT lock) |
---|
1927 | error |= hal_gpt_set_pte( local_gpt_xp, |
---|
1928 | vpn, |
---|
1929 | new_attr, |
---|
1930 | new_ppn ); |
---|
1931 | |
---|
1932 | if( error ) |
---|
1933 | { |
---|
1934 | printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n", |
---|
1935 | __FUNCTION__ , process->pid , vpn ); |
---|
1936 | |
---|
1937 | // release reference GPT lock in write mode |
---|
1938 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
1939 | |
---|
1940 | return EXCP_KERNEL_PANIC; |
---|
1941 | } |
---|
1942 | } |
---|
1943 | |
---|
1944 | // release reference GPT lock in write mode |
---|
1945 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
1946 | |
---|
1947 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
1948 | cycle = (uint32_t)hal_get_cycles(); |
---|
1949 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
1950 | printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
1951 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
1952 | #endif |
---|
1953 | return EXCP_NON_FATAL; |
---|
1954 | } |
---|
1955 | } |
---|
1956 | } // end vmm_handle_page_fault() |
---|
1957 | |
---|
1958 | //////////////////////////////////////////// |
---|
1959 | error_t vmm_handle_cow( process_t * process, |
---|
1960 | vpn_t vpn ) |
---|
1961 | { |
---|
1962 | vseg_t * vseg; // vseg containing vpn |
---|
1963 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
1964 | process_t * ref_ptr; // reference process for missing vpn |
---|
1965 | xptr_t gpt_xp; // extended pointer on GPT |
---|
1966 | xptr_t gpt_lock_xp; // extended pointer on GPT lock |
---|
1967 | uint32_t old_attr; // current PTE_ATTR value |
---|
1968 | ppn_t old_ppn; // current PTE_PPN value |
---|
1969 | uint32_t new_attr; // new PTE_ATTR value |
---|
1970 | ppn_t new_ppn; // new PTE_PPN value |
---|
1971 | error_t error; |
---|
1972 | |
---|
1973 | #if DEBUG_VMM_HANDLE_COW |
---|
1974 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
1975 | thread_t * this = CURRENT_THREAD; |
---|
1976 | xptr_t this_xp = XPTR( local_cxy , this ); |
---|
1977 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
1978 | printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n", |
---|
1979 | __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
1980 | #endif |
---|
1981 | |
---|
1982 | // access local GPT to get GPT_COW flag |
---|
1983 | bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn ); |
---|
1984 | |
---|
1985 | if( cow == false ) return EXCP_USER_ERROR; |
---|
1986 | |
---|
1987 | // get local vseg |
---|
1988 | error = vmm_get_vseg( process, |
---|
1989 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
1990 | &vseg ); |
---|
1991 | if( error ) |
---|
1992 | { |
---|
1993 | printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n", |
---|
1994 | __FUNCTION__, vpn, process->pid ); |
---|
1995 | |
---|
1996 | return EXCP_KERNEL_PANIC; |
---|
1997 | } |
---|
1998 | |
---|
1999 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2000 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2001 | printk("\n[%s] thread[%x,%x] get vseg for vpn %x\n", |
---|
2002 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
2003 | #endif |
---|
2004 | |
---|
2005 | // get reference GPT cluster and local pointer |
---|
2006 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
2007 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
2008 | |
---|
2009 |     // build extended pointers on the relevant GPT and the GPT lock :
---|
2010 | // - access local GPT for a private vseg |
---|
2011 | // - access reference GPT for a public vseg |
---|
2012 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
2013 | { |
---|
2014 | gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
2015 | gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
2016 | } |
---|
2017 | else |
---|
2018 | { |
---|
2019 | gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
2020 | gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
2021 | } |
---|
2022 | |
---|
2023 | // take GPT lock in write mode |
---|
2024 | remote_rwlock_wr_acquire( gpt_lock_xp ); |
---|
2025 | |
---|
2026 |     // get current PTE from the relevant GPT
---|
2027 | hal_gpt_get_pte( gpt_xp, |
---|
2028 | vpn, |
---|
2029 | &old_attr, |
---|
2030 | &old_ppn ); |
---|
2031 | |
---|
2032 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2033 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2034 | printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n", |
---|
2035 | __FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr ); |
---|
2036 | #endif |
---|
2037 | |
---|
2038 | // the PTE must be mapped for a COW |
---|
2039 | if( (old_attr & GPT_MAPPED) == 0 ) |
---|
2040 | { |
---|
2041 | printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n", |
---|
2042 | __FUNCTION__, vpn, process->pid ); |
---|
2043 | |
---|
2044 | // release GPT lock in write mode |
---|
2045 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
2046 | |
---|
2047 | return EXCP_KERNEL_PANIC; |
---|
2048 | } |
---|
2049 | |
---|
2050 | // get pointers on physical page descriptor |
---|
2051 | xptr_t page_xp = ppm_ppn2page( old_ppn ); |
---|
2052 | cxy_t page_cxy = GET_CXY( page_xp ); |
---|
2053 | page_t * page_ptr = GET_PTR( page_xp ); |
---|
2054 | |
---|
2055 | // get extended pointers on forks and lock field in page descriptor |
---|
2056 | xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
2057 | xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
2058 | |
---|
2059 | // take lock protecting "forks" counter |
---|
2060 | remote_busylock_acquire( forks_lock_xp ); |
---|
2061 | |
---|
2062 | // get number of pending forks from page descriptor |
---|
2063 | uint32_t forks = hal_remote_l32( forks_xp ); |
---|
2064 | |
---|
2065 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2066 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2067 | printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n", |
---|
2068 | __FUNCTION__, this->process->pid, this->trdid, forks, vpn ); |
---|
2069 | #endif |
---|
2070 | |
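    // The "forks" counter registers the number of child processes still sharing this
    // physical page through COW :
    // - forks > 0  : the page is still shared => allocate a new page, copy the old
    //                page content into it, and decrement the counter.
    // - forks == 0 : this thread is the last user => keep the existing page and simply
    //                restore the write access.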
---|
2071 | if( forks ) // pending fork => allocate a new page, and copy old to new |
---|
2072 | { |
---|
2073 | // decrement pending forks counter in page descriptor |
---|
2074 | hal_remote_atomic_add( forks_xp , -1 ); |
---|
2075 | |
---|
2076 | // release lock protecting "forks" counter |
---|
2077 | remote_busylock_release( forks_lock_xp ); |
---|
2078 | |
---|
2079 | // allocate a new page |
---|
2080 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
2081 | |
---|
2082 | if( page_xp == XPTR_NULL ) |
---|
2083 | { |
---|
2084 | printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n", |
---|
2085 | __FUNCTION__ , vpn, process->pid ); |
---|
2086 | |
---|
2087 | // release GPT lock in write mode |
---|
2088 |         remote_rwlock_wr_release( gpt_lock_xp );
---|
2089 | |
---|
2090 | return EXCP_KERNEL_PANIC; |
---|
2091 | } |
---|
2092 | |
---|
2093 | // compute allocated page PPN |
---|
2094 | new_ppn = ppm_page2ppn( page_xp ); |
---|
2095 | |
---|
2096 | #if( DEBUG_VMM_HANDLE_COW & 1) |
---|
2097 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2098 | printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n", |
---|
2099 | __FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn ); |
---|
2100 | #endif |
---|
2101 | |
---|
2102 | // copy old page content to new page |
---|
2103 | hal_remote_memcpy( ppm_ppn2base( new_ppn ), |
---|
2104 | ppm_ppn2base( old_ppn ), |
---|
2105 | CONFIG_PPM_PAGE_SIZE ); |
---|
2106 | |
---|
2107 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
2108 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2109 | printk("\n[%s] thread[%x,%x] copied old page to new page\n", |
---|
2110 | __FUNCTION__, this->process->pid, this->trdid ); |
---|
2111 | #endif |
---|
2112 | |
---|
2113 | } |
---|
2114 | else // no pending fork => keep the existing page |
---|
2115 | { |
---|
2116 | // release lock protecting "forks" counter |
---|
2117 | remote_busylock_release( forks_lock_xp ); |
---|
2118 | |
---|
2119 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
2120 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2121 | printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n", |
---|
2122 | __FUNCTION__, this->process->pid, this->trdid, old_ppn ); |
---|
2123 | #endif |
---|
2124 | new_ppn = old_ppn; |
---|
2125 | } |
---|
2126 | |
---|
2127 | // build new_attr : reset COW and set WRITABLE, |
---|
2128 | new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW); |
---|
2129 | |
---|
2130 | // update the relevant GPT |
---|
2131 | // - private vseg => update local GPT |
---|
2132 | // - public vseg => update all GPT copies |
---|
2133 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
2134 | { |
---|
2135 | hal_gpt_set_pte( gpt_xp, |
---|
2136 | vpn, |
---|
2137 | new_attr, |
---|
2138 | new_ppn ); |
---|
2139 | } |
---|
2140 | else |
---|
2141 | { |
---|
2142 | if( ref_cxy == local_cxy ) // reference cluster is local |
---|
2143 | { |
---|
2144 | vmm_global_update_pte( process, |
---|
2145 | vpn, |
---|
2146 | new_attr, |
---|
2147 | new_ppn ); |
---|
2148 | } |
---|
2149 | else // reference cluster is remote |
---|
2150 | { |
---|
2151 | rpc_vmm_global_update_pte_client( ref_cxy, |
---|
2152 | ref_ptr, |
---|
2153 | vpn, |
---|
2154 | new_attr, |
---|
2155 | new_ppn ); |
---|
2156 | } |
---|
2157 | } |
---|
2158 | |
---|
2159 | // release GPT lock in write mode |
---|
2160 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
2161 | |
---|
2162 | #if DEBUG_VMM_HANDLE_COW |
---|
2163 | cycle = (uint32_t)hal_get_cycles(); |
---|
2164 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
2165 | printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n", |
---|
2166 | __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
2167 | #endif |
---|
2168 | |
---|
2169 | return EXCP_NON_FATAL; |
---|
2170 | |
---|
2171 | } // end vmm_handle_cow() |
---|
2172 | |
---|