/*
 * vmm.c - virtual memory manager related operations interface.
 *
 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
 *           Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_gpt.h>
#include <hal_vmm.h>
#include <hal_macros.h>
#include <printk.h>
#include <memcpy.h>
#include <remote_rwlock.h>
#include <remote_queuelock.h>
#include <list.h>
#include <xlist.h>
#include <bits.h>
#include <process.h>
#include <thread.h>
#include <vseg.h>
#include <cluster.h>
#include <scheduler.h>
#include <vfs.h>
#include <mapper.h>
#include <page.h>
#include <kmem.h>
#include <vmm.h>
#include <hal_exception.h>

//////////////////////////////////////////////////////////////////////////////////
//   Extern global variables
//////////////////////////////////////////////////////////////////////////////////

extern process_t process_zero;      // allocated in cluster.c

///////////////////////////////////////
error_t vmm_init( process_t * process )
{
    error_t    error;
    vseg_t   * vseg_kentry;
    vseg_t   * vseg_args;
    vseg_t   * vseg_envs;
    intptr_t   base;
    intptr_t   size;

#if DEBUG_VMM_INIT
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_INIT )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, process->pid , cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // initialize local list of vsegs
    vmm->vsegs_nr = 0;
    xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );

    assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
             <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );

    assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
            "no more than 32 threads per cluster for a single process\n");

    assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
             (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
            "STACK zone too small\n");

    // register kentry vseg in VSL
    base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_kentry = vmm_create_vseg( process,
                                   VSEG_TYPE_CODE,
                                   base,
                                   size,
                                   0,             // file_offset unused
                                   0,             // file_size unused
                                   XPTR_NULL,     // mapper_xp unused
                                   local_cxy );

    if( vseg_kentry == NULL )
    {
        printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->kent_vpn_base = base;

    // register args vseg in VSL
    base = (CONFIG_VMM_KENTRY_BASE +
            CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_args = vmm_create_vseg( process,
                                 VSEG_TYPE_DATA,
                                 base,
                                 size,
                                 0,             // file_offset unused
                                 0,             // file_size unused
                                 XPTR_NULL,     // mapper_xp unused
                                 local_cxy );

    if( vseg_args == NULL )
    {
        printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->args_vpn_base = base;

    // register the envs vseg in VSL
    base = (CONFIG_VMM_KENTRY_BASE +
            CONFIG_VMM_KENTRY_SIZE +
            CONFIG_VMM_ARGS_SIZE   ) << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_envs = vmm_create_vseg( process,
                                 VSEG_TYPE_DATA,
                                 base,
                                 size,
                                 0,             // file_offset unused
                                 0,             // file_size unused
                                 XPTR_NULL,     // mapper_xp unused
                                 local_cxy );

    if( vseg_envs == NULL )
    {
        printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->envs_vpn_base = base;

    // create GPT (empty)
    error = hal_gpt_create( &vmm->gpt );

    if( error )
        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );

    // initialize GPT lock
    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );

    // architecture specific GPT initialisation
    // (For TSAR, identity map the kentry_vseg)
    error = hal_vmm_init( vmm );

    if( error )
        printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ );

    // initialize STACK allocator
    vmm->stack_mgr.bitmap   = 0;
    vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );

    // initialize MMAP allocator
    vmm->mmap_mgr.vpn_base       = CONFIG_VMM_HEAP_BASE;
    vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );

    uint32_t i;
    for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );

    // initialize instrumentation counters
    vmm->pgfault_nr = 0;

    hal_fence();

#if DEBUG_VMM_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_INIT )
printk("\n[%s] thread[%x,%x] exit / process %x / entry_point %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, process->vmm.entry_point, cycle );
#endif

    return 0;

}  // end vmm_init()
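
//////////////////////////////////////////////////////////////////////////////////
// The user address space layout assumed by vmm_init() above can be sketched as
// follows (deduced from the asserts and vseg registrations; the actual values
// are defined in kernel_config.h and may differ):
//  - a UTILS zone (kentry + args + envs vsegs) starting at CONFIG_VMM_KENTRY_BASE,
//    that must fit below CONFIG_VMM_ELF_BASE,
//  - the ELF zone (code and data vsegs) starting at CONFIG_VMM_ELF_BASE,
//  - the HEAP / MMAP zone from CONFIG_VMM_HEAP_BASE up to CONFIG_VMM_STACK_BASE,
//  - the STACK zone from CONFIG_VMM_STACK_BASE, divided into 32 slots of
//    CONFIG_VMM_STACK_SIZE pages (one slot per thread of the process in a cluster).
//////////////////////////////////////////////////////////////////////////////////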

//////////////////////////////////////
void vmm_display( process_t * process,
                  bool_t      mapping )
{
    vmm_t * vmm = &process->vmm;
    gpt_t * gpt = &vmm->gpt;

    printk("\n***** VSL and GPT(%x) for process %x in cluster %x\n\n",
           process->vmm.gpt.ptr , process->pid , local_cxy );

    // get lock protecting the VSL and the GPT
    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );

    // scan the list of vsegs
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    xptr_t   iter_xp;
    xptr_t   vseg_xp;
    vseg_t * vseg;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        printk(" - %s : base = %X / size = %X / npages = %d\n",
               vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size );

        if( mapping )
        {
            vpn_t    vpn;
            ppn_t    ppn;
            uint32_t attr;
            vpn_t    base = vseg->vpn_base;
            vpn_t    size = vseg->vpn_size;
            for( vpn = base ; vpn < (base+size) ; vpn++ )
            {
                hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
                if( attr & GPT_MAPPED )
                {
                    printk("    . vpn = %X / attr = %X / ppn = %X\n", vpn , attr , ppn );
                }
            }
        }
    }

    // release the locks
    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );

}  // vmm_display()
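
// Typical use of vmm_display() (a sketch; it is called this way from the
// DEBUG_VMM_DESTROY block in vmm_destroy() below):
//     vmm_display( process , false );   // display the VSL only
//     vmm_display( process , true  );   // display the VSL and all mapped GPT entries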

///////////////////////////////////
void vmm_vseg_attach( vmm_t  * vmm,
                      vseg_t * vseg )
{
    // build extended pointer on rwlock protecting VSL
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

    // get rwlock in write mode
    remote_rwlock_wr_acquire( lock_xp );

    // update vseg descriptor
    vseg->vmm = vmm;

    // add vseg in vmm list
    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
                    XPTR( local_cxy , &vseg->xlist ) );

    // release rwlock in write mode
    remote_rwlock_wr_release( lock_xp );
}

///////////////////////////////////
void vmm_vseg_detach( vmm_t  * vmm,
                      vseg_t * vseg )
{
    // build extended pointer on rwlock protecting VSL
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

    // get rwlock in write mode
    remote_rwlock_wr_acquire( lock_xp );

    // update vseg descriptor
    vseg->vmm = NULL;

    // remove vseg from vmm list
    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

    // release rwlock in write mode
    remote_rwlock_wr_release( lock_xp );
}

////////////////////////////////////////////////
void vmm_global_update_pte( process_t * process,
                            vpn_t       vpn,
                            uint32_t    attr,
                            ppn_t       ppn )
{
    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

#if DEBUG_VMM_UPDATE_PTE
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

    // check cluster is reference
    assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n");

    // get extended pointer on root of process copies xlist in owner cluster
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // loop on destination process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_UPDATE_PTE & 0x1)
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // update remote GPT
        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    }

#if DEBUG_VMM_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

}  // end vmm_global_update_pte()
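
// vmm_global_update_pte() is the fan-out counterpart of a local GPT update : the same
// (attr , ppn) pair is written, for the same vpn, in the GPT of every process copy
// registered in the copies_list of the owner cluster. It must be called from the
// reference cluster (see the assert above); a presumable caller is the page-fault /
// COW resolution code, once the reference GPT entry has been settled.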

///////////////////////////////////////
void vmm_set_cow( process_t * process )
{
    vmm_t         * vmm;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    xptr_t          vseg_root_xp;
    xptr_t          vseg_iter_xp;

    xptr_t          vseg_xp;
    vseg_t        * vseg;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

#if DEBUG_VMM_SET_COW
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

    // check cluster is reference
    assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
            "local cluster is not process reference cluster\n");

    // get pointer on reference VMM
    vmm = &process->vmm;

    // get extended pointer on root of process copies xlist in owner cluster
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // get extended pointer on root of vsegs xlist from reference VMM
    vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    // loop on destination process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handling process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // loop on vsegs in (local) reference process VSL
        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
        {
            // get pointer on vseg
            vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
            vseg    = GET_PTR( vseg_xp );

            assert( (GET_CXY( vseg_xp ) == local_cxy) ,
                    "all vsegs in reference VSL must be local\n" );

            // get vseg type, base and size
            uint32_t type     = vseg->type;
            vpn_t    vpn_base = vseg->vpn_base;
            vpn_t    vpn_size = vseg->vpn_size;

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
            // only DATA, ANON and REMOTE vsegs
            if( (type == VSEG_TYPE_DATA)  ||
                (type == VSEG_TYPE_ANON)  ||
                (type == VSEG_TYPE_REMOTE) )
            {
                vpn_t    vpn;
                uint32_t attr;
                ppn_t    ppn;
                xptr_t   page_xp;
                cxy_t    page_cxy;
                page_t * page_ptr;
                xptr_t   forks_xp;
                xptr_t   lock_xp;

                // update flags in remote GPT
                hal_gpt_set_cow( remote_gpt_xp,
                                 vpn_base,
                                 vpn_size );

                // atomically increment pending forks counter in physical pages,
                // for all vseg pages that are mapped in reference cluster
                if( remote_process_cxy == local_cxy )
                {
                    // scan all pages in vseg
                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                    {
                        // get page attributes and PPN from reference GPT
                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn );

                        // atomically update pending forks counter if page is mapped
                        if( attr & GPT_MAPPED )
                        {
                            // get pointers and cluster on page descriptor
                            page_xp  = ppm_ppn2page( ppn );
                            page_cxy = GET_CXY( page_xp );
                            page_ptr = GET_PTR( page_xp );

                            // get extended pointers on "forks" and "lock"
                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                            // take lock protecting "forks" counter
                            remote_busylock_acquire( lock_xp );

                            // increment "forks"
                            hal_remote_atomic_add( forks_xp , 1 );

                            // release lock protecting "forks" counter
                            remote_busylock_release( lock_xp );
                        }
                    }   // end loop on vpn
                }   // end if local
            }   // end if vseg type
        }   // end loop on vsegs
    }   // end loop on process copies

#if DEBUG_VMM_SET_COW
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

}  // end vmm_set_cow()
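
// Summary of the COW setup performed by vmm_set_cow() : for each DATA, ANON or REMOTE
// vseg of the reference process, the COW flag is set in the GPT of every process copy,
// and the "forks" counter of each physical page mapped in the reference cluster is
// incremented under the page busylock. This counter is decremented later, either by
// vmm_unmap_vseg() below when the GPT entry is unmapped, or (presumably, the code is
// not in this file) by the write-fault handler when the page is actually duplicated.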

/////////////////////////////////////////////////
error_t vmm_fork_copy( process_t * child_process,
                       xptr_t      parent_process_xp )
{
    error_t     error;
    cxy_t       parent_cxy;
    process_t * parent_process;
    vmm_t     * parent_vmm;
    xptr_t      parent_lock_xp;
    vmm_t     * child_vmm;
    xptr_t      iter_xp;
    xptr_t      parent_vseg_xp;
    vseg_t    * parent_vseg;
    vseg_t    * child_vseg;
    uint32_t    type;
    bool_t      cow;
    vpn_t       vpn;
    vpn_t       vpn_base;
    vpn_t       vpn_size;
    xptr_t      page_xp;        // extended pointer on page descriptor
    page_t    * page_ptr;
    cxy_t       page_cxy;
    xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
    xptr_t      lock_xp;        // extended pointer on lock protecting the forks counter
    xptr_t      parent_root_xp;
    bool_t      mapped;
    ppn_t       ppn;

#if DEBUG_VMM_FORK_COPY
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, cycle );
#endif

    // get parent process cluster and local pointer
    parent_cxy     = GET_CXY( parent_process_xp );
    parent_process = GET_PTR( parent_process_xp );

    // get local pointers on parent and child VMM
    parent_vmm = &parent_process->vmm;
    child_vmm  = &child_process->vmm;

    // get extended pointer on lock protecting the parent VSL
    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );

    // initialize the lock protecting the child VSL
    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_VSL );

    // initialize the child VSL as empty
    xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) );
    child_vmm->vsegs_nr = 0;

    // create child GPT
    error = hal_gpt_create( &child_vmm->gpt );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
        return -1;
    }

    // build extended pointer on parent VSL
    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );

    // take the lock protecting the parent VSL in read mode
    remote_rwlock_rd_acquire( parent_lock_xp );

    // loop on parent VSL xlist
    XLIST_FOREACH( parent_root_xp , iter_xp )
    {
        // get local and extended pointers on current parent vseg
        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        parent_vseg    = GET_PTR( parent_vseg_xp );

        // get vseg type
        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

        // all parent vsegs - but STACK - must be copied in child VSL
        if( type != VSEG_TYPE_STACK )
        {
            // allocate memory for a new child vseg
            child_vseg = vseg_alloc();
            if( child_vseg == NULL )   // release all allocated vsegs
            {
                vmm_destroy( child_process );
                printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
                return -1;
            }

            // copy parent vseg to child vseg
            vseg_init_from_ref( child_vseg , parent_vseg_xp );

            // register child vseg in child VSL
            vmm_vseg_attach( child_vmm , child_vseg );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

            // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
            if( type != VSEG_TYPE_CODE )
            {
                // activate the COW for DATA, MMAP, REMOTE vsegs only
                cow = ( type != VSEG_TYPE_FILE );

                vpn_base = child_vseg->vpn_base;
                vpn_size = child_vseg->vpn_size;

                // scan pages in parent vseg
                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                {
                    error = hal_gpt_pte_copy( &child_vmm->gpt,
                                              XPTR( parent_cxy , &parent_vmm->gpt ),
                                              vpn,
                                              cow,
                                              &ppn,
                                              &mapped );
                    if( error )
                    {
                        vmm_destroy( child_process );
                        printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
                        return -1;
                    }

                    // increment pending forks counter in page if mapped
                    if( mapped )
                    {
                        // get pointers and cluster on page descriptor
                        page_xp  = ppm_ppn2page( ppn );
                        page_cxy = GET_CXY( page_xp );
                        page_ptr = GET_PTR( page_xp );

                        // get extended pointers on "forks" and "lock"
                        forks_xp = XPTR( page_cxy , &page_ptr->forks );
                        lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                        // get lock protecting "forks" counter
                        remote_busylock_acquire( lock_xp );

                        // increment "forks"
                        hal_remote_atomic_add( forks_xp , 1 );

                        // release lock protecting "forks" counter
                        remote_busylock_release( lock_xp );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
#endif
                    }
                }
            }   // end if no code & no stack
        }   // end if no stack
    }   // end loop on vsegs

    // release the parent VSL lock in read mode
    remote_rwlock_rd_release( parent_lock_xp );

    // initialize child GPT (architecture specific)
    // => For TSAR, identity map the kentry_vseg
    error = hal_vmm_init( child_vmm );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize child GPT\n", __FUNCTION__ );
        return -1;
    }

    // initialize the child VMM STACK allocator
    child_vmm->stack_mgr.bitmap   = 0;
    child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;

    // initialize the child VMM MMAP allocator
    uint32_t i;
    child_vmm->mmap_mgr.vpn_base       = CONFIG_VMM_HEAP_BASE;
    child_vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
    for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );

    // initialize instrumentation counters
    child_vmm->pgfault_nr = 0;

    // copy base addresses from parent VMM to child VMM
    child_vmm->kent_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->kent_vpn_base));
    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));

    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));

    hal_fence();

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , cycle );
#endif

    return 0;

}  // vmm_fork_copy()
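
// What vmm_fork_copy() does and does not copy (as implemented above) :
//  - STACK vsegs of the parent are skipped : the child is expected to get its own
//    stacks when its threads are created;
//  - all other vsegs are duplicated in the child VSL;
//  - GPT entries are copied for every type but CODE, with COW activated for all
//    copied types except FILE, and the "forks" counter of each mapped physical
//    page is incremented so that the frame is not released while still shared.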

///////////////////////////////////////
void vmm_destroy( process_t * process )
{
    xptr_t   vseg_xp;
    vseg_t * vseg;

#if DEBUG_VMM_DESTROY
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

#if (DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
vmm_display( process , true );
#endif

    // get pointer on local VMM
    vmm_t * vmm = &process->vmm;

    // get extended pointer on VSL root and VSL lock
    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    // remove all user vsegs registered in VSL
    while( !xlist_is_empty( root_xp ) )
    {
        // get pointer on first vseg in VSL
        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        // unmap and release physical pages
        vmm_unmap_vseg( process , vseg );

        // remove vseg from VSL
        vmm_vseg_detach( vmm , vseg );

        // release memory allocated to vseg descriptor
        vseg_free( vseg );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] %s vseg released / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif

    }

    // remove all vsegs from zombi_lists in MMAP allocator
    uint32_t i;
    for( i = 0 ; i < 32 ; i++ )
    {
        while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
        {
            vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] found %s zombi vseg / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
            vmm_vseg_detach( vmm , vseg );
            vseg_free( vseg );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] %s zombi vseg released / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
    }

    // release memory allocated to the GPT itself
    hal_gpt_destroy( &vmm->gpt );

#if DEBUG_VMM_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end vmm_destroy()

/////////////////////////////////////////////////
vseg_t * vmm_check_conflict( process_t * process,
                             vpn_t       vpn_base,
                             vpn_t       vpn_size )
{
    vmm_t * vmm = &process->vmm;

    // scan the VSL
    vseg_t * vseg;
    xptr_t   iter_xp;
    xptr_t   vseg_xp;
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
            (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
    }
    return NULL;

}  // end vmm_check_conflict()
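
// Worked example for the overlap test above (illustrative values only) : with an
// existing vseg [vpn_base = 0x100 , vpn_size = 0x10] covering pages 0x100 to 0x10F,
//  - a request [vpn_base = 0x108 , vpn_size = 0x8] is reported as a conflict,
//  - a request [vpn_base = 0x110 , vpn_size = 0x10] is not, since the ranges only touch.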

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM stack_vseg specific allocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : pointer on VMM.
// @ vpn_base : (return value) first allocated page
// @ vpn_size : (return value) number of allocated pages
////////////////////////////////////////////////////////////////////////////////////////////
static error_t vmm_stack_alloc( vmm_t * vmm,
                                vpn_t * vpn_base,
                                vpn_t * vpn_size )
{
    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // get lock on stack allocator
    busylock_acquire( &mgr->lock );

    // get first free slot index in bitmap
    int32_t index = bitmap_ffc( &mgr->bitmap , 4 );
    if( (index < 0) || (index > 31) )
    {
        busylock_release( &mgr->lock );
        return 0xFFFFFFFF;
    }

    // update bitmap
    bitmap_set( &mgr->bitmap , index );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // returns vpn_base, vpn_size (one page not allocated)
    *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
    return 0;

}  // end vmm_stack_alloc()
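
// Worked example (illustrative values only) : if CONFIG_VMM_STACK_SIZE = 0x10 and the
// first free bitmap slot is index = 2, the function returns
//     vpn_base = stack_mgr.vpn_base + 2 * 0x10 + 1   and   vpn_size = 0xF.
// The first page of each slot is deliberately left unallocated, presumably to act as
// a guard page between consecutive user stacks.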

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM MMAP specific allocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in]  pointer on VMM.
// @ npages   : [in]  requested number of pages.
// @ vpn_base : [out] first allocated page.
// @ vpn_size : [out] actual number of allocated pages.
////////////////////////////////////////////////////////////////////////////////////////////
static error_t vmm_mmap_alloc( vmm_t * vmm,
                               vpn_t   npages,
                               vpn_t * vpn_base,
                               vpn_t * vpn_size )
{
    uint32_t index;
    vseg_t * vseg;
    vpn_t    base;
    vpn_t    size;
    vpn_t    free;

    // mmap vseg size must be power of 2
    // compute actual size and index in zombi_list array
    size  = POW2_ROUNDUP( npages );
    index = bits_log2( size );

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // get lock on mmap allocator
    busylock_acquire( &mgr->lock );

    // get vseg from zombi_list or from mmap zone
    if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
    {
        // check overflow
        free = mgr->first_free_vpn;
        if( (free + size) > mgr->vpn_size )
        {
            busylock_release( &mgr->lock );
            return ENOMEM;
        }

        // update MMAP allocator
        mgr->first_free_vpn += size;

        // compute base
        base = free;
    }
    else                                               // from zombi_list
    {
        // get pointer on zombi vseg from zombi_list
        vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );

        // remove vseg from free-list
        list_unlink( &vseg->zlist );

        // compute base
        base = vseg->vpn_base;
    }

    // release lock on mmap allocator
    busylock_release( &mgr->lock );

    // returns vpn_base, vpn_size
    *vpn_base = base;
    *vpn_size = size;
    return 0;

}  // end vmm_mmap_alloc()
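
// Worked example (illustrative values only) : a request for npages = 5 gives
// size = POW2_ROUNDUP(5) = 8 and index = bits_log2(8) = 3. The vseg is carved from the
// mmap zone the first time, while an 8-page vseg later released by vmm_remove_vseg()
// goes to zombi_list[3] and is recycled on the next request of the same rounded size.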

////////////////////////////////////////////////
vseg_t * vmm_create_vseg( process_t   * process,
                          vseg_type_t   type,
                          intptr_t      base,
                          uint32_t      size,
                          uint32_t      file_offset,
                          uint32_t      file_size,
                          xptr_t        mapper_xp,
                          cxy_t         cxy )
{
    vseg_t * vseg;          // created vseg pointer
    vpn_t    vpn_base;      // first page index
    vpn_t    vpn_size;      // number of pages covered by vseg
    error_t  error;

#if DEBUG_VMM_CREATE_VSEG
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter / %s / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // compute base, size, vpn_base, vpn_size, depending on vseg type
    // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs

    if( type == VSEG_TYPE_STACK )
    {
        // get vpn_base and vpn_size from STACK allocator
        error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // compute vseg base and size from vpn_base and vpn_size
        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
        size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
    }
    else if( type == VSEG_TYPE_FILE )
    {
        // compute page index (in mapper) for first byte
        vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT;

        // compute page index (in mapper) for last byte
        vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;

        // compute offset in first page
        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;

        // compute number of pages required in virtual space
        vpn_t npages = vpn_max - vpn_min + 1;

        // get vpn_base and vpn_size from MMAP allocator
        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // set the vseg base (not always aligned for FILE)
        base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
    }
    else if( (type == VSEG_TYPE_ANON) ||
             (type == VSEG_TYPE_REMOTE) )
    {
        // compute number of required pages in virtual space
        vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
        if( size & CONFIG_PPM_PAGE_MASK) npages++;

        // get vpn_base and vpn_size from MMAP allocator
        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // set vseg base (always aligned for ANON or REMOTE)
        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    }
    else    // VSEG_TYPE_DATA or VSEG_TYPE_CODE
    {
        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;

        vpn_base = vpn_min;
        vpn_size = vpn_max - vpn_min + 1;
    }

    // check collisions
    vseg = vmm_check_conflict( process , vpn_base , vpn_size );
    if( vseg != NULL )
    {
        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n"
               "  overlaps existing vseg [vpn_base = %x / vpn_size = %x]\n",
        __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
        return NULL;
    }

    // allocate physical memory for vseg descriptor
    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",
        __FUNCTION__ , process->pid );
        return NULL;
    }

    // initialize vseg descriptor
    vseg_init( vseg,
               type,
               base,
               size,
               vpn_base,
               vpn_size,
               file_offset,
               file_size,
               mapper_xp,
               cxy );

    // attach vseg to VSL
    vmm_vseg_attach( vmm , vseg );

#if DEBUG_VMM_CREATE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
#endif

    return vseg;

}  // vmm_create_vseg()
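
// Example call (a sketch, mirroring the registrations done in vmm_init() above) :
// creating a one-page anonymous vseg in the local cluster. For STACK, ANON and REMOTE
// types the "base" argument is ignored and recomputed by the relevant allocator;
// file_offset, file_size and mapper_xp are only meaningful for the FILE type.
//
//     vseg_t * vseg = vmm_create_vseg( process,
//                                      VSEG_TYPE_ANON,
//                                      0,                            // base ignored
//                                      1 << CONFIG_PPM_PAGE_SHIFT,   // one page
//                                      0, 0, XPTR_NULL, local_cxy );
//     if( vseg == NULL ) printk("\n[ERROR] cannot create anon vseg\n");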

/////////////////////////////////////
void vmm_remove_vseg( vseg_t * vseg )
{
    // get pointers on calling process and VMM
    thread_t * this = CURRENT_THREAD;
    vmm_t    * vmm  = &this->process->vmm;
    uint32_t   type = vseg->type;

    // detach vseg from VSL
    vmm_vseg_detach( vmm , vseg );

    // release the stack slot to VMM stack allocator if STACK type
    if( type == VSEG_TYPE_STACK )
    {
        // get pointer on stack allocator
        stack_mgr_t * mgr = &vmm->stack_mgr;

        // compute slot index
        uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);

        // update stacks_bitmap
        busylock_acquire( &mgr->lock );
        bitmap_clear( &mgr->bitmap , index );
        busylock_release( &mgr->lock );
    }

    // release the vseg to VMM mmap allocator if MMAP type
    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
    {
        // get pointer on mmap allocator
        mmap_mgr_t * mgr = &vmm->mmap_mgr;

        // compute zombi_list index
        uint32_t index = bits_log2( vseg->vpn_size );

        // update zombi_list
        busylock_acquire( &mgr->lock );
        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
        busylock_release( &mgr->lock );
    }

    // release physical memory allocated for vseg descriptor if no MMAP type
    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
    {
        vseg_free( vseg );
    }
}  // end vmm_remove_vseg()

/////////////////////////////////////////
void vmm_unmap_vseg( process_t * process,
                     vseg_t    * vseg )
{
    vpn_t       vpn;        // VPN of current PTE
    vpn_t       vpn_min;    // VPN of first PTE
    vpn_t       vpn_max;    // VPN of last PTE (excluded)
    ppn_t       ppn;        // current PTE ppn value
    uint32_t    attr;       // current PTE attributes
    kmem_req_t  req;        // request to release memory
    xptr_t      page_xp;    // extended pointer on page descriptor
    cxy_t       page_cxy;   // page descriptor cluster
    page_t    * page_ptr;   // page descriptor pointer
    xptr_t      forks_xp;   // extended pointer on pending forks counter
    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    uint32_t    forks;      // actual number of pending forks

#if DEBUG_VMM_UNMAP_VSEG
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_UNMAP_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter / process %x / vseg %s / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid,
vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
#endif

    // get pointer on local GPT
    gpt_t * gpt = &process->vmm.gpt;

    // loop on pages in vseg
    vpn_min = vseg->vpn_base;
    vpn_max = vpn_min + vseg->vpn_size;
    for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    {
        // get GPT entry
        hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );

        if( attr & GPT_MAPPED )     // entry is mapped
        {

#if( DEBUG_VMM_UNMAP_VSEG & 1 )
if( DEBUG_VMM_UNMAP_VSEG < cycle )
printk("- vpn %x / ppn %x\n" , vpn , ppn );
#endif

            // check small page
            assert( (attr & GPT_SMALL) , "a user vseg must use small pages" );

            // unmap GPT entry in local GPT
            hal_gpt_reset_pte( gpt , vpn );

            // handle pending forks counter if
            // 1) not identity mapped
            // 2) reference cluster
            if( ((vseg->flags & VSEG_IDENT) == 0) &&
                (GET_CXY( process->ref_xp ) == local_cxy) )
            {
                // get extended pointer on physical page descriptor
                page_xp  = ppm_ppn2page( ppn );
                page_cxy = GET_CXY( page_xp );
                page_ptr = GET_PTR( page_xp );

                // get extended pointers on forks and lock fields
                forks_xp = XPTR( page_cxy , &page_ptr->forks );
                lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                // get pending forks counter
                forks = hal_remote_l32( forks_xp );

                if( forks )     // decrement pending forks counter
                {
                    hal_remote_atomic_add( forks_xp , -1 );
                }
                else            // release physical page to relevant cluster
                {
                    if( page_cxy == local_cxy )     // local cluster
                    {
                        req.type = KMEM_PAGE;
                        req.ptr  = page_ptr;
                        kmem_free( &req );
                    }
                    else                            // remote cluster
                    {
                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
                    }
                }
            }
        }
    }

#if DEBUG_VMM_UNMAP_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_UNMAP_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid,
vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
#endif

}  // end vmm_unmap_vseg()
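
// Release policy implemented above : a mapped frame is only given back to the PPM
// (locally through kmem_free(), or through an RPC to the owner cluster) when its
// "forks" counter is zero; otherwise the counter is simply decremented, since another
// process copy still references the frame through a COW mapping.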
---|
[1] | 1241 | |
---|
[407] | 1242 | ////////////////////////////////////////////////////////////////////////////////////////// |
---|
[440] | 1243 | // This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(), |
---|
| 1244 | // and vmm_resize_vseg() functions. It scan the local VSL to find the unique vseg |
---|
| 1245 | // containing a given virtual address. |
---|
[407] | 1246 | ////////////////////////////////////////////////////////////////////////////////////////// |
---|
[406] | 1247 | // @ vmm : pointer on the process VMM. |
---|
| 1248 | // @ vaddr : virtual address. |
---|
| 1249 | // @ return vseg pointer if success / return NULL if not found. |
---|
[407] | 1250 | ////////////////////////////////////////////////////////////////////////////////////////// |
---|
[595] | 1251 | static vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, |
---|
| 1252 | intptr_t vaddr ) |
---|
[406] | 1253 | { |
---|
[408] | 1254 | xptr_t iter_xp; |
---|
| 1255 | xptr_t vseg_xp; |
---|
| 1256 | vseg_t * vseg; |
---|
[406] | 1257 | |
---|
[408] | 1258 | // get extended pointers on VSL lock and root |
---|
| 1259 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
| 1260 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
[406] | 1261 | |
---|
[408] | 1262 | // get lock protecting the VSL |
---|
[567] | 1263 | remote_rwlock_rd_acquire( lock_xp ); |
---|
[408] | 1264 | |
---|
| 1265 | // scan the list of vsegs in VSL |
---|
| 1266 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
[406] | 1267 | { |
---|
[408] | 1268 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
[433] | 1269 | vseg = GET_PTR( vseg_xp ); |
---|
[595] | 1270 | |
---|
[408] | 1271 | if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) |
---|
[595] | 1272 | { |
---|
[408] | 1273 | // return success |
---|
[567] | 1274 | remote_rwlock_rd_release( lock_xp ); |
---|
[408] | 1275 | return vseg; |
---|
| 1276 | } |
---|
[406] | 1277 | } |
---|
| 1278 | |
---|
[408] | 1279 | // return failure |
---|
[567] | 1280 | remote_rwlock_rd_release( lock_xp ); |
---|
[408] | 1281 | return NULL; |
---|
[406] | 1282 | |
---|
[595] | 1283 | } // end vmm_vseg_from_vaddr() |
---|
[406] | 1284 | |
---|
[1] | 1285 | ///////////////////////////////////////////// |
---|
| 1286 | error_t vmm_resize_vseg( process_t * process, |
---|
| 1287 | intptr_t base, |
---|
| 1288 | intptr_t size ) |
---|
| 1289 | { |
---|
[406] | 1290 | error_t error; |
---|
| 1291 | vseg_t * new; |
---|
| 1292 | vpn_t vpn_min; |
---|
| 1293 | vpn_t vpn_max; |
---|
[1] | 1294 | |
---|
| 1295 | // get pointer on process VMM |
---|
| 1296 | vmm_t * vmm = &process->vmm; |
---|
| 1297 | |
---|
| 1298 | intptr_t addr_min = base; |
---|
| 1299 | intptr_t addr_max = base + size; |
---|
| 1300 | |
---|
| 1301 | // get pointer on vseg |
---|
[595] | 1302 | vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); |
---|
[1] | 1303 | |
---|
| 1304 | if( vseg == NULL) return EINVAL; |
---|
[21] | 1305 | |
---|
[408] | 1306 | // get extended pointer on VSL lock |
---|
| 1307 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
[21] | 1308 | |
---|
[408] | 1309 | // get lock protecting VSL |
---|
[567] | 1310 | remote_rwlock_wr_acquire( lock_xp ); |
---|
[408] | 1311 | |
---|
[1] | 1312 | if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // region not included in vseg |
---|
| 1313 | { |
---|
| 1314 | error = EINVAL; |
---|
| 1315 | } |
---|
| 1316 | else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be removed |
---|
| 1317 | { |
---|
| 1318 | vmm_remove_vseg( vseg ); |
---|
| 1319 | error = 0; |
---|
| 1320 | } |
---|
[406] | 1321 | else if( vseg->min == addr_min ) // vseg must be resized |
---|
[1] | 1322 | { |
---|
[406] | 1323 | // update vseg base address |
---|
| 1324 | vseg->min = addr_max; |
---|
| 1325 | |
---|
| 1326 | // update vpn_base and vpn_size |
---|
| 1327 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1328 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1329 | vseg->vpn_base = vpn_min; |
---|
| 1330 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
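| | // e.g. (hypothetical values, 4 Kbytes pages) : min = 0x30001000 and |
---|
| | // max = 0x30003000 give vpn_base = 0x30001 and vpn_size = 2. |
---|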
| 1331 | error = 0; |
---|
[1] | 1332 | } |
---|
[406] | 1333 | else if( vseg->max == addr_max ) // vseg must be resized |
---|
[1] | 1334 | { |
---|
[406] | 1335 | // update vseg max address |
---|
| 1336 | vseg->max = addr_min; |
---|
| 1337 | |
---|
| 1338 | // update vpn_base and vpn_size |
---|
| 1339 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1340 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1341 | vseg->vpn_base = vpn_min; |
---|
| 1342 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
| 1343 | error = 0; |
---|
[1] | 1344 | } |
---|
[406] | 1345 | else // vseg cut in three regions |
---|
[1] | 1346 | { |
---|
[406] | 1347 | // resize existing vseg (keep its initial max address for the new vseg) |
---|
| | intptr_t old_max = vseg->max; |
---|
| 1348 | vseg->max = addr_min; |
---|
| 1349 | |
---|
| 1350 | // update vpn_base and vpn_size |
---|
| 1351 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1352 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1353 | vseg->vpn_base = vpn_min; |
---|
| 1354 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
| 1355 | |
---|
| 1356 | // create new vseg |
---|
[407] | 1357 | new = vmm_create_vseg( process, |
---|
| 1358 | vseg->type, |
---|
| 1359 | addr_max, |
---|
| 1360 | (old_max - addr_max), |
---|
| 1361 | vseg->file_offset, |
---|
| 1362 | vseg->file_size, |
---|
| 1363 | vseg->mapper_xp, |
---|
| 1364 | vseg->cxy ); |
---|
| 1365 | |
---|
[406] | 1366 | if( new == NULL ) error = EINVAL; |
---|
| 1367 | else error = 0; |
---|
[1] | 1368 | } |
---|
| 1369 | |
---|
| 1370 | // release VSL lock |
---|
[567] | 1371 | remote_rwlock_wr_release( lock_xp ); |
---|
[1] | 1372 | |
---|
| 1373 | return error; |
---|
| 1374 | |
---|
[406] | 1375 | } // vmm_resize_vseg() |
---|
| 1376 | |
---|
[1] | 1377 | /////////////////////////////////////////// |
---|
[388] | 1378 | error_t vmm_get_vseg( process_t * process, |
---|
[394] | 1379 | intptr_t vaddr, |
---|
[388] | 1380 | vseg_t ** found_vseg ) |
---|
[1] | 1381 | { |
---|
[595] | 1382 | xptr_t vseg_xp; |
---|
| 1383 | vseg_t * vseg; |
---|
| 1384 | vmm_t * vmm; |
---|
| 1385 | error_t error; |
---|
[1] | 1386 | |
---|
[440] | 1387 | // get pointer on local VMM |
---|
| 1388 | vmm = &process->vmm; |
---|
[1] | 1389 | |
---|
[440] | 1390 | // try to get vseg from local VMM |
---|
[595] | 1391 | vseg = vmm_vseg_from_vaddr( vmm , vaddr ); |
---|
[440] | 1392 | |
---|
[388] | 1393 | if( vseg == NULL ) // vseg not found in local cluster => try to get it from ref |
---|
| 1394 | { |
---|
| 1395 | // get extended pointer on reference process |
---|
| 1396 | xptr_t ref_xp = process->ref_xp; |
---|
[1] | 1397 | |
---|
[388] | 1398 | // get cluster and local pointer on reference process |
---|
| 1399 | cxy_t ref_cxy = GET_CXY( ref_xp ); |
---|
[433] | 1400 | process_t * ref_ptr = GET_PTR( ref_xp ); |
---|
[388] | 1401 | |
---|
| 1402 | if( local_cxy == ref_cxy ) return -1; // local cluster is the reference |
---|
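| | // (if the local cluster is the reference, the VSL scanned above is complete, |
---|
| | // so the vaddr does not belong to any vseg of this process) |
---|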
| 1403 | |
---|
| 1404 | // get extended pointer on reference vseg |
---|
[394] | 1405 | rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); |
---|
[388] | 1406 | |
---|
[440] | 1407 | if( error ) return -1; // vseg not found => illegal user vaddr |
---|
[388] | 1408 | |
---|
| 1409 | // allocate a vseg in local cluster |
---|
| 1410 | vseg = vseg_alloc(); |
---|
| 1411 | |
---|
[440] | 1412 | if( vseg == NULL ) return -1; // cannot allocate a local vseg |
---|
[388] | 1413 | |
---|
| 1414 | // initialise local vseg from reference |
---|
| 1415 | vseg_init_from_ref( vseg , vseg_xp ); |
---|
| 1416 | |
---|
| 1417 | // register local vseg in local VMM |
---|
[595] | 1418 | vmm_vseg_attach( vmm , vseg ); |
---|
[388] | 1419 | } |
---|
[595] | 1420 | |
---|
[388] | 1421 | // success |
---|
| 1422 | *found_vseg = vseg; |
---|
[394] | 1423 | return 0; |
---|
[388] | 1424 | |
---|
| 1425 | } // end vmm_get_vseg() |
---|
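| | // Typical use (illustrative sketch only, not actual kernel code) : a syscall |
---|
| | // checking a user buffer address could call this function as follows, |
---|
| | // where <process> and <user_buf> are hypothetical caller variables : |
---|
| | //   vseg_t * vseg; |
---|
| | //   if( vmm_get_vseg( process , (intptr_t)user_buf , &vseg ) ) return -1; |
---|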
| 1426 | |
---|
[407] | 1427 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
| 1428 | // This static function computes the target cluster to allocate a physical page |
---|
| 1429 | // for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required) |
---|
| 1430 | // and returns an extended pointer on the allocated page descriptor. |
---|
[585] | 1431 | // It can be called by a thread running in any cluster. |
---|
[407] | 1432 | // The vseg cannot have the FILE type. |
---|
| 1433 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
| 1434 | static xptr_t vmm_page_allocate( vseg_t * vseg, |
---|
| 1435 | vpn_t vpn ) |
---|
| 1436 | { |
---|
[433] | 1437 | |
---|
[438] | 1438 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
[595] | 1439 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
| 1440 | thread_t * this = CURRENT_THREAD; |
---|
[438] | 1441 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1442 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
| 1443 | __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle ); |
---|
[433] | 1444 | #endif |
---|
| 1445 | |
---|
[407] | 1446 | // compute target cluster |
---|
| 1447 | page_t * page_ptr; |
---|
| 1448 | cxy_t page_cxy; |
---|
| 1449 | kmem_req_t req; |
---|
[577] | 1450 | uint32_t index; |
---|
[407] | 1451 | |
---|
[577] | 1452 | uint32_t type = vseg->type; |
---|
| 1453 | uint32_t flags = vseg->flags; |
---|
| 1454 | uint32_t x_size = LOCAL_CLUSTER->x_size; |
---|
| 1455 | uint32_t y_size = LOCAL_CLUSTER->y_size; |
---|
[407] | 1456 | |
---|
[567] | 1457 | // check vseg type |
---|
| 1458 | assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" ); |
---|
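| | // (FILE vsegs get their physical pages from the file mapper, in vmm_get_one_ppn()) |
---|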
[407] | 1459 | |
---|
| 1460 | if( flags & VSEG_DISTRIB ) // distributed => cxy depends on vpn LSB |
---|
| 1461 | { |
---|
[577] | 1462 | index = vpn & ((x_size * y_size) - 1); |
---|
| 1463 | page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) ); |
---|
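| | // e.g. (hypothetical 4x2 mesh, x_size * y_size = 8 clusters) : vpn = 0x13 gives |
---|
| | // index = 0x13 & 0x7 = 3, hence cluster (x = 3/2 = 1 , y = 3%2 = 1) ; |
---|
| | // successive vpn are thus distributed round-robin over the 8 clusters. |
---|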
[561] | 1464 | |
---|
[577] | 1465 | // if the cluster selected from the VPN's LSBs is not active, select one randomly |
---|
| 1466 | if ( cluster_is_active( page_cxy ) == false ) |
---|
| 1467 | { |
---|
| 1468 | page_cxy = cluster_random_select(); |
---|
[561] | 1469 | } |
---|
[407] | 1470 | } |
---|
| 1471 | else // other cases => cxy specified in vseg |
---|
| 1472 | { |
---|
[561] | 1473 | page_cxy = vseg->cxy; |
---|
[407] | 1474 | } |
---|
| 1475 | |
---|
| 1476 | // allocate a physical page from target cluster |
---|
| 1477 | if( page_cxy == local_cxy ) // target cluster is the local cluster |
---|
| 1478 | { |
---|
| 1479 | req.type = KMEM_PAGE; |
---|
| 1480 | req.size = 0; |
---|
| 1481 | req.flags = AF_NONE; |
---|
| 1482 | page_ptr = (page_t *)kmem_alloc( &req ); |
---|
| 1483 | } |
---|
| 1484 | else // target cluster is not the local cluster |
---|
| 1485 | { |
---|
| 1486 | rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr ); |
---|
| 1487 | } |
---|
| 1488 | |
---|
[438] | 1489 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
[595] | 1490 | cycle = (uint32_t)hal_get_cycles(); |
---|
[438] | 1491 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1492 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
| 1493 | __FUNCTION__ , this->process->pid, this->trdid, vpn, |
---|
| 1494 | ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) , cycle ); |
---|
[433] | 1495 | #endif |
---|
| 1496 | |
---|
[407] | 1497 | if( page_ptr == NULL ) return XPTR_NULL; |
---|
| 1498 | else return XPTR( page_cxy , page_ptr ); |
---|
| 1499 | |
---|
| 1500 | } // end vmm_page_allocate() |
---|
| 1501 | |
---|
[313] | 1502 | //////////////////////////////////////// |
---|
| 1503 | error_t vmm_get_one_ppn( vseg_t * vseg, |
---|
| 1504 | vpn_t vpn, |
---|
| 1505 | ppn_t * ppn ) |
---|
| 1506 | { |
---|
| 1507 | error_t error; |
---|
[407] | 1508 | xptr_t page_xp; // extended pointer on physical page descriptor |
---|
[606] | 1509 | uint32_t page_id; // missing page index in vseg mapper |
---|
[406] | 1510 | uint32_t type; // vseg type; |
---|
[313] | 1511 | |
---|
[406] | 1512 | type = vseg->type; |
---|
[606] | 1513 | page_id = vpn - vseg->vpn_base; |
---|
[313] | 1514 | |
---|
[438] | 1515 | #if DEBUG_VMM_GET_ONE_PPN |
---|
[595] | 1516 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
| 1517 | thread_t * this = CURRENT_THREAD; |
---|
| 1518 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
[606] | 1519 | printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", |
---|
| 1520 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); |
---|
[433] | 1521 | #endif |
---|
[313] | 1522 | |
---|
[406] | 1523 | // FILE type : get the physical page from the file mapper |
---|
[313] | 1524 | if( type == VSEG_TYPE_FILE ) |
---|
| 1525 | { |
---|
[406] | 1526 | // get extended pointer on mapper |
---|
[407] | 1527 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
[313] | 1528 | |
---|
[567] | 1529 | assert( (mapper_xp != XPTR_NULL), |
---|
| 1530 | "mapper not defined for a FILE vseg\n" ); |
---|
[406] | 1531 | |
---|
[606] | 1532 | // get extended pointer on page descriptor |
---|
| 1533 | page_xp = mapper_remote_get_page( mapper_xp , page_id ); |
---|
[406] | 1534 | |
---|
[606] | 1535 | if ( page_xp == XPTR_NULL ) return EINVAL; |
---|
[313] | 1536 | } |
---|
| 1537 | |
---|
[406] | 1538 | // Other types : allocate a physical page from target cluster, |
---|
[407] | 1539 | // as defined by vseg type and vpn value |
---|
[313] | 1540 | else |
---|
| 1541 | { |
---|
[433] | 1542 | // allocate one physical page |
---|
[407] | 1543 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
[406] | 1544 | |
---|
[407] | 1545 | if( page_xp == XPTR_NULL ) return ENOMEM; |
---|
[313] | 1546 | |
---|
[406] | 1547 | // initialise missing page from .elf file mapper for DATA and CODE types |
---|
[440] | 1548 | // the vseg->mapper_xp field is an extended pointer on the .elf file mapper |
---|
[313] | 1549 | if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) |
---|
| 1550 | { |
---|
[406] | 1551 | // get extended pointer on mapper |
---|
| 1552 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
[313] | 1553 | |
---|
[567] | 1554 | assert( (mapper_xp != XPTR_NULL), |
---|
| 1555 | "mapper not defined for a CODE or DATA vseg\n" ); |
---|
[406] | 1556 | |
---|
| 1557 | // compute missing page offset in vseg |
---|
[606] | 1558 | uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT; |
---|
[406] | 1559 | |
---|
[313] | 1560 | // compute missing page offset in .elf file |
---|
[406] | 1561 | uint32_t elf_offset = vseg->file_offset + offset; |
---|
[313] | 1562 | |
---|
[438] | 1563 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1564 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1565 | printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", |
---|
| 1566 | __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); |
---|
[433] | 1567 | #endif |
---|
[406] | 1568 | // compute extended pointer on page base |
---|
[407] | 1569 | xptr_t base_xp = ppm_page2base( page_xp ); |
---|
[313] | 1570 | |
---|
[406] | 1571 | // file_size (in .elf mapper) can be smaller than vseg_size (BSS) |
---|
| 1572 | uint32_t file_size = vseg->file_size; |
---|
| 1573 | |
---|
| 1574 | if( file_size < offset ) // missing page fully in BSS |
---|
[313] | 1575 | { |
---|
[406] | 1576 | |
---|
[438] | 1577 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1578 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1579 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", |
---|
| 1580 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
[433] | 1581 | #endif |
---|
[407] | 1582 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
[313] | 1583 | { |
---|
[315] | 1584 | memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
[313] | 1585 | } |
---|
| 1586 | else |
---|
| 1587 | { |
---|
[315] | 1588 | hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
[313] | 1589 | } |
---|
| 1590 | } |
---|
[406] | 1591 | else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper |
---|
[315] | 1592 | { |
---|
[406] | 1593 | |
---|
[438] | 1594 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1595 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1596 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", |
---|
| 1597 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
[433] | 1598 | #endif |
---|
[606] | 1599 | error = mapper_move_kernel( mapper_xp, |
---|
| 1600 | true, // to_buffer |
---|
| 1601 | elf_offset, |
---|
| 1602 | base_xp, |
---|
| 1603 | CONFIG_PPM_PAGE_SIZE ); |
---|
[313] | 1604 | if( error ) return EINVAL; |
---|
| 1605 | } |
---|
[406] | 1606 | else // both in mapper and in BSS : |
---|
| 1607 | // - (file_size - offset) bytes from mapper |
---|
| 1608 | // - (page_size + offset - file_size) bytes from BSS |
---|
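| | // e.g. (hypothetical values, 4 Kbytes pages) : file_size = 0x2A00 and |
---|
| | // offset = 0x2000 give 0xA00 bytes copied from the mapper and |
---|
| | // 0x600 bytes reset to zero for the BSS part of the page. |
---|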
[313] | 1609 | { |
---|
[406] | 1610 | |
---|
[438] | 1611 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1612 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1613 | printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" |
---|
[433] | 1614 | " %d bytes from mapper / %d bytes from BSS\n", |
---|
[595] | 1615 | __FUNCTION__, this->process->pid, this->trdid, vpn, |
---|
[407] | 1616 | file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
[433] | 1617 | #endif |
---|
[313] | 1618 | // initialize mapper part |
---|
[606] | 1619 | error = mapper_move_kernel( mapper_xp, |
---|
| 1620 | true, // to buffer |
---|
| 1621 | elf_offset, |
---|
| 1622 | base_xp, |
---|
| 1623 | file_size - offset ); |
---|
[313] | 1624 | if( error ) return EINVAL; |
---|
| 1625 | |
---|
| 1626 | // initialize BSS part |
---|
[407] | 1627 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
[313] | 1628 | { |
---|
[406] | 1629 | memset( GET_PTR( base_xp ) + file_size - offset , 0 , |
---|
| 1630 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
[313] | 1631 | } |
---|
| 1632 | else |
---|
| 1633 | { |
---|
[406] | 1634 | hal_remote_memset( base_xp + file_size - offset , 0 , |
---|
| 1635 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
[313] | 1636 | } |
---|
| 1637 | } |
---|
| 1638 | } // end initialisation for CODE or DATA types |
---|
| 1639 | } |
---|
| 1640 | |
---|
| 1641 | // return ppn |
---|
[407] | 1642 | *ppn = ppm_page2ppn( page_xp ); |
---|
[406] | 1643 | |
---|
[438] | 1644 | #if DEBUG_VMM_GET_ONE_PPN |
---|
[595] | 1645 | cycle = (uint32_t)hal_get_cycles(); |
---|
| 1646 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
| 1647 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
| 1648 | __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); |
---|
[433] | 1649 | #endif |
---|
[406] | 1650 | |
---|
[313] | 1651 | return 0; |
---|
| 1652 | |
---|
| 1653 | } // end vmm_get_one_ppn() |
---|
| 1654 | |
---|
[585] | 1655 | /////////////////////////////////////////////////// |
---|
| 1656 | error_t vmm_handle_page_fault( process_t * process, |
---|
| 1657 | vpn_t vpn ) |
---|
[1] | 1658 | { |
---|
[585] | 1659 | vseg_t * vseg; // vseg containing vpn |
---|
| 1660 | uint32_t new_attr; // new PTE_ATTR value |
---|
| 1661 | ppn_t new_ppn; // new PTE_PPN value |
---|
| 1662 | uint32_t ref_attr; // PTE_ATTR value in reference GPT |
---|
| 1663 | ppn_t ref_ppn; // PTE_PPN value in reference GPT |
---|
| 1664 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
| 1665 | process_t * ref_ptr; // reference process for missing vpn |
---|
| 1666 | xptr_t local_gpt_xp; // extended pointer on local GPT |
---|
| 1667 | xptr_t local_lock_xp; // extended pointer on local GPT lock |
---|
| 1668 | xptr_t ref_gpt_xp; // extended pointer on reference GPT |
---|
| 1669 | xptr_t ref_lock_xp; // extended pointer on reference GPT lock |
---|
| 1670 | error_t error; // value returned by called functions |
---|
[1] | 1671 | |
---|
[585] | 1672 | // get local vseg (access to reference VSL can be required) |
---|
| 1673 | error = vmm_get_vseg( process, |
---|
| 1674 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
| 1675 | &vseg ); |
---|
[441] | 1676 | |
---|
[585] | 1677 | if( error ) |
---|
| 1678 | { |
---|
[595] | 1679 | printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n", |
---|
[585] | 1680 | __FUNCTION__ , vpn , process->pid ); |
---|
| 1681 | |
---|
| 1682 | return EXCP_USER_ERROR; |
---|
| 1683 | } |
---|
| 1684 | |
---|
| 1685 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
| 1686 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
[567] | 1687 | thread_t * this = CURRENT_THREAD; |
---|
[585] | 1688 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1689 | printk("\n[%s] thread[%x,%x] enter for vpn %x / %s / cycle %d\n", |
---|
[585] | 1690 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle ); |
---|
[433] | 1691 | #endif |
---|
[406] | 1692 | |
---|
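| | // The handling depends on the vseg type : |
---|
| | // - private vsegs (STACK, CODE) are only mapped in the local GPT ; |
---|
| | // - public vsegs are mapped in the reference GPT, other GPTs being copies. |
---|
| | // For a public vseg, a "false" page fault (vpn already mapped in the reference |
---|
| | // GPT) is solved by simply copying the reference PTE into the local GPT. |
---|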
[585] | 1693 | //////////////// private vseg => access only the local GPT |
---|
| 1694 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
[438] | 1695 | { |
---|
[585] | 1696 | // build extended pointer on local GPT and local GPT lock |
---|
| 1697 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
| 1698 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
[407] | 1699 | |
---|
[585] | 1700 | // take local GPT lock in write mode |
---|
| 1701 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
[407] | 1702 | |
---|
[585] | 1703 | // check VPN still unmapped in local GPT |
---|
[595] | 1704 | |
---|
[585] | 1705 | // do nothing if VPN has been mapped by a concurrent page_fault |
---|
| 1706 | hal_gpt_get_pte( local_gpt_xp, |
---|
| 1707 | vpn, |
---|
| 1708 | &new_attr, |
---|
| 1709 | &new_ppn ); |
---|
[407] | 1710 | |
---|
[585] | 1711 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
| 1712 | { |
---|
| 1713 | // allocate and initialise a physical page depending on the vseg type |
---|
| 1714 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
[407] | 1715 | |
---|
[585] | 1716 | if( error ) |
---|
[408] | 1717 | { |
---|
| 1718 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
| 1719 | __FUNCTION__ , process->pid , vpn ); |
---|
[1] | 1720 | |
---|
[585] | 1721 | // release local GPT lock in write mode |
---|
| 1722 | remote_rwlock_wr_release( local_lock_xp ); |
---|
[406] | 1723 | |
---|
[585] | 1724 | return EXCP_KERNEL_PANIC; |
---|
[407] | 1725 | } |
---|
| 1726 | |
---|
[408] | 1727 | // define new_attr from vseg flags |
---|
[407] | 1728 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
| 1729 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
| 1730 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
| 1731 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
| 1732 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
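| | // e.g. a user DATA vseg (assuming it carries the VSEG_USER, VSEG_WRITE and |
---|
| | // VSEG_CACHE flags) gets GPT_MAPPED, GPT_SMALL, GPT_USER, GPT_WRITABLE |
---|
| | // and GPT_CACHABLE, but not GPT_EXECUTABLE. |
---|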
| 1733 | |
---|
[585] | 1734 | // set PTE (PPN & attribute) to local GPT |
---|
| 1735 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
[408] | 1736 | vpn, |
---|
| 1737 | new_attr, |
---|
| 1738 | new_ppn ); |
---|
[585] | 1739 | if ( error ) |
---|
[407] | 1740 | { |
---|
[585] | 1741 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n", |
---|
[407] | 1742 | __FUNCTION__ , process->pid , vpn ); |
---|
[585] | 1743 | |
---|
| 1744 | // release local GPT lock in write mode |
---|
| 1745 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1746 | |
---|
| 1747 | return EXCP_KERNEL_PANIC; |
---|
[407] | 1748 | } |
---|
| 1749 | } |
---|
[585] | 1750 | |
---|
| 1751 | // release local GPT lock in write mode |
---|
| 1752 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1753 | |
---|
| 1754 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
| 1755 | cycle = (uint32_t)hal_get_cycles(); |
---|
| 1756 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1757 | printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
[585] | 1758 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
| 1759 | #endif |
---|
| 1760 | return EXCP_NON_FATAL; |
---|
| 1761 | |
---|
| 1762 | } // end local GPT access |
---|
| 1763 | |
---|
| 1764 | //////////// public vseg => access reference GPT |
---|
| 1765 | else |
---|
| 1766 | { |
---|
| 1767 | // get reference process cluster and local pointer |
---|
| 1768 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
| 1769 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
| 1770 | |
---|
| 1771 | // build extended pointer on reference GPT and reference GPT lock |
---|
| 1772 | ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
| 1773 | ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
| 1774 | |
---|
| 1775 | // build extended pointer on local GPT and local GPT lock |
---|
| 1776 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
| 1777 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
| 1778 | |
---|
| 1779 | // take reference GPT lock in read mode |
---|
| 1780 | remote_rwlock_rd_acquire( ref_lock_xp ); |
---|
| 1781 | |
---|
| 1782 | // get directly PPN & attributes from reference GPT |
---|
| 1783 | // this can avoid a costly RPC for a false page fault |
---|
| 1784 | hal_gpt_get_pte( ref_gpt_xp, |
---|
| 1785 | vpn, |
---|
| 1786 | &ref_attr, |
---|
| 1787 | &ref_ppn ); |
---|
| 1788 | |
---|
| 1789 | // release reference GPT lock in read mode |
---|
| 1790 | remote_rwlock_rd_release( ref_lock_xp ); |
---|
| 1791 | |
---|
| 1792 | if( ref_attr & GPT_MAPPED ) // false page fault => update local GPT |
---|
[1] | 1793 | { |
---|
[585] | 1794 | // take local GPT lock in write mode |
---|
| 1795 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
| 1796 | |
---|
| 1797 | // check VPN still unmapped in local GPT |
---|
| 1798 | hal_gpt_get_pte( local_gpt_xp, |
---|
| 1799 | vpn, |
---|
| 1800 | &new_attr, |
---|
| 1801 | &new_ppn ); |
---|
[1] | 1802 | |
---|
[585] | 1803 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
| 1804 | { |
---|
| 1805 | // update local GPT from reference GPT |
---|
| 1806 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
| 1807 | vpn, |
---|
| 1808 | ref_attr, |
---|
| 1809 | ref_ppn ); |
---|
| 1810 | if( error ) |
---|
| 1811 | { |
---|
[595] | 1812 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n", |
---|
[585] | 1813 | __FUNCTION__ , process->pid , vpn ); |
---|
| 1814 | |
---|
| 1815 | // release local GPT lock in write mode |
---|
| 1816 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1817 | |
---|
| 1818 | return EXCP_KERNEL_PANIC; |
---|
| 1819 | } |
---|
| 1820 | } |
---|
| 1821 | else // VPN has been mapped by a concurrent page_fault |
---|
| 1822 | { |
---|
| 1823 | // keep PTE from local GPT |
---|
| 1824 | ref_attr = new_attr; |
---|
| 1825 | ref_ppn = new_ppn; |
---|
| 1826 | } |
---|
| 1827 | |
---|
| 1828 | // release local GPT lock in write mode |
---|
| 1829 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1830 | |
---|
| 1831 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
[433] | 1832 | cycle = (uint32_t)hal_get_cycles(); |
---|
[585] | 1833 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1834 | printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
[585] | 1835 | __FUNCTION__, vpn, ref_ppn, ref_attr, cycle ); |
---|
[433] | 1836 | #endif |
---|
[585] | 1837 | return EXCP_NON_FATAL; |
---|
| 1838 | } |
---|
| 1839 | else // true page fault => update reference GPT |
---|
| 1840 | { |
---|
| 1841 | // take reference GPT lock in write mode |
---|
| 1842 | remote_rwlock_wr_acquire( ref_lock_xp ); |
---|
| 1843 | |
---|
| 1844 | // check VPN still unmapped in reference GPT |
---|
| 1845 | // do nothing if VPN has been mapped by a concurrent page_fault |
---|
| 1846 | hal_gpt_get_pte( ref_gpt_xp, |
---|
| 1847 | vpn, |
---|
| 1848 | &ref_attr, |
---|
| 1849 | &ref_ppn ); |
---|
[406] | 1850 | |
---|
[585] | 1851 | if( (ref_attr & GPT_MAPPED) == 0 ) // VPN actually unmapped |
---|
| 1852 | { |
---|
| 1853 | // allocate and initialise a physical page depending on the vseg type |
---|
| 1854 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
[1] | 1855 | |
---|
[585] | 1856 | if( error ) |
---|
| 1857 | { |
---|
| 1858 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
| 1859 | __FUNCTION__ , process->pid , vpn ); |
---|
[313] | 1860 | |
---|
[585] | 1861 | // release reference GPT lock in write mode |
---|
| 1862 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
| 1863 | |
---|
| 1864 | return EXCP_KERNEL_PANIC; |
---|
| 1865 | } |
---|
[1] | 1866 | |
---|
[585] | 1867 | // define new_attr from vseg flags |
---|
| 1868 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
| 1869 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
| 1870 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
| 1871 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
| 1872 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
[440] | 1873 | |
---|
[585] | 1874 | // update reference GPT |
---|
| 1875 | error = hal_gpt_set_pte( ref_gpt_xp, |
---|
| 1876 | vpn, |
---|
| 1877 | new_attr, |
---|
| 1878 | new_ppn ); |
---|
| 1879 | |
---|
| 1880 | // update local GPT (protected by reference GPT lock) |
---|
| 1881 | error |= hal_gpt_set_pte( local_gpt_xp, |
---|
| 1882 | vpn, |
---|
| 1883 | new_attr, |
---|
| 1884 | new_ppn ); |
---|
| 1885 | |
---|
| 1886 | if( error ) |
---|
| 1887 | { |
---|
| 1888 | printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n", |
---|
| 1889 | __FUNCTION__ , process->pid , vpn ); |
---|
| 1890 | |
---|
| 1891 | // release reference GPT lock in write mode |
---|
| 1892 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
| 1893 | |
---|
| 1894 | return EXCP_KERNEL_PANIC; |
---|
| 1895 | } |
---|
| 1896 | } |
---|
| 1897 | |
---|
| 1898 | // release reference GPT lock in write mode |
---|
| 1899 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
| 1900 | |
---|
[440] | 1901 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
[585] | 1902 | cycle = (uint32_t)hal_get_cycles(); |
---|
[469] | 1903 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1904 | printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
[585] | 1905 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
[435] | 1906 | #endif |
---|
[585] | 1907 | return EXCP_NON_FATAL; |
---|
| 1908 | } |
---|
| 1909 | } |
---|
| 1910 | } // end vmm_handle_page_fault() |
---|
[435] | 1911 | |
---|
[585] | 1912 | //////////////////////////////////////////// |
---|
| 1913 | error_t vmm_handle_cow( process_t * process, |
---|
| 1914 | vpn_t vpn ) |
---|
| 1915 | { |
---|
| 1916 | vseg_t * vseg; // vseg containing vpn |
---|
| 1917 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
| 1918 | process_t * ref_ptr; // reference process for missing vpn |
---|
| 1919 | xptr_t gpt_xp; // extended pointer on GPT |
---|
| 1920 | xptr_t gpt_lock_xp; // extended pointer on GPT lock |
---|
| 1921 | uint32_t old_attr; // current PTE_ATTR value |
---|
| 1922 | ppn_t old_ppn; // current PTE_PPN value |
---|
| 1923 | uint32_t new_attr; // new PTE_ATTR value |
---|
| 1924 | ppn_t new_ppn; // new PTE_PPN value |
---|
| 1925 | error_t error; |
---|
[1] | 1926 | |
---|
[585] | 1927 | #if DEBUG_VMM_HANDLE_COW |
---|
| 1928 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
| 1929 | thread_t * this = CURRENT_THREAD; |
---|
| 1930 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 1931 | printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n", |
---|
[585] | 1932 | __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
| 1933 | #endif |
---|
| 1934 | |
---|
| 1935 | // get local vseg |
---|
| 1936 | error = vmm_get_vseg( process, |
---|
| 1937 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
| 1938 | &vseg ); |
---|
| 1939 | |
---|
[440] | 1940 | if( error ) |
---|
[1] | 1941 | { |
---|
[595] | 1942 | printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n", |
---|
[585] | 1943 | __FUNCTION__, vpn, process->pid ); |
---|
| 1944 | |
---|
| 1945 | return EXCP_KERNEL_PANIC; |
---|
[440] | 1946 | } |
---|
[407] | 1947 | |
---|
[585] | 1948 | // get reference GPT cluster and local pointer |
---|
| 1949 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
| 1950 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
[407] | 1951 | |
---|
[585] | 1952 | // build relevant extended pointers on GPT and GPT lock |
---|
| 1953 | // - access local GPT for a private vseg |
---|
| 1954 | // - access reference GPT for a public vseg |
---|
| 1955 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
[440] | 1956 | { |
---|
[585] | 1957 | gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
| 1958 | gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
[1] | 1959 | } |
---|
[440] | 1960 | else |
---|
[1] | 1961 | { |
---|
[585] | 1962 | gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
| 1963 | gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
[1] | 1964 | } |
---|
| 1965 | |
---|
[585] | 1966 | // take GPT lock in write mode |
---|
| 1967 | remote_rwlock_wr_acquire( gpt_lock_xp ); |
---|
[441] | 1968 | |
---|
[585] | 1969 | // get current PTE from the relevant GPT |
---|
| 1970 | hal_gpt_get_pte( gpt_xp, |
---|
| 1971 | vpn, |
---|
| 1972 | &old_attr, |
---|
| 1973 | &old_ppn ); |
---|
[441] | 1974 | |
---|
[585] | 1975 | // the PTE must be mapped for a COW |
---|
| 1976 | if( (old_attr & GPT_MAPPED) == 0 ) |
---|
| 1977 | { |
---|
| 1978 | printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n", |
---|
| 1979 | __FUNCTION__, vpn, process->pid ); |
---|
[407] | 1980 | |
---|
[585] | 1981 | // release GPT lock in write mode |
---|
| 1982 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
[407] | 1983 | |
---|
[585] | 1984 | return EXCP_KERNEL_PANIC; |
---|
[407] | 1985 | } |
---|
| 1986 | |
---|
[585] | 1987 | // get extended pointer, cluster and local pointer on physical page descriptor |
---|
| 1988 | xptr_t page_xp = ppm_ppn2page( old_ppn ); |
---|
| 1989 | cxy_t page_cxy = GET_CXY( page_xp ); |
---|
| 1990 | page_t * page_ptr = GET_PTR( page_xp ); |
---|
[435] | 1991 | |
---|
[585] | 1992 | // get extended pointers on forks and lock field in page descriptor |
---|
| 1993 | xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
| 1994 | xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
[407] | 1995 | |
---|
[585] | 1996 | // take lock protecting "forks" counter |
---|
| 1997 | remote_busylock_acquire( forks_lock_xp ); |
---|
[407] | 1998 | |
---|
[585] | 1999 | // get number of pending forks from page descriptor |
---|
| 2000 | uint32_t forks = hal_remote_l32( forks_xp ); |
---|
[441] | 2001 | |
---|
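| | // if the page is still shared with pending fork copies (forks != 0), the writer |
---|
| | // must get a private copy ; otherwise it simply regains write access to the page. |
---|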
[585] | 2002 | if( forks ) // pending fork => allocate a new page, and copy old to new |
---|
| 2003 | { |
---|
| 2004 | // allocate a new physical page |
---|
| 2005 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
| 2006 | if( page_xp == XPTR_NULL ) |
---|
| 2007 | { |
---|
| 2008 | printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n", |
---|
| 2009 | __FUNCTION__ , vpn, process->pid ); |
---|
[441] | 2010 | |
---|
[585] | 2011 | // release GPT lock in write mode |
---|
| 2012 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
[441] | 2013 | |
---|
[585] | 2014 | // release lock protecting "forks" counter |
---|
| 2015 | remote_busylock_release( forks_lock_xp ); |
---|
[441] | 2016 | |
---|
[585] | 2017 | return EXCP_KERNEL_PANIC; |
---|
| 2018 | } |
---|
[441] | 2019 | |
---|
[585] | 2020 | // compute allocated page PPN |
---|
| 2021 | new_ppn = ppm_page2ppn( page_xp ); |
---|
[441] | 2022 | |
---|
[585] | 2023 | // copy old page content to new page (either page can be in a remote cluster) |
---|
| 2024 | xptr_t old_base_xp = ppm_ppn2base( old_ppn ); |
---|
| 2025 | xptr_t new_base_xp = ppm_ppn2base( new_ppn ); |
---|
| 2026 | hal_remote_memcpy( new_base_xp, |
---|
| 2027 | old_base_xp, |
---|
| 2028 | CONFIG_PPM_PAGE_SIZE ); |
---|
[441] | 2029 | |
---|
[585] | 2030 | // decrement pending forks counter in page descriptor |
---|
| 2031 | hal_remote_atomic_add( forks_xp , -1 ); |
---|
[441] | 2032 | |
---|
[585] | 2033 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
| 2034 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 2035 | printk("\n[%s] thread[%x,%x] : pending forks => allocate a new PPN %x\n", |
---|
[585] | 2036 | __FUNCTION__, process->pid, this->trdid, new_ppn ); |
---|
| 2037 | #endif |
---|
[440] | 2038 | |
---|
[585] | 2039 | } |
---|
| 2040 | else // no pending fork => keep the existing page |
---|
| 2041 | { |
---|
[1] | 2042 | |
---|
[585] | 2043 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
| 2044 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 2045 | printk("\n[%s] thread[%x,%x] no pending forks => keep existing PPN %x\n", |
---|
[585] | 2046 | __FUNCTION__, process->pid, this->trdid, old_ppn ); |
---|
| 2047 | #endif |
---|
| 2048 | new_ppn = old_ppn; |
---|
| 2049 | } |
---|
[1] | 2050 | |
---|
[585] | 2051 | // release lock protecting "forks" counter |
---|
| 2052 | remote_busylock_release( forks_lock_xp ); |
---|
| 2053 | |
---|
| 2054 | // build new_attr : reset COW and set WRITABLE, |
---|
| 2055 | new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW); |
---|
| 2056 | |
---|
| 2057 | // update the relevant GPT |
---|
| 2058 | // - private vseg => update local GPT |
---|
| 2059 | // - public vseg => update all GPT copies |
---|
| 2060 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
[1] | 2061 | { |
---|
[585] | 2062 | hal_gpt_set_pte( gpt_xp, |
---|
| 2063 | vpn, |
---|
| 2064 | new_attr, |
---|
| 2065 | new_ppn ); |
---|
[1] | 2066 | } |
---|
[585] | 2067 | else |
---|
[1] | 2068 | { |
---|
[585] | 2069 | if( ref_cxy == local_cxy ) // reference cluster is local |
---|
| 2070 | { |
---|
| 2071 | vmm_global_update_pte( process, |
---|
| 2072 | vpn, |
---|
| 2073 | new_attr, |
---|
| 2074 | new_ppn ); |
---|
| 2075 | } |
---|
| 2076 | else // reference cluster is remote |
---|
| 2077 | { |
---|
| 2078 | rpc_vmm_global_update_pte_client( ref_cxy, |
---|
| 2079 | ref_ptr, |
---|
| 2080 | vpn, |
---|
| 2081 | new_attr, |
---|
| 2082 | new_ppn ); |
---|
| 2083 | } |
---|
[1] | 2084 | } |
---|
| 2085 | |
---|
[585] | 2086 | // release GPT lock in write mode |
---|
| 2087 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
[21] | 2088 | |
---|
[585] | 2089 | #if DEBUG_VMM_HANDLE_COW |
---|
| 2090 | cycle = (uint32_t)hal_get_cycles(); |
---|
| 2091 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 2092 | printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n", |
---|
[585] | 2093 | __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
| 2094 | #endif |
---|
[313] | 2095 | |
---|
[585] | 2096 | return EXCP_NON_FATAL; |
---|
[1] | 2097 | |
---|
[585] | 2098 | } // end vmm_handle_cow() |
---|
| 2099 | |
---|