/*
 * vmm.c - virtual memory manager related operations definition.
 *
 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
 *           Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_gpt.h>
#include <hal_vmm.h>
#include <hal_macros.h>
#include <printk.h>
#include <memcpy.h>
#include <remote_rwlock.h>
#include <remote_queuelock.h>
#include <list.h>
#include <xlist.h>
#include <bits.h>
#include <process.h>
#include <thread.h>
#include <vseg.h>
#include <cluster.h>
#include <scheduler.h>
#include <vfs.h>
#include <mapper.h>
#include <page.h>
#include <kmem.h>
#include <vmm.h>
#include <hal_exception.h>

//////////////////////////////////////////////////////////////////////////////////
//   Extern global variables
//////////////////////////////////////////////////////////////////////////////////

extern process_t process_zero;      // allocated in cluster.c

///////////////////////////////////////
error_t vmm_init( process_t * process )
{
    error_t    error;
    vseg_t   * vseg_kentry;
    vseg_t   * vseg_args;
    vseg_t   * vseg_envs;
    intptr_t   base;
    intptr_t   size;

#if DEBUG_VMM_INIT
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_INIT )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, process->pid , cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // initialize local list of vsegs
    vmm->vsegs_nr = 0;
    xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
    remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );

    assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
            <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );

    assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
            "no more than 32 threads per cluster for a single process\n");

    assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
             (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
            "STACK zone too small\n");

    // register kentry vseg in VSL
    base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_kentry = vmm_create_vseg( process,
                                   VSEG_TYPE_CODE,
                                   base,
                                   size,
                                   0,             // file_offset unused
                                   0,             // file_size unused
                                   XPTR_NULL,     // mapper_xp unused
                                   local_cxy );

    if( vseg_kentry == NULL )
    {
        printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->kent_vpn_base = base;

    // register args vseg in VSL
    base = (CONFIG_VMM_KENTRY_BASE +
            CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_args = vmm_create_vseg( process,
                                 VSEG_TYPE_DATA,
                                 base,
                                 size,
                                 0,             // file_offset unused
                                 0,             // file_size unused
                                 XPTR_NULL,     // mapper_xp unused
                                 local_cxy );

    if( vseg_args == NULL )
    {
        printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->args_vpn_base = base;

    // register the envs vseg in VSL
    base = (CONFIG_VMM_KENTRY_BASE +
            CONFIG_VMM_KENTRY_SIZE +
            CONFIG_VMM_ARGS_SIZE   ) << CONFIG_PPM_PAGE_SHIFT;
    size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;

    vseg_envs = vmm_create_vseg( process,
                                 VSEG_TYPE_DATA,
                                 base,
                                 size,
                                 0,             // file_offset unused
                                 0,             // file_size unused
                                 XPTR_NULL,     // mapper_xp unused
                                 local_cxy );

    if( vseg_envs == NULL )
    {
        printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ );
        return -1;
    }

    vmm->envs_vpn_base = base;

    // create GPT (empty)
    error = hal_gpt_create( &vmm->gpt );

    if( error )
    printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );

    // initialize GPT lock
    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );

    // architecture specific GPT initialisation
    // (for TSAR, identity map the kentry_vseg)
    error = hal_vmm_init( vmm );

    if( error )
    printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ );

    // initialize STACK allocator
    vmm->stack_mgr.bitmap   = 0;
    vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );

    // initialize MMAP allocator
    vmm->mmap_mgr.vpn_base       = CONFIG_VMM_HEAP_BASE;
    vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
    busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );

    uint32_t i;
    for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );

    // initialize instrumentation counters
    vmm->pgfault_nr = 0;

    hal_fence();

#if DEBUG_VMM_INIT
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_INIT )
printk("\n[%s] thread[%x,%x] exit / process %x / entry_point %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, process->vmm.entry_point, cycle );
#endif

    return 0;

}  // end vmm_init()

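/////////////////////////////////////////////////////////////////////////////////////////
// Informal sketch of the user virtual space layout assumed by vmm_init() above,
// expressed with the configuration symbols it uses (the actual values and the exact
// position of the elf zone come from kernel_config.h, so this is an assumption):
//
//   [ kentry | args | envs | ... | elf (code/data) | ... | mmap/heap zone | stacks ]
//   ^                             ^                       ^                ^
//   CONFIG_VMM_KENTRY_BASE        CONFIG_VMM_ELF_BASE     CONFIG_VMM_HEAP_BASE
//                                                                          CONFIG_VMM_STACK_BASE
//
// The three assertions in vmm_init() only check that the UTILS zone (kentry + args +
// envs) fits below CONFIG_VMM_ELF_BASE, and that the STACK zone can hold one
// CONFIG_VMM_STACK_SIZE slot per thread up to CONFIG_VMM_VSPACE_SIZE.
/////////////////////////////////////////////////////////////////////////////////////////
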
//////////////////////////////////////
void vmm_display( process_t * process,
                  bool_t      mapping )
{
    vmm_t * vmm = &process->vmm;
    gpt_t * gpt = &vmm->gpt;

    printk("\n***** VSL and GPT(%x) for process %x in cluster %x\n\n",
    process->vmm.gpt.ptr , process->pid , local_cxy );

    // get the locks protecting the VSL and the GPT
    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );

    // scan the list of vsegs
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    xptr_t   iter_xp;
    xptr_t   vseg_xp;
    vseg_t * vseg;
    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        printk(" - %s : base = %X / size = %X / npages = %d\n",
        vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size );

        if( mapping )
        {
            vpn_t    vpn;
            ppn_t    ppn;
            uint32_t attr;
            vpn_t    base = vseg->vpn_base;
            vpn_t    size = vseg->vpn_size;
            for( vpn = base ; vpn < (base+size) ; vpn++ )
            {
                hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
                if( attr & GPT_MAPPED )
                {
                    printk("    . vpn = %X / attr = %X / ppn = %X\n", vpn , attr , ppn );
                }
            }
        }
    }

    // release the locks
    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );

}  // vmm_display()

//////////////////////////////////////////
void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
                             vseg_t * vseg )
{
    // build extended pointer on rwlock protecting VSL
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

    // get rwlock in write mode
    remote_rwlock_wr_acquire( lock_xp );

    // update vseg descriptor
    vseg->vmm = vmm;

    // add vseg in vmm list
    xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
                    XPTR( local_cxy , &vseg->xlist ) );

    // release rwlock in write mode
    remote_rwlock_wr_release( lock_xp );
}

////////////////////////////////////////////
void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
                               vseg_t * vseg )
{
    // get vseg type
    uint32_t type = vseg->type;

    // build extended pointer on rwlock protecting VSL
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

    // get rwlock in write mode
    remote_rwlock_wr_acquire( lock_xp );

    // update vseg descriptor
    vseg->vmm = NULL;

    // remove vseg from VSL
    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );

    // release rwlock in write mode
    remote_rwlock_wr_release( lock_xp );

    // release the stack slot to VMM stack allocator if STACK type
    if( type == VSEG_TYPE_STACK )
    {
        // get pointer on stack allocator
        stack_mgr_t * mgr = &vmm->stack_mgr;

        // compute slot index
        uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);

        // update stacks_bitmap
        busylock_acquire( &mgr->lock );
        bitmap_clear( &mgr->bitmap , index );
        busylock_release( &mgr->lock );
    }

    // release the vseg to VMM mmap allocator if MMAP type
    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
    {
        // get pointer on mmap allocator
        mmap_mgr_t * mgr = &vmm->mmap_mgr;

        // compute zombi_list index
        uint32_t index = bits_log2( vseg->vpn_size );

        // update zombi_list
        busylock_acquire( &mgr->lock );
        list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
        busylock_release( &mgr->lock );
    }

    // release physical memory allocated for vseg descriptor if not MMAP type
    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
    {
        vseg_free( vseg );
    }

}  // end vmm_detach_vseg_from_vsl()

////////////////////////////////////////////////
void vmm_global_update_pte( process_t * process,
                            vpn_t       vpn,
                            uint32_t    attr,
                            ppn_t       ppn )
{
    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

#if DEBUG_VMM_UPDATE_PTE
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

    // check cluster is reference
    assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n");

    // get extended pointer on root of process copies xlist in owner cluster
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // loop on destination process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_UPDATE_PTE & 0x1)
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // update remote GPT
        hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
    }

#if DEBUG_VMM_UPDATE_PTE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_UPDATE_PTE < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
#endif

}  // end vmm_global_update_pte()

///////////////////////////////////////
void vmm_set_cow( process_t * process )
{
    vmm_t         * vmm;

    xlist_entry_t * process_root_ptr;
    xptr_t          process_root_xp;
    xptr_t          process_iter_xp;

    xptr_t          remote_process_xp;
    cxy_t           remote_process_cxy;
    process_t     * remote_process_ptr;
    xptr_t          remote_gpt_xp;

    xptr_t          vseg_root_xp;
    xptr_t          vseg_iter_xp;

    xptr_t          vseg_xp;
    vseg_t        * vseg;

    pid_t           pid;
    cxy_t           owner_cxy;
    lpid_t          owner_lpid;

#if DEBUG_VMM_SET_COW
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

    // check cluster is reference
    assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
            "local cluster is not process reference cluster\n");

    // get pointer on reference VMM
    vmm = &process->vmm;

    // get extended pointer on root of process copies xlist in owner cluster
    pid              = process->pid;
    owner_cxy        = CXY_FROM_PID( pid );
    owner_lpid       = LPID_FROM_PID( pid );
    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    process_root_xp  = XPTR( owner_cxy , process_root_ptr );

    // get extended pointer on root of vsegs xlist from reference VMM
    vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    // loop on destination process copies
    XLIST_FOREACH( process_root_xp , process_iter_xp )
    {
        // get cluster and local pointer on remote process
        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
        remote_process_ptr = GET_PTR( remote_process_xp );
        remote_process_cxy = GET_CXY( remote_process_xp );

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handling process %x in cluster %x\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , remote_process_cxy );
#endif

        // get extended pointer on remote gpt
        remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

        // loop on vsegs in (local) reference process VSL
        XLIST_FOREACH( vseg_root_xp , vseg_iter_xp )
        {
            // get pointer on vseg
            vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
            vseg    = GET_PTR( vseg_xp );

            assert( (GET_CXY( vseg_xp ) == local_cxy) ,
                    "all vsegs in reference VSL must be local\n" );

            // get vseg type, base and size
            uint32_t type     = vseg->type;
            vpn_t    vpn_base = vseg->vpn_base;
            vpn_t    vpn_size = vseg->vpn_size;

#if (DEBUG_VMM_SET_COW & 1)
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size );
#endif
            // only DATA, ANON and REMOTE vsegs
            if( (type == VSEG_TYPE_DATA)  ||
                (type == VSEG_TYPE_ANON)  ||
                (type == VSEG_TYPE_REMOTE) )
            {
                vpn_t      vpn;
                uint32_t   attr;
                ppn_t      ppn;
                xptr_t     page_xp;
                cxy_t      page_cxy;
                page_t   * page_ptr;
                xptr_t     forks_xp;
                xptr_t     lock_xp;

                // update flags in remote GPT
                hal_gpt_set_cow( remote_gpt_xp,
                                 vpn_base,
                                 vpn_size );

                // atomically increment pending forks counter in physical pages,
                // for all vseg pages that are mapped in reference cluster
                if( remote_process_cxy == local_cxy )
                {
                    // scan all pages in vseg
                    for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                    {
                        // get page attributes and PPN from reference GPT
                        hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn );

                        // atomically update pending forks counter if page is mapped
                        if( attr & GPT_MAPPED )
                        {
                            // get pointers and cluster on page descriptor
                            page_xp  = ppm_ppn2page( ppn );
                            page_cxy = GET_CXY( page_xp );
                            page_ptr = GET_PTR( page_xp );

                            // get extended pointers on "forks" and "lock"
                            forks_xp = XPTR( page_cxy , &page_ptr->forks );
                            lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                            // take lock protecting "forks" counter
                            remote_busylock_acquire( lock_xp );

                            // increment "forks"
                            hal_remote_atomic_add( forks_xp , 1 );

                            // release lock protecting "forks" counter
                            remote_busylock_release( lock_xp );
                        }
                    }   // end loop on vpn
                }   // end if local
            }   // end if vseg type
        }   // end loop on vsegs
    }   // end loop on process copies

#if DEBUG_VMM_SET_COW
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_SET_COW < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid , cycle );
#endif

}  // end vmm_set_cow()

/////////////////////////////////////////////////
error_t vmm_fork_copy( process_t * child_process,
                       xptr_t      parent_process_xp )
{
    error_t     error;
    cxy_t       parent_cxy;
    process_t * parent_process;
    vmm_t     * parent_vmm;
    xptr_t      parent_lock_xp;
    vmm_t     * child_vmm;
    xptr_t      iter_xp;
    xptr_t      parent_vseg_xp;
    vseg_t    * parent_vseg;
    vseg_t    * child_vseg;
    uint32_t    type;
    bool_t      cow;
    vpn_t       vpn;
    vpn_t       vpn_base;
    vpn_t       vpn_size;
    xptr_t      page_xp;        // extended pointer on page descriptor
    page_t    * page_ptr;
    cxy_t       page_cxy;
    xptr_t      forks_xp;       // extended pointer on forks counter in page descriptor
    xptr_t      lock_xp;        // extended pointer on lock protecting the forks counter
    xptr_t      parent_root_xp;
    bool_t      mapped;
    ppn_t       ppn;

#if DEBUG_VMM_FORK_COPY
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, cycle );
#endif

    // get parent process cluster and local pointer
    parent_cxy     = GET_CXY( parent_process_xp );
    parent_process = GET_PTR( parent_process_xp );

    // get local pointers on parent and child VMM
    parent_vmm = &parent_process->vmm;
    child_vmm  = &child_process->vmm;

    // get extended pointer on lock protecting the parent VSL
    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );

    // initialize the lock protecting the child VSL
    remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_VSL );

    // initialize the child VSL as empty
    xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) );
    child_vmm->vsegs_nr = 0;

    // create child GPT
    error = hal_gpt_create( &child_vmm->gpt );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
        return -1;
    }

    // build extended pointer on parent VSL
    parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );

    // take the lock protecting the parent VSL in read mode
    remote_rwlock_rd_acquire( parent_lock_xp );

    // loop on parent VSL xlist
    XLIST_FOREACH( parent_root_xp , iter_xp )
    {
        // get local and extended pointers on current parent vseg
        parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        parent_vseg    = GET_PTR( parent_vseg_xp );

        // get vseg type
        type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] found parent vseg %s / vpn_base = %x / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

        // all parent vsegs - but STACK - must be copied in child VSL
        if( type != VSEG_TYPE_STACK )
        {
            // allocate memory for a new child vseg
            child_vseg = vseg_alloc();
            if( child_vseg == NULL )   // release all allocated vsegs
            {
                vmm_destroy( child_process );
                printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
                return -1;
            }

            // copy parent vseg to child vseg
            vseg_init_from_ref( child_vseg , parent_vseg_xp );

            // register child vseg in child VSL
            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid, vseg_type_str(type),
hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
#endif

            // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
            if( type != VSEG_TYPE_CODE )
            {
                // activate the COW for DATA, MMAP, REMOTE vsegs only
                cow = ( type != VSEG_TYPE_FILE );

                vpn_base = child_vseg->vpn_base;
                vpn_size = child_vseg->vpn_size;

                // scan pages in parent vseg
                for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
                {
                    error = hal_gpt_pte_copy( &child_vmm->gpt,
                                              XPTR( parent_cxy , &parent_vmm->gpt ),
                                              vpn,
                                              cow,
                                              &ppn,
                                              &mapped );
                    if( error )
                    {
                        vmm_destroy( child_process );
                        printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
                        return -1;
                    }

                    // increment pending forks counter in page if mapped
                    if( mapped )
                    {
                        // get pointers and cluster on page descriptor
                        page_xp  = ppm_ppn2page( ppn );
                        page_cxy = GET_CXY( page_xp );
                        page_ptr = GET_PTR( page_xp );

                        // get extended pointers on "forks" and "lock"
                        forks_xp = XPTR( page_cxy , &page_ptr->forks );
                        lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                        // get lock protecting "forks" counter
                        remote_busylock_acquire( lock_xp );

                        // increment "forks"
                        hal_remote_atomic_add( forks_xp , 1 );

                        // release lock protecting "forks" counter
                        remote_busylock_release( lock_xp );

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] copied vpn %x to child GPT / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , vpn , cycle );
#endif
                    }
                }
            }   // end if no code & no stack
        }   // end if no stack
    }   // end loop on vsegs

    // release the parent VSL lock in read mode
    remote_rwlock_rd_release( parent_lock_xp );

    // initialize child GPT (architecture specific)
    // => for TSAR, identity map the kentry_vseg
    error = hal_vmm_init( child_vmm );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize child GPT\n", __FUNCTION__ );
        return -1;
    }

    // initialize the child VMM STACK allocator
    child_vmm->stack_mgr.bitmap   = 0;
    child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;

    // initialize the child VMM MMAP allocator
    uint32_t i;
    child_vmm->mmap_mgr.vpn_base       = CONFIG_VMM_HEAP_BASE;
    child_vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
    for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );

    // initialize instrumentation counters
    child_vmm->pgfault_nr = 0;

    // copy base addresses from parent VMM to child VMM
    child_vmm->kent_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->kent_vpn_base));
    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
    child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));
    child_vmm->heap_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->heap_vpn_base));
    child_vmm->code_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->code_vpn_base));
    child_vmm->data_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->data_vpn_base));

    child_vmm->entry_point = (intptr_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->entry_point));

    hal_fence();

#if DEBUG_VMM_FORK_COPY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_FORK_COPY < cycle )
printk("\n[%s] thread[%x,%x] exit successfully / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid , cycle );
#endif

    return 0;

}  // vmm_fork_copy()

///////////////////////////////////////
void vmm_destroy( process_t * process )
{
    xptr_t   vseg_xp;
    vseg_t * vseg;

#if DEBUG_VMM_DESTROY
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
#endif

#if (DEBUG_VMM_DESTROY & 1)
if( DEBUG_VMM_DESTROY < cycle )
vmm_display( process , true );
#endif

    // get pointer on local VMM
    vmm_t * vmm = &process->vmm;

    // get extended pointer on VSL root
    xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    // scan the VSL to delete all registered vsegs
    // (don't use a FOREACH for item deletion in xlist)
    while( !xlist_is_empty( root_xp ) )
    {
        // get pointer on first vseg in VSL
        vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        // delete vseg and release physical pages
        vmm_delete_vseg( process->pid , vseg->min );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif

    }

    // remove all vsegs from zombi_lists in MMAP allocator
    uint32_t i;
    for( i = 0 ; i < 32 ; i++ )
    {
        while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
        {
            vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] found zombi vseg %s / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
            // clean vseg descriptor
            vseg->vmm = NULL;

            // remove vseg from zombi_list
            list_unlink( &vseg->zlist );

            // release vseg descriptor
            vseg_free( vseg );

#if( DEBUG_VMM_DESTROY & 1 )
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] zombi vseg %s released / vpn_base %x / vpn_size %d\n",
__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
#endif
        }
    }

    // release memory allocated to the GPT itself
    hal_gpt_destroy( &vmm->gpt );

#if DEBUG_VMM_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_DESTROY < cycle )
printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
#endif

}  // end vmm_destroy()

/////////////////////////////////////////////////
vseg_t * vmm_check_conflict( process_t * process,
                             vpn_t       vpn_base,
                             vpn_t       vpn_size )
{
    vmm_t * vmm = &process->vmm;

    // scan the VSL
    vseg_t * vseg;
    xptr_t   iter_xp;
    xptr_t   vseg_xp;
    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );

    XLIST_FOREACH( root_xp , iter_xp )
    {
        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg    = GET_PTR( vseg_xp );

        if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
            (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
    }
    return NULL;

}  // end vmm_check_conflict()

////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM stack_vseg specific allocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : pointer on VMM.
// @ vpn_base : (return value) first allocated page
// @ vpn_size : (return value) number of allocated pages
////////////////////////////////////////////////////////////////////////////////////////////
static error_t vmm_stack_alloc( vmm_t * vmm,
                                vpn_t * vpn_base,
                                vpn_t * vpn_size )
{
    // get stack allocator pointer
    stack_mgr_t * mgr = &vmm->stack_mgr;

    // get lock on stack allocator
    busylock_acquire( &mgr->lock );

    // get first free slot index in bitmap
    int32_t index = bitmap_ffc( &mgr->bitmap , 4 );
    if( (index < 0) || (index > 31) )
    {
        busylock_release( &mgr->lock );
        return 0xFFFFFFFF;
    }

    // update bitmap
    bitmap_set( &mgr->bitmap , index );

    // release lock on stack allocator
    busylock_release( &mgr->lock );

    // returns vpn_base, vpn_size (one page in the slot is not allocated)
    *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
    return 0;

}  // end vmm_stack_alloc()

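/////////////////////////////////////////////////////////////////////////////////////////
// Worked example for the stack allocator above (hypothetical values, not taken from
// the actual configuration): with CONFIG_VMM_STACK_SIZE == 16 and stack_mgr.vpn_base
// == 0x400, allocating slot index 2 returns
//     vpn_base = 0x400 + 2 * 16 + 1 = 0x421   and   vpn_size = 15,
// i.e. the first page of each 16-page slot is left unallocated. The inverse
// computation in vmm_detach_vseg_from_vsl() recovers the slot from the vseg:
//     index = (0x421 - 0x400 - 1) / 16 = 2,
// which is the bit cleared in stack_mgr.bitmap when the stack vseg is released.
/////////////////////////////////////////////////////////////////////////////////////////
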
////////////////////////////////////////////////////////////////////////////////////////////
// This static function is called by the vmm_create_vseg() function, and implements
// the VMM MMAP specific allocator.
////////////////////////////////////////////////////////////////////////////////////////////
// @ vmm      : [in]  pointer on VMM.
// @ npages   : [in]  requested number of pages.
// @ vpn_base : [out] first allocated page.
// @ vpn_size : [out] actual number of allocated pages.
////////////////////////////////////////////////////////////////////////////////////////////
static error_t vmm_mmap_alloc( vmm_t * vmm,
                               vpn_t   npages,
                               vpn_t * vpn_base,
                               vpn_t * vpn_size )
{
    uint32_t   index;
    vseg_t   * vseg;
    vpn_t      base;
    vpn_t      size;
    vpn_t      free;

    // mmap vseg size must be a power of 2
    // compute actual size and index in zombi_list array
    size  = POW2_ROUNDUP( npages );
    index = bits_log2( size );

    // get mmap allocator pointer
    mmap_mgr_t * mgr = &vmm->mmap_mgr;

    // get lock on mmap allocator
    busylock_acquire( &mgr->lock );

    // get vseg from zombi_list or from mmap zone
    if( list_is_empty( &mgr->zombi_list[index] ) )     // from mmap zone
    {
        // check overflow
        free = mgr->first_free_vpn;
        if( (free + size) > mgr->vpn_size )
        {
            busylock_release( &mgr->lock );
            return ENOMEM;
        }

        // update MMAP allocator
        mgr->first_free_vpn += size;

        // compute base
        base = free;
    }
    else                                               // from zombi_list
    {
        // get pointer on zombi vseg from zombi_list
        vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );

        // remove vseg from free-list
        list_unlink( &vseg->zlist );

        // compute base
        base = vseg->vpn_base;
    }

    // release lock on mmap allocator
    busylock_release( &mgr->lock );

    // returns vpn_base, vpn_size
    *vpn_base = base;
    *vpn_size = size;
    return 0;

}  // end vmm_mmap_alloc()

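/////////////////////////////////////////////////////////////////////////////////////////
// Worked example for the MMAP allocator above (illustrative request, not from the
// original source): a request for npages == 5 is rounded up to size == POW2_ROUNDUP(5)
// == 8 pages, with zombi_list index == bits_log2(8) == 3. If zombi_list[3] is empty,
// the 8 pages are taken at first_free_vpn, which then advances by 8; otherwise a
// previously released 8-page vseg is recycled from zombi_list[3], so released mmap
// regions of the same power-of-2 size are reused before the mmap zone grows.
/////////////////////////////////////////////////////////////////////////////////////////
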
////////////////////////////////////////////////
vseg_t * vmm_create_vseg( process_t   * process,
                          vseg_type_t   type,
                          intptr_t      base,
                          uint32_t      size,
                          uint32_t      file_offset,
                          uint32_t      file_size,
                          xptr_t        mapper_xp,
                          cxy_t         cxy )
{
    vseg_t     * vseg;          // created vseg pointer
    vpn_t        vpn_base;      // first page index
    vpn_t        vpn_size;      // number of pages covered by vseg
    error_t      error;

#if DEBUG_VMM_CREATE_VSEG
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter / %s / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
#endif

    // get pointer on VMM
    vmm_t * vmm = &process->vmm;

    // compute base, size, vpn_base, vpn_size, depending on vseg type
    // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs

    if( type == VSEG_TYPE_STACK )
    {
        // get vpn_base and vpn_size from STACK allocator
        error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // compute vseg base and size from vpn_base and vpn_size
        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
        size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
    }
    else if( type == VSEG_TYPE_FILE )
    {
        // compute page index (in mapper) for first byte
        vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT;

        // compute page index (in mapper) for last byte
        vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;

        // compute offset in first page
        uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK;

        // compute number of pages required in virtual space
        vpn_t npages = vpn_max - vpn_min + 1;

        // get vpn_base and vpn_size from MMAP allocator
        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // set the vseg base (not always aligned for FILE)
        base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
    }
    else if( (type == VSEG_TYPE_ANON) ||
             (type == VSEG_TYPE_REMOTE) )
    {
        // compute number of required pages in virtual space
        vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
        if( size & CONFIG_PPM_PAGE_MASK ) npages++;

        // get vpn_base and vpn_size from MMAP allocator
        error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size );
        if( error )
        {
            printk("\n[ERROR] in %s : no vspace for mmap vseg / process %x in cluster %x\n",
            __FUNCTION__ , process->pid , local_cxy );
            return NULL;
        }

        // set vseg base (always aligned for ANON or REMOTE)
        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    }
    else    // VSEG_TYPE_DATA or VSEG_TYPE_CODE
    {
        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;

        vpn_base = vpn_min;
        vpn_size = vpn_max - vpn_min + 1;
    }

    // check collisions
    vseg = vmm_check_conflict( process , vpn_base , vpn_size );
    if( vseg != NULL )
    {
        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n"
               "  overlaps existing vseg [vpn_base = %x / vpn_size = %x]\n",
        __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size );
        return NULL;
    }

    // allocate physical memory for vseg descriptor
    vseg = vseg_alloc();
    if( vseg == NULL )
    {
        printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",
        __FUNCTION__ , process->pid );
        return NULL;
    }

    // initialize vseg descriptor
    vseg_init( vseg,
               type,
               base,
               size,
               vpn_base,
               vpn_size,
               file_offset,
               file_size,
               mapper_xp,
               cxy );

    // attach vseg to VSL
    vmm_attach_vseg_to_vsl( vmm , vseg );

#if DEBUG_VMM_CREATE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_CREATE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle );
#endif

    return vseg;

}  // vmm_create_vseg()

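/////////////////////////////////////////////////////////////////////////////////////////
// Illustrative call of vmm_create_vseg() (hypothetical, not taken from a caller in
// this file): creating an anonymous 5-page vseg in the local cluster. For the ANON
// type the <base> argument is ignored, because the MMAP allocator chooses vpn_base,
// and the file / mapper arguments are unused:
//
//     vseg_t * vseg = vmm_create_vseg( process,
//                                      VSEG_TYPE_ANON,
//                                      0,                           // base unused
//                                      5 << CONFIG_PPM_PAGE_SHIFT,  // size
//                                      0,                           // file_offset unused
//                                      0,                           // file_size unused
//                                      XPTR_NULL,                   // mapper_xp unused
//                                      local_cxy );
//     if( vseg == NULL ) printk("\n[ERROR] cannot create ANON vseg\n");
/////////////////////////////////////////////////////////////////////////////////////////
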
///////////////////////////////////
void vmm_delete_vseg( pid_t    pid,
                      intptr_t vaddr )
{
    process_t * process;    // local pointer on local process
    vmm_t     * vmm;        // local pointer on local process VMM
    vseg_t    * vseg;       // local pointer on local vseg containing vaddr
    gpt_t     * gpt;        // local pointer on local process GPT
    vpn_t       vpn;        // VPN of current PTE
    vpn_t       vpn_min;    // VPN of first PTE
    vpn_t       vpn_max;    // VPN of last PTE (excluded)
    ppn_t       ppn;        // current PTE ppn value
    uint32_t    attr;       // current PTE attributes
    kmem_req_t  req;        // request to release memory
    xptr_t      page_xp;    // extended pointer on page descriptor
    cxy_t       page_cxy;   // page descriptor cluster
    page_t    * page_ptr;   // page descriptor pointer
    xptr_t      forks_xp;   // extended pointer on pending forks counter
    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    uint32_t    forks;      // actual number of pending forks

#if DEBUG_VMM_DELETE_VSEG
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VMM_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
#endif

    // get local pointer on local process descriptor
    process = cluster_get_local_process_from_pid( pid );

    if( process == NULL ) return;

    // get pointers on local process VMM and GPT
    vmm = &process->vmm;
    gpt = &process->vmm.gpt;

    // get local pointer on vseg containing vaddr
    vseg = vmm_vseg_from_vaddr( vmm , vaddr );

    if( vseg == NULL ) return;

    // loop to invalidate all vseg PTEs in GPT
    vpn_min = vseg->vpn_base;
    vpn_max = vpn_min + vseg->vpn_size;
    for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    {
        // get GPT entry
        hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );

        if( attr & GPT_MAPPED )    // entry is mapped
        {

#if( DEBUG_VMM_DELETE_VSEG & 1 )
if( DEBUG_VMM_DELETE_VSEG < cycle )
printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
#endif

            // check small page
            assert( (attr & GPT_SMALL) , "a user vseg must use small pages" );

            // unmap GPT entry in local GPT
            hal_gpt_reset_pte( gpt , vpn );

            // handle pending forks counter if
            // 1) not identity mapped
            // 2) reference cluster
            if( ((vseg->flags & VSEG_IDENT) == 0) &&
                (GET_CXY( process->ref_xp ) == local_cxy) )
            {
                // get extended pointer on physical page descriptor
                page_xp  = ppm_ppn2page( ppn );
                page_cxy = GET_CXY( page_xp );
                page_ptr = GET_PTR( page_xp );

                // get extended pointers on forks and lock fields
                forks_xp = XPTR( page_cxy , &page_ptr->forks );
                lock_xp  = XPTR( page_cxy , &page_ptr->lock );

                // get pending forks counter
                forks = hal_remote_l32( forks_xp );

                if( forks )     // decrement pending forks counter
                {
                    hal_remote_atomic_add( forks_xp , -1 );
                }
                else            // release physical page to relevant cluster
                {
                    if( page_cxy == local_cxy )     // local cluster
                    {
                        req.type = KMEM_PAGE;
                        req.ptr  = page_ptr;
                        kmem_free( &req );
                    }
                    else                            // remote cluster
                    {
                        rpc_pmem_release_pages_client( page_cxy , page_ptr );
                    }

#if( DEBUG_VMM_DELETE_VSEG & 1 )
if( DEBUG_VMM_DELETE_VSEG < cycle )
printk("- release ppn %x\n", ppn );
#endif
                }
            }
        }
    }

    // remove vseg from VSL and release vseg descriptor (if not MMAP type)
    vmm_detach_vseg_from_vsl( vmm , vseg );

#if DEBUG_VMM_DELETE_VSEG
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_VMM_DELETE_VSEG < cycle )
printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
#endif

}  // end vmm_delete_vseg()

[611] | 1256 | ///////////////////////////////////////////// |
---|
| 1257 | vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, |
---|
| 1258 | intptr_t vaddr ) |
---|
[406] | 1259 | { |
---|
[408] | 1260 | xptr_t iter_xp; |
---|
| 1261 | xptr_t vseg_xp; |
---|
| 1262 | vseg_t * vseg; |
---|
[406] | 1263 | |
---|
[408] | 1264 | // get extended pointers on VSL lock and root |
---|
| 1265 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
| 1266 | xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); |
---|
[406] | 1267 | |
---|
[408] | 1268 | // get lock protecting the VSL |
---|
[567] | 1269 | remote_rwlock_rd_acquire( lock_xp ); |
---|
[408] | 1270 | |
---|
| 1271 | // scan the list of vsegs in VSL |
---|
| 1272 | XLIST_FOREACH( root_xp , iter_xp ) |
---|
[406] | 1273 | { |
---|
[408] | 1274 | vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); |
---|
[433] | 1275 | vseg = GET_PTR( vseg_xp ); |
---|
[595] | 1276 | |
---|
[408] | 1277 | if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) |
---|
[595] | 1278 | { |
---|
[408] | 1279 | // return success |
---|
[567] | 1280 | remote_rwlock_rd_release( lock_xp ); |
---|
[408] | 1281 | return vseg; |
---|
| 1282 | } |
---|
[406] | 1283 | } |
---|
| 1284 | |
---|
[408] | 1285 | // return failure |
---|
[567] | 1286 | remote_rwlock_rd_release( lock_xp ); |
---|
[408] | 1287 | return NULL; |
---|
[406] | 1288 | |
---|
[595] | 1289 | } // end vmm_vseg_from_vaddr() |
---|
[406] | 1290 | |
---|
[1] | 1291 | ///////////////////////////////////////////// |
---|
| 1292 | error_t vmm_resize_vseg( process_t * process, |
---|
| 1293 | intptr_t base, |
---|
| 1294 | intptr_t size ) |
---|
| 1295 | { |
---|
[406] | 1296 | error_t error; |
---|
| 1297 | vseg_t * new; |
---|
| 1298 | vpn_t vpn_min; |
---|
| 1299 | vpn_t vpn_max; |
---|
[1] | 1300 | |
---|
| 1301 | // get pointer on process VMM |
---|
| 1302 | vmm_t * vmm = &process->vmm; |
---|
| 1303 | |
---|
| 1304 | intptr_t addr_min = base; |
---|
| 1305 | intptr_t addr_max = base + size; |
---|
| 1306 | |
---|
| 1307 | // get pointer on vseg |
---|
[595] | 1308 | vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); |
---|
[1] | 1309 | |
---|
| 1310 | if( vseg == NULL) return EINVAL; |
---|
[21] | 1311 | |
---|
[408] | 1312 | // get extended pointer on VSL lock |
---|
| 1313 | xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); |
---|
[21] | 1314 | |
---|
[408] | 1315 | // get lock protecting VSL |
---|
[567] | 1316 | remote_rwlock_wr_acquire( lock_xp ); |
---|
[408] | 1317 | |
---|
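|  | // the requested region [addr_min,addr_max[ is compared to [vseg->min,vseg->max[ : |
---|
|  | // it can be rejected, remove the whole vseg, shrink the vseg from one end, |
---|
|  | // or cut it in three regions keeping only the lower and upper parts (see below). |
---|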
[611] | 1318 | if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg |
---|
[1] | 1319 | { |
---|
[611] | 1320 | error = -1; |
---|
[1] | 1321 | } |
---|
[611] | 1322 | else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted |
---|
[1] | 1323 | { |
---|
[611] | 1324 | vmm_delete_vseg( process->pid , vseg->min ); |
---|
[1] | 1325 | error = 0; |
---|
| 1326 | } |
---|
[611] | 1327 | else if( vseg->min == addr_min ) // vseg must be resized |
---|
[1] | 1328 | { |
---|
[406] | 1329 | // update vseg base address |
---|
| 1330 | vseg->min = addr_max; |
---|
| 1331 | |
---|
| 1332 | // update vpn_base and vpn_size |
---|
| 1333 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1334 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1335 | vseg->vpn_base = vpn_min; |
---|
| 1336 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
| 1337 | error = 0; |
---|
[1] | 1338 | } |
---|
[611] | 1339 | else if( vseg->max == addr_max ) // vseg must be resized |
---|
[1] | 1340 | { |
---|
[406] | 1341 | // update vseg max address |
---|
| 1342 | vseg->max = addr_min; |
---|
| 1343 | |
---|
| 1344 | // update vpn_base and vpn_size |
---|
| 1345 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1346 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1347 | vseg->vpn_base = vpn_min; |
---|
| 1348 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
| 1349 | error = 0; |
---|
[1] | 1350 | } |
---|
[611] | 1351 | else // vseg cut in three regions |
---|
[1] | 1352 | { |
---|
[406] | 1353 | // save the original end address, then resize the existing vseg |
---|
| 1354 | intptr_t old_max = vseg->max; vseg->max = addr_min; |
---|
| 1355 | |
---|
| 1356 | // update vpn_base and vpn_size |
---|
| 1357 | vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1358 | vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; |
---|
| 1359 | vseg->vpn_base = vpn_min; |
---|
| 1360 | vseg->vpn_size = vpn_max - vpn_min + 1; |
---|
| 1361 | |
---|
| 1362 | // create a new vseg covering the upper region [addr_max , old_max[ |
---|
[407] | 1363 | new = vmm_create_vseg( process, |
---|
| 1364 | vseg->type, |
---|
| 1365 | addr_max, |
---|
| 1366 | (old_max - addr_max), |
---|
| 1367 | vseg->file_offset, |
---|
| 1368 | vseg->file_size, |
---|
| 1369 | vseg->mapper_xp, |
---|
| 1370 | vseg->cxy ); |
---|
| 1371 | |
---|
[406] | 1372 | if( new == NULL ) error = EINVAL; |
---|
| 1373 | else error = 0; |
---|
[1] | 1374 | } |
---|
| 1375 | |
---|
| 1376 | // release VMM lock |
---|
[567] | 1377 | remote_rwlock_wr_release( lock_xp ); |
---|
[1] | 1378 | |
---|
| 1379 | return error; |
---|
| 1380 | |
---|
[406] | 1381 | } // vmm_resize_vseg() |
---|
| 1382 | |
---|
[1] | 1383 | /////////////////////////////////////////// |
---|
[388] | 1384 | error_t vmm_get_vseg( process_t * process, |
---|
[394] | 1385 | intptr_t vaddr, |
---|
[388] | 1386 | vseg_t ** found_vseg ) |
---|
[1] | 1387 | { |
---|
[595] | 1388 | xptr_t vseg_xp; |
---|
| 1389 | vseg_t * vseg; |
---|
| 1390 | vmm_t * vmm; |
---|
| 1391 | error_t error; |
---|
[1] | 1392 | |
---|
[440] | 1393 | // get pointer on local VMM |
---|
| 1394 | vmm = &process->vmm; |
---|
[1] | 1395 | |
---|
[440] | 1396 | // try to get vseg from local VMM |
---|
[595] | 1397 | vseg = vmm_vseg_from_vaddr( vmm , vaddr ); |
---|
[440] | 1398 | |
---|
[388] | 1399 | if( vseg == NULL ) // vseg not found in local cluster => try to get it from ref |
---|
| 1400 | { |
---|
| 1401 | // get extended pointer on reference process |
---|
| 1402 | xptr_t ref_xp = process->ref_xp; |
---|
[1] | 1403 | |
---|
[388] | 1404 | // get cluster and local pointer on reference process |
---|
| 1405 | cxy_t ref_cxy = GET_CXY( ref_xp ); |
---|
[433] | 1406 | process_t * ref_ptr = GET_PTR( ref_xp ); |
---|
[388] | 1407 | |
---|
| 1408 | if( local_cxy == ref_cxy ) return -1; // local cluster is the reference |
---|
| 1409 | |
---|
| 1410 | // get extended pointer on reference vseg |
---|
[394] | 1411 | rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); |
---|
[388] | 1412 | |
---|
[440] | 1413 | if( error ) return -1; // vseg not found => illegal user vaddr |
---|
[388] | 1414 | |
---|
| 1415 | // allocate a vseg in local cluster |
---|
| 1416 | vseg = vseg_alloc(); |
---|
| 1417 | |
---|
[440] | 1418 | if( vseg == NULL ) return -1; // cannot allocate a local vseg |
---|
[388] | 1419 | |
---|
| 1420 | // initialise local vseg from reference |
---|
| 1421 | vseg_init_from_ref( vseg , vseg_xp ); |
---|
| 1422 | |
---|
[611] | 1423 | // register local vseg in local VSL |
---|
| 1424 | vmm_attach_vseg_to_vsl( vmm , vseg ); |
---|
[388] | 1425 | } |
---|
[595] | 1426 | |
---|
[388] | 1427 | // success |
---|
| 1428 | *found_vseg = vseg; |
---|
[394] | 1429 | return 0; |
---|
[388] | 1430 | |
---|
| 1431 | } // end vmm_get_vseg() |
---|
| 1432 | |
---|
[407] | 1433 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
| 1434 | // This static function computes the target cluster to allocate a physical page |
---|
| 1435 | // for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required) |
---|
| 1436 | // and returns an extended pointer on the allocated page descriptor. |
---|
[585] | 1437 | // It can be called by a thread running in any cluster. |
---|
[407] | 1438 | // The vseg cannot have the FILE type. |
---|
| 1439 | ////////////////////////////////////////////////////////////////////////////////////// |
---|
| 1440 | static xptr_t vmm_page_allocate( vseg_t * vseg, |
---|
| 1441 | vpn_t vpn ) |
---|
| 1442 | { |
---|
[433] | 1443 | |
---|
[438] | 1444 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
[595] | 1445 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
| 1446 | thread_t * this = CURRENT_THREAD; |
---|
[438] | 1447 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1448 | printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", |
---|
| 1449 | __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle ); |
---|
[433] | 1450 | #endif |
---|
| 1451 | |
---|
[407] | 1452 | page_t * page_ptr; |
---|
| 1453 | cxy_t page_cxy; |
---|
| 1454 | kmem_req_t req; |
---|
[577] | 1455 | uint32_t index; |
---|
[407] | 1456 | |
---|
[577] | 1457 | uint32_t type = vseg->type; |
---|
| 1458 | uint32_t flags = vseg->flags; |
---|
| 1459 | uint32_t x_size = LOCAL_CLUSTER->x_size; |
---|
| 1460 | uint32_t y_size = LOCAL_CLUSTER->y_size; |
---|
[407] | 1461 | |
---|
[567] | 1462 | // check vseg type |
---|
| 1463 | assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" ); |
---|
[407] | 1464 | |
---|
| 1465 | if( flags & VSEG_DISTRIB ) // distributed => cxy depends on vpn LSB |
---|
| 1466 | { |
---|
[577] | 1467 | index = vpn & ((x_size * y_size) - 1); |
---|
| 1468 | page_cxy = HAL_CXY_FROM_XY( (index / y_size) , (index % y_size) ); |
---|
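|  | // illustration (assuming a 2x2 mesh, i.e. x_size = y_size = 2) : for vpn = 0x1007, |
---|
|  | // index = 0x1007 & 3 = 3, selecting the cluster at coordinates (x = 1 , y = 1) |
---|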
[561] | 1469 | |
---|
[577] | 1470 | // If the cluster selected from VPN's LSBs is empty, we select one randomly |
---|
| 1471 | if ( cluster_is_active( page_cxy ) == false ) |
---|
| 1472 | { |
---|
| 1473 | page_cxy = cluster_random_select(); |
---|
[561] | 1474 | } |
---|
[407] | 1475 | } |
---|
| 1476 | else // other cases => cxy specified in vseg |
---|
| 1477 | { |
---|
[561] | 1478 | page_cxy = vseg->cxy; |
---|
[407] | 1479 | } |
---|
| 1480 | |
---|
| 1481 | // allocate a physical page from target cluster |
---|
| 1482 | if( page_cxy == local_cxy ) // target cluster is the local cluster |
---|
| 1483 | { |
---|
| 1484 | req.type = KMEM_PAGE; |
---|
| 1485 | req.size = 0; |
---|
| 1486 | req.flags = AF_NONE; |
---|
| 1487 | page_ptr = (page_t *)kmem_alloc( &req ); |
---|
| 1488 | } |
---|
| 1489 | else // target cluster is not the local cluster |
---|
| 1490 | { |
---|
| 1491 | rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr ); |
---|
| 1492 | } |
---|
| 1493 | |
---|
[438] | 1494 | #if DEBUG_VMM_ALLOCATE_PAGE |
---|
[595] | 1495 | cycle = (uint32_t)hal_get_cycles(); |
---|
[438] | 1496 | if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1497 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
| 1498 | __FUNCTION__ , this->process->pid, this->trdid, vpn, |
---|
| 1499 | ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) , cycle ); |
---|
[433] | 1500 | #endif |
---|
| 1501 | |
---|
[407] | 1502 | if( page_ptr == NULL ) return XPTR_NULL; |
---|
| 1503 | else return XPTR( page_cxy , page_ptr ); |
---|
| 1504 | |
---|
| 1505 | } // end vmm_page_allocate() |
---|
| 1506 | |
---|
[313] | 1507 | //////////////////////////////////////// |
---|
| 1508 | error_t vmm_get_one_ppn( vseg_t * vseg, |
---|
| 1509 | vpn_t vpn, |
---|
| 1510 | ppn_t * ppn ) |
---|
| 1511 | { |
---|
| 1512 | error_t error; |
---|
[407] | 1513 | xptr_t page_xp; // extended pointer on physical page descriptor |
---|
[606] | 1514 | uint32_t page_id; // missing page index in vseg mapper |
---|
[406] | 1515 | uint32_t type; // vseg type; |
---|
[313] | 1516 | |
---|
[406] | 1517 | type = vseg->type; |
---|
[606] | 1518 | page_id = vpn - vseg->vpn_base; |
---|
[313] | 1519 | |
---|
[438] | 1520 | #if DEBUG_VMM_GET_ONE_PPN |
---|
[595] | 1521 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
| 1522 | thread_t * this = CURRENT_THREAD; |
---|
| 1523 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
[606] | 1524 | printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", |
---|
| 1525 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); |
---|
[433] | 1526 | #endif |
---|
[313] | 1527 | |
---|
[406] | 1528 | // FILE type : get the physical page from the file mapper |
---|
[313] | 1529 | if( type == VSEG_TYPE_FILE ) |
---|
| 1530 | { |
---|
[406] | 1531 | // get extended pointer on mapper |
---|
[407] | 1532 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
[313] | 1533 | |
---|
[567] | 1534 | assert( (mapper_xp != XPTR_NULL), |
---|
| 1535 | "mapper not defined for a FILE vseg\n" ); |
---|
[406] | 1536 | |
---|
[606] | 1537 | // get extended pointer on page descriptor |
---|
| 1538 | page_xp = mapper_remote_get_page( mapper_xp , page_id ); |
---|
[406] | 1539 | |
---|
[606] | 1540 | if ( page_xp == XPTR_NULL ) return EINVAL; |
---|
[313] | 1541 | } |
---|
| 1542 | |
---|
[406] | 1543 | // Other types : allocate a physical page from target cluster, |
---|
[407] | 1544 | // as defined by vseg type and vpn value |
---|
[313] | 1545 | else |
---|
| 1546 | { |
---|
[433] | 1547 | // allocate one physical page |
---|
[407] | 1548 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
[406] | 1549 | |
---|
[407] | 1550 | if( page_xp == XPTR_NULL ) return ENOMEM; |
---|
[313] | 1551 | |
---|
[406] | 1552 | // initialise missing page from .elf file mapper for DATA and CODE types |
---|
[440] | 1553 | // the vseg->mapper_xp field is an extended pointer on the .elf file mapper |
---|
[313] | 1554 | if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) ) |
---|
| 1555 | { |
---|
[406] | 1556 | // get extended pointer on mapper |
---|
| 1557 | xptr_t mapper_xp = vseg->mapper_xp; |
---|
[313] | 1558 | |
---|
[567] | 1559 | assert( (mapper_xp != XPTR_NULL), |
---|
| 1560 | "mapper not defined for a CODE or DATA vseg\n" ); |
---|
[406] | 1561 | |
---|
| 1562 | // compute missing page offset in vseg |
---|
[606] | 1563 | uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT; |
---|
[406] | 1564 | |
---|
[313] | 1565 | // compute missing page offset in .elf file |
---|
[406] | 1566 | uint32_t elf_offset = vseg->file_offset + offset; |
---|
[313] | 1567 | |
---|
[438] | 1568 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1569 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1570 | printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", |
---|
| 1571 | __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); |
---|
[433] | 1572 | #endif |
---|
[406] | 1573 | // compute extended pointer on page base |
---|
[407] | 1574 | xptr_t base_xp = ppm_page2base( page_xp ); |
---|
[313] | 1575 | |
---|
[406] | 1576 | // file_size (in .elf mapper) can be smaller than vseg_size (BSS) |
---|
| 1577 | uint32_t file_size = vseg->file_size; |
---|
| 1578 | |
---|
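|  | // illustration : with 4 Kbytes pages, if file_size = 0x1800 and offset = 0x1000, |
---|
|  | // the first 0x800 bytes of the missing page come from the mapper and the last |
---|
|  | // 0x800 bytes are zeroed (BSS), as in the third case below. |
---|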
| 1579 | if( file_size < offset ) // missing page fully in BSS |
---|
[313] | 1580 | { |
---|
[406] | 1581 | |
---|
[438] | 1582 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1583 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1584 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", |
---|
| 1585 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
[433] | 1586 | #endif |
---|
[407] | 1587 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
[313] | 1588 | { |
---|
[315] | 1589 | memset( GET_PTR( base_xp ) , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
[313] | 1590 | } |
---|
| 1591 | else |
---|
| 1592 | { |
---|
[315] | 1593 | hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); |
---|
[313] | 1594 | } |
---|
| 1595 | } |
---|
[406] | 1596 | else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) ) // fully in mapper |
---|
[315] | 1597 | { |
---|
[406] | 1598 | |
---|
[438] | 1599 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1600 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[595] | 1601 | printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", |
---|
| 1602 | __FUNCTION__, this->process->pid, this->trdid, vpn ); |
---|
[433] | 1603 | #endif |
---|
[606] | 1604 | error = mapper_move_kernel( mapper_xp, |
---|
| 1605 | true, // to_buffer |
---|
| 1606 | elf_offset, |
---|
| 1607 | base_xp, |
---|
| 1608 | CONFIG_PPM_PAGE_SIZE ); |
---|
[313] | 1609 | if( error ) return EINVAL; |
---|
| 1610 | } |
---|
[406] | 1611 | else // both in mapper and in BSS : |
---|
| 1612 | // - (file_size - offset) bytes from mapper |
---|
| 1613 | // - (page_size + offset - file_size) bytes from BSS |
---|
[313] | 1614 | { |
---|
[406] | 1615 | |
---|
[438] | 1616 | #if (DEBUG_VMM_GET_ONE_PPN & 0x1) |
---|
[469] | 1617 | if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) |
---|
[610] | 1618 | printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" |
---|
[433] | 1619 | " %d bytes from mapper / %d bytes from BSS\n", |
---|
[595] | 1620 | __FUNCTION__, this->process->pid, this->trdid, vpn, |
---|
[407] | 1621 | file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
[433] | 1622 | #endif |
---|
[313] | 1623 | // initialize mapper part |
---|
[606] | 1624 | error = mapper_move_kernel( mapper_xp, |
---|
| 1625 | true, // to buffer |
---|
| 1626 | elf_offset, |
---|
| 1627 | base_xp, |
---|
| 1628 | file_size - offset ); |
---|
[313] | 1629 | if( error ) return EINVAL; |
---|
| 1630 | |
---|
| 1631 | // initialize BSS part |
---|
[407] | 1632 | if( GET_CXY( page_xp ) == local_cxy ) |
---|
[313] | 1633 | { |
---|
[406] | 1634 | memset( GET_PTR( base_xp ) + file_size - offset , 0 , |
---|
| 1635 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
[313] | 1636 | } |
---|
| 1637 | else |
---|
| 1638 | { |
---|
[406] | 1639 | hal_remote_memset( base_xp + file_size - offset , 0 , |
---|
| 1640 | offset + CONFIG_PPM_PAGE_SIZE - file_size ); |
---|
[313] | 1641 | } |
---|
| 1642 | } |
---|
| 1643 | } // end initialisation for CODE or DATA types |
---|
| 1644 | } |
---|
| 1645 | |
---|
| 1646 | // return ppn |
---|
[407] | 1647 | *ppn = ppm_page2ppn( page_xp ); |
---|
[406] | 1648 | |
---|
[438] | 1649 | #if DEBUG_VMM_GET_ONE_PPN |
---|
[595] | 1650 | cycle = (uint32_t)hal_get_cycles(); |
---|
| 1651 | if( DEBUG_VMM_GET_ONE_PPN < cycle ) |
---|
| 1652 | printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", |
---|
| 1653 | __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); |
---|
[433] | 1654 | #endif |
---|
[406] | 1655 | |
---|
[313] | 1656 | return 0; |
---|
| 1657 | |
---|
| 1658 | } // end vmm_get_one_ppn() |
---|
| 1659 | |
---|
[585] | 1660 | /////////////////////////////////////////////////// |
---|
| 1661 | error_t vmm_handle_page_fault( process_t * process, |
---|
| 1662 | vpn_t vpn ) |
---|
[1] | 1663 | { |
---|
[585] | 1664 | vseg_t * vseg; // vseg containing vpn |
---|
| 1665 | uint32_t new_attr; // new PTE_ATTR value |
---|
| 1666 | ppn_t new_ppn; // new PTE_PPN value |
---|
| 1667 | uint32_t ref_attr; // PTE_ATTR value in reference GPT |
---|
| 1668 | ppn_t ref_ppn; // PTE_PPN value in reference GPT |
---|
| 1669 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
| 1670 | process_t * ref_ptr; // reference process for missing vpn |
---|
| 1671 | xptr_t local_gpt_xp; // extended pointer on local GPT |
---|
| 1672 | xptr_t local_lock_xp; // extended pointer on local GPT lock |
---|
| 1673 | xptr_t ref_gpt_xp; // extended pointer on reference GPT |
---|
| 1674 | xptr_t ref_lock_xp; // extended pointer on reference GPT lock |
---|
| 1675 | error_t error; // value returned by called functions |
---|
[1] | 1676 | |
---|
[585] | 1677 | // get local vseg (access to reference VSL can be required) |
---|
| 1678 | error = vmm_get_vseg( process, |
---|
| 1679 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
| 1680 | &vseg ); |
---|
| 1681 | if( error ) |
---|
| 1682 | { |
---|
[595] | 1683 | printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n", |
---|
[585] | 1684 | __FUNCTION__ , vpn , process->pid ); |
---|
| 1685 | |
---|
| 1686 | return EXCP_USER_ERROR; |
---|
| 1687 | } |
---|
| 1688 | |
---|
| 1689 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
| 1690 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
[567] | 1691 | thread_t * this = CURRENT_THREAD; |
---|
[585] | 1692 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1693 | printk("\n[%s] thread[%x,%x] enter for vpn %x / %s / cycle %d\n", |
---|
[585] | 1694 | __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle ); |
---|
[433] | 1695 | #endif |
---|
[406] | 1696 | |
---|
[585] | 1697 | //////////////// private vseg => access only the local GPT |
---|
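|  | // private vsegs (STACK and CODE) are only mapped in the local GPT, so the |
---|
|  | // fault can be handled without accessing the reference cluster. |
---|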
| 1698 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
[438] | 1699 | { |
---|
[585] | 1700 | // build extended pointer on local GPT and local GPT lock |
---|
| 1701 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
| 1702 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
[407] | 1703 | |
---|
[585] | 1704 | // take local GPT lock in write mode |
---|
| 1705 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
[407] | 1706 | |
---|
[585] | 1707 | // check VPN still unmapped in local GPT |
---|
[595] | 1708 | |
---|
[585] | 1709 | // do nothing if VPN has been mapped by a concurrent page_fault |
---|
| 1710 | hal_gpt_get_pte( local_gpt_xp, |
---|
| 1711 | vpn, |
---|
| 1712 | &new_attr, |
---|
| 1713 | &new_ppn ); |
---|
[407] | 1714 | |
---|
[585] | 1715 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
| 1716 | { |
---|
| 1717 | // allocate and initialise a physical page depending on the vseg type |
---|
| 1718 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
[407] | 1719 | |
---|
[585] | 1720 | if( error ) |
---|
[408] | 1721 | { |
---|
| 1722 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
| 1723 | __FUNCTION__ , process->pid , vpn ); |
---|
[1] | 1724 | |
---|
[585] | 1725 | // release local GPT lock in write mode |
---|
| 1726 | remote_rwlock_wr_release( local_lock_xp ); |
---|
[406] | 1727 | |
---|
[585] | 1728 | return EXCP_KERNEL_PANIC; |
---|
[407] | 1729 | } |
---|
| 1730 | |
---|
[408] | 1731 | // define new_attr from vseg flags |
---|
[407] | 1732 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
| 1733 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
| 1734 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
| 1735 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
| 1736 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
| 1737 | |
---|
[585] | 1738 | // set PTE (PPN & attribute) to local GPT |
---|
| 1739 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
[408] | 1740 | vpn, |
---|
| 1741 | new_attr, |
---|
| 1742 | new_ppn ); |
---|
[585] | 1743 | if ( error ) |
---|
[407] | 1744 | { |
---|
[585] | 1745 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n", |
---|
[407] | 1746 | __FUNCTION__ , process->pid , vpn ); |
---|
[585] | 1747 | |
---|
| 1748 | // release local GPT lock in write mode |
---|
| 1749 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1750 | |
---|
| 1751 | return EXCP_KERNEL_PANIC; |
---|
[407] | 1752 | } |
---|
| 1753 | } |
---|
[585] | 1754 | |
---|
| 1755 | // release local GPT lock in write mode |
---|
| 1756 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1757 | |
---|
| 1758 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
| 1759 | cycle = (uint32_t)hal_get_cycles(); |
---|
| 1760 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1761 | printk("\n[%s] private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
[585] | 1762 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
| 1763 | #endif |
---|
| 1764 | return EXCP_NON_FATAL; |
---|
| 1765 | |
---|
| 1766 | } // end local GPT access |
---|
| 1767 | |
---|
| 1768 | //////////// public vseg => access reference GPT |
---|
| 1769 | else |
---|
| 1770 | { |
---|
| 1771 | // get reference process cluster and local pointer |
---|
| 1772 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
| 1773 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
| 1774 | |
---|
| 1775 | // build extended pointer on reference GPT and reference GPT lock |
---|
| 1776 | ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
| 1777 | ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
| 1778 | |
---|
| 1779 | // build extended pointer on local GPT and local GPT lock |
---|
| 1780 | local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
| 1781 | local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
| 1782 | |
---|
| 1783 | // take reference GPT lock in read mode |
---|
| 1784 | remote_rwlock_rd_acquire( ref_lock_xp ); |
---|
| 1785 | |
---|
| 1786 | // get directly PPN & attributes from reference GPT |
---|
| 1787 | // this can avoid a costly RPC for a false page fault |
---|
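|  | // a "false" page fault means the VPN is already mapped in the reference GPT, |
---|
|  | // but not yet in the local GPT copy : only the local GPT must be updated. |
---|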
| 1788 | hal_gpt_get_pte( ref_gpt_xp, |
---|
| 1789 | vpn, |
---|
| 1790 | &ref_attr, |
---|
| 1791 | &ref_ppn ); |
---|
| 1792 | |
---|
| 1793 | // release reference GPT lock in read mode |
---|
| 1794 | remote_rwlock_rd_release( ref_lock_xp ); |
---|
| 1795 | |
---|
| 1796 | if( ref_attr & GPT_MAPPED ) // false page fault => update local GPT |
---|
[1] | 1797 | { |
---|
[585] | 1798 | // take local GPT lock in write mode |
---|
| 1799 | remote_rwlock_wr_acquire( local_lock_xp ); |
---|
| 1800 | |
---|
| 1801 | // check VPN still unmapped in local GPT |
---|
| 1802 | hal_gpt_get_pte( local_gpt_xp, |
---|
| 1803 | vpn, |
---|
| 1804 | &new_attr, |
---|
| 1805 | &new_ppn ); |
---|
[1] | 1806 | |
---|
[585] | 1807 | if( (new_attr & GPT_MAPPED) == 0 ) // VPN still unmapped |
---|
| 1808 | { |
---|
| 1809 | // update local GPT from reference GPT |
---|
| 1810 | error = hal_gpt_set_pte( local_gpt_xp, |
---|
| 1811 | vpn, |
---|
| 1812 | ref_attr, |
---|
| 1813 | ref_ppn ); |
---|
| 1814 | if( error ) |
---|
| 1815 | { |
---|
[595] | 1816 | printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn %x\n", |
---|
[585] | 1817 | __FUNCTION__ , process->pid , vpn ); |
---|
| 1818 | |
---|
| 1819 | // release local GPT lock in write mode |
---|
| 1820 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1821 | |
---|
| 1822 | return EXCP_KERNEL_PANIC; |
---|
| 1823 | } |
---|
| 1824 | } |
---|
| 1825 | else // VPN has been mapped by a concurrent page_fault |
---|
| 1826 | { |
---|
| 1827 | // keep PTE from local GPT |
---|
| 1828 | ref_attr = new_attr; |
---|
| 1829 | ref_ppn = new_ppn; |
---|
| 1830 | } |
---|
| 1831 | |
---|
| 1832 | // release local GPT lock in write mode |
---|
| 1833 | remote_rwlock_wr_release( local_lock_xp ); |
---|
| 1834 | |
---|
| 1835 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
[433] | 1836 | cycle = (uint32_t)hal_get_cycles(); |
---|
[585] | 1837 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1838 | printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
[585] | 1839 | __FUNCTION__, vpn, ref_ppn, ref_attr, cycle ); |
---|
[433] | 1840 | #endif |
---|
[585] | 1841 | return EXCP_NON_FATAL; |
---|
| 1842 | } |
---|
| 1843 | else // true page fault => update reference GPT |
---|
| 1844 | { |
---|
| 1845 | // take reference GPT lock in write mode |
---|
| 1846 | remote_rwlock_wr_acquire( ref_lock_xp ); |
---|
| 1847 | |
---|
| 1848 | // check VPN still unmapped in reference GPT |
---|
| 1849 | // do nothing if VPN has been mapped by a concurrent page_fault |
---|
| 1850 | hal_gpt_get_pte( ref_gpt_xp, |
---|
| 1851 | vpn, |
---|
| 1852 | &ref_attr, |
---|
| 1853 | &ref_ppn ); |
---|
[406] | 1854 | |
---|
[585] | 1855 | if( (ref_attr & GPT_MAPPED) == 0 ) // VPN actually unmapped |
---|
| 1856 | { |
---|
| 1857 | // allocate and initialise a physical page depending on the vseg type |
---|
| 1858 | error = vmm_get_one_ppn( vseg , vpn , &new_ppn ); |
---|
[1] | 1859 | |
---|
[585] | 1860 | if( error ) |
---|
| 1861 | { |
---|
| 1862 | printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n", |
---|
| 1863 | __FUNCTION__ , process->pid , vpn ); |
---|
[313] | 1864 | |
---|
[585] | 1865 | // release reference GPT lock in write mode |
---|
| 1866 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
| 1867 | |
---|
| 1868 | return EXCP_KERNEL_PANIC; |
---|
| 1869 | } |
---|
[1] | 1870 | |
---|
[585] | 1871 | // define new_attr from vseg flags |
---|
| 1872 | new_attr = GPT_MAPPED | GPT_SMALL; |
---|
| 1873 | if( vseg->flags & VSEG_USER ) new_attr |= GPT_USER; |
---|
| 1874 | if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE; |
---|
| 1875 | if( vseg->flags & VSEG_EXEC ) new_attr |= GPT_EXECUTABLE; |
---|
| 1876 | if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE; |
---|
[440] | 1877 | |
---|
[585] | 1878 | // update reference GPT |
---|
| 1879 | error = hal_gpt_set_pte( ref_gpt_xp, |
---|
| 1880 | vpn, |
---|
| 1881 | new_attr, |
---|
| 1882 | new_ppn ); |
---|
| 1883 | |
---|
| 1884 | // update local GPT (protected by reference GPT lock) |
---|
| 1885 | error |= hal_gpt_set_pte( local_gpt_xp, |
---|
| 1886 | vpn, |
---|
| 1887 | new_attr, |
---|
| 1888 | new_ppn ); |
---|
| 1889 | |
---|
| 1890 | if( error ) |
---|
| 1891 | { |
---|
| 1892 | printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n", |
---|
| 1893 | __FUNCTION__ , process->pid , vpn ); |
---|
| 1894 | |
---|
| 1895 | // release reference GPT lock in write mode |
---|
| 1896 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
| 1897 | |
---|
| 1898 | return EXCP_KERNEL_PANIC; |
---|
| 1899 | } |
---|
| 1900 | } |
---|
| 1901 | |
---|
| 1902 | // release reference GPT lock in write mode |
---|
| 1903 | remote_rwlock_wr_release( ref_lock_xp ); |
---|
| 1904 | |
---|
[440] | 1905 | #if DEBUG_VMM_HANDLE_PAGE_FAULT |
---|
[585] | 1906 | cycle = (uint32_t)hal_get_cycles(); |
---|
[469] | 1907 | if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle ) |
---|
[595] | 1908 | printk("\n[%s] true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n", |
---|
[585] | 1909 | __FUNCTION__, vpn, new_ppn, new_attr, cycle ); |
---|
[435] | 1910 | #endif |
---|
[585] | 1911 | return EXCP_NON_FATAL; |
---|
| 1912 | } |
---|
| 1913 | } |
---|
| 1914 | } // end vmm_handle_page_fault() |
---|
[435] | 1915 | |
---|
[585] | 1916 | //////////////////////////////////////////// |
---|
| 1917 | error_t vmm_handle_cow( process_t * process, |
---|
| 1918 | vpn_t vpn ) |
---|
| 1919 | { |
---|
| 1920 | vseg_t * vseg; // vseg containing vpn |
---|
| 1921 | cxy_t ref_cxy; // reference cluster for missing vpn |
---|
| 1922 | process_t * ref_ptr; // reference process for missing vpn |
---|
| 1923 | xptr_t gpt_xp; // extended pointer on GPT |
---|
| 1924 | xptr_t gpt_lock_xp; // extended pointer on GPT lock |
---|
| 1925 | uint32_t old_attr; // current PTE_ATTR value |
---|
| 1926 | ppn_t old_ppn; // current PTE_PPN value |
---|
| 1927 | uint32_t new_attr; // new PTE_ATTR value |
---|
| 1928 | ppn_t new_ppn; // new PTE_PPN value |
---|
| 1929 | error_t error; |
---|
[1] | 1930 | |
---|
[585] | 1931 | #if DEBUG_VMM_HANDLE_COW |
---|
| 1932 | uint32_t cycle = (uint32_t)hal_get_cycles(); |
---|
| 1933 | thread_t * this = CURRENT_THREAD; |
---|
| 1934 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 1935 | printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n", |
---|
[585] | 1936 | __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
| 1937 | #endif |
---|
| 1938 | |
---|
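|  | // the GPT_COW flag marks a page that became shared (typically by a fork) as |
---|
|  | // read-only : on the first write this handler allocates a private copy if the |
---|
|  | // page is still shared, or simply makes the existing page writable again. |
---|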
[610] | 1939 | // access local GPT to get GPT_COW flag |
---|
| 1940 | bool_t cow = hal_gpt_pte_is_cow( &(process->vmm.gpt), vpn ); |
---|
| 1941 | |
---|
| 1942 | if( cow == false ) return EXCP_USER_ERROR; |
---|
| 1943 | |
---|
[585] | 1944 | // get local vseg |
---|
| 1945 | error = vmm_get_vseg( process, |
---|
| 1946 | (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT, |
---|
| 1947 | &vseg ); |
---|
[440] | 1948 | if( error ) |
---|
[1] | 1949 | { |
---|
[595] | 1950 | printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n", |
---|
[585] | 1951 | __FUNCTION__, vpn, process->pid ); |
---|
| 1952 | |
---|
| 1953 | return EXCP_KERNEL_PANIC; |
---|
[440] | 1954 | } |
---|
[407] | 1955 | |
---|
[585] | 1956 | // get reference GPT cluster and local pointer |
---|
| 1957 | ref_cxy = GET_CXY( process->ref_xp ); |
---|
| 1958 | ref_ptr = GET_PTR( process->ref_xp ); |
---|
[407] | 1959 | |
---|
[610] | 1960 | // build extended pointers on the relevant GPT and GPT lock |
---|
[585] | 1961 | // - access local GPT for a private vseg |
---|
| 1962 | // - access reference GPT for a public vseg |
---|
| 1963 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
[440] | 1964 | { |
---|
[585] | 1965 | gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); |
---|
| 1966 | gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock ); |
---|
[1] | 1967 | } |
---|
[440] | 1968 | else |
---|
[1] | 1969 | { |
---|
[585] | 1970 | gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt ); |
---|
| 1971 | gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock ); |
---|
[1] | 1972 | } |
---|
| 1973 | |
---|
[585] | 1974 | // take GPT lock in write mode |
---|
| 1975 | remote_rwlock_wr_acquire( gpt_lock_xp ); |
---|
[441] | 1976 | |
---|
[585] | 1977 | // get current PTE from the relevant GPT |
---|
| 1978 | hal_gpt_get_pte( gpt_xp, |
---|
| 1979 | vpn, |
---|
| 1980 | &old_attr, |
---|
| 1981 | &old_ppn ); |
---|
[441] | 1982 | |
---|
[585] | 1983 | // the PTE must be mapped for a COW |
---|
| 1984 | if( (old_attr & GPT_MAPPED) == 0 ) |
---|
| 1985 | { |
---|
| 1986 | printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n", |
---|
| 1987 | __FUNCTION__, vpn, process->pid ); |
---|
[407] | 1988 | |
---|
[585] | 1989 | // release GPT lock in write mode |
---|
| 1990 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
[407] | 1991 | |
---|
[585] | 1992 | return EXCP_KERNEL_PANIC; |
---|
[407] | 1993 | } |
---|
| 1994 | |
---|
[585] | 1995 | // get extended pointer, cluster and local pointer on physical page descriptor |
---|
| 1996 | xptr_t page_xp = ppm_ppn2page( old_ppn ); |
---|
| 1997 | cxy_t page_cxy = GET_CXY( page_xp ); |
---|
| 1998 | page_t * page_ptr = GET_PTR( page_xp ); |
---|
[435] | 1999 | |
---|
[585] | 2000 | // get extended pointers on forks and lock field in page descriptor |
---|
| 2001 | xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); |
---|
| 2002 | xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock ); |
---|
[407] | 2003 | |
---|
[585] | 2004 | // take lock protecting "forks" counter |
---|
| 2005 | remote_busylock_acquire( forks_lock_xp ); |
---|
[407] | 2006 | |
---|
[585] | 2007 | // get number of pending forks from page descriptor |
---|
| 2008 | uint32_t forks = hal_remote_l32( forks_xp ); |
---|
[441] | 2009 | |
---|
[585] | 2010 | if( forks ) // pending fork => allocate a new page, and copy old to new |
---|
| 2011 | { |
---|
| 2012 | // allocate a new physical page |
---|
| 2013 | page_xp = vmm_page_allocate( vseg , vpn ); |
---|
| 2014 | if( page_xp == XPTR_NULL ) |
---|
| 2015 | { |
---|
| 2016 | printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n", |
---|
| 2017 | __FUNCTION__ , vpn, process->pid ); |
---|
[441] | 2018 | |
---|
[585] | 2019 | // release GPT lock in write mode |
---|
| 2020 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
[441] | 2021 | |
---|
[585] | 2022 | // release lock protecting "forks" counter |
---|
| 2023 | remote_busylock_release( forks_lock_xp ); |
---|
[441] | 2024 | |
---|
[585] | 2025 | return EXCP_KERNEL_PANIC; |
---|
| 2026 | } |
---|
[441] | 2027 | |
---|
[585] | 2028 | // compute allocated page PPN |
---|
| 2029 | new_ppn = ppm_page2ppn( page_xp ); |
---|
[441] | 2030 | |
---|
[585] | 2031 | // copy old page content to new page |
---|
| 2032 | xptr_t old_base_xp = ppm_ppn2base( old_ppn ); |
---|
| 2033 | xptr_t new_base_xp = ppm_ppn2base( new_ppn ); |
---|
| 2034 | memcpy( GET_PTR( new_base_xp ), |
---|
| 2035 | GET_PTR( old_base_xp ), |
---|
| 2036 | CONFIG_PPM_PAGE_SIZE ); |
---|
[441] | 2037 | |
---|
[585] | 2038 | // decrement pending forks counter in page descriptor |
---|
| 2039 | hal_remote_atomic_add( forks_xp , -1 ); |
---|
[441] | 2040 | |
---|
[585] | 2041 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
| 2042 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 2043 | printk("\n[%s] thread[%x,%x] : pending forks => allocate a new PPN %x\n", |
---|
[585] | 2044 | __FUNCTION__, process->pid, this->trdid, new_ppn ); |
---|
| 2045 | #endif |
---|
[440] | 2046 | |
---|
[585] | 2047 | } |
---|
| 2048 | else // no pending fork => keep the existing page |
---|
| 2049 | { |
---|
[1] | 2050 | |
---|
[585] | 2051 | #if(DEBUG_VMM_HANDLE_COW & 1) |
---|
| 2052 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 2053 | printk("\n[%s] thread[%x,%x] no pending forks => keep existing PPN %x\n", |
---|
[585] | 2054 | __FUNCTION__, process->pid, this->trdid, old_ppn ); |
---|
| 2055 | #endif |
---|
| 2056 | new_ppn = old_ppn; |
---|
| 2057 | } |
---|
[1] | 2058 | |
---|
[585] | 2059 | // release lock protecting "forks" counter |
---|
| 2060 | remote_busylock_release( forks_lock_xp ); |
---|
| 2061 | |
---|
| 2062 | // build new_attr : reset COW and set WRITABLE, |
---|
| 2063 | new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW); |
---|
| 2064 | |
---|
| 2065 | // update the relevant GPT |
---|
| 2066 | // - private vseg => update local GPT |
---|
| 2067 | // - public vseg => update all GPT copies |
---|
| 2068 | if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) |
---|
[1] | 2069 | { |
---|
[585] | 2070 | hal_gpt_set_pte( gpt_xp, |
---|
| 2071 | vpn, |
---|
| 2072 | new_attr, |
---|
| 2073 | new_ppn ); |
---|
[1] | 2074 | } |
---|
[585] | 2075 | else |
---|
[1] | 2076 | { |
---|
[585] | 2077 | if( ref_cxy == local_cxy ) // reference cluster is local |
---|
| 2078 | { |
---|
| 2079 | vmm_global_update_pte( process, |
---|
| 2080 | vpn, |
---|
| 2081 | new_attr, |
---|
| 2082 | new_ppn ); |
---|
| 2083 | } |
---|
| 2084 | else // reference cluster is remote |
---|
| 2085 | { |
---|
| 2086 | rpc_vmm_global_update_pte_client( ref_cxy, |
---|
| 2087 | ref_ptr, |
---|
| 2088 | vpn, |
---|
| 2089 | new_attr, |
---|
| 2090 | new_ppn ); |
---|
| 2091 | } |
---|
[1] | 2092 | } |
---|
| 2093 | |
---|
[585] | 2094 | // release GPT lock in write mode |
---|
| 2095 | remote_rwlock_wr_release( gpt_lock_xp ); |
---|
[21] | 2096 | |
---|
[585] | 2097 | #if DEBUG_VMM_HANDLE_COW |
---|
| 2098 | cycle = (uint32_t)hal_get_cycles(); |
---|
| 2099 | if( DEBUG_VMM_HANDLE_COW < cycle ) |
---|
[595] | 2100 | printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n", |
---|
[585] | 2101 | __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); |
---|
| 2102 | #endif |
---|
[313] | 2103 | |
---|
[585] | 2104 | return EXCP_NON_FATAL; |
---|
[1] | 2105 | |
---|
[585] | 2106 | } // end vmm_handle_cow() |
---|
| 2107 | |
---|