Changeset 625 for trunk/kernel/mm/vmm.c
- Timestamp: Apr 10, 2019, 10:09:39 AM
- File: 1 edited
Legend:
- unprefixed lines are unchanged context
- lines prefixed "+" were added in r625
- lines prefixed "-" were removed in r625
- "…" marks elided unchanged lines
trunk/kernel/mm/vmm.c
r624 → r625:

   * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
   *          Mohamed Lamine Karaoui (2015)
-  *          Alain Greiner (2016,2017,2018)
+  *          Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
…
  extern process_t process_zero;      // allocated in cluster.c

- ///////////////////////////////////////
- error_t vmm_init( process_t * process )
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_create_vseg() function, and implements
+ // the VMM STACK specific allocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm      : [in]  pointer on VMM.
+ // @ ltid     : [in]  requested slot == local user thread identifier.
+ // @ vpn_base : [out] first allocated page
+ // @ vpn_size : [out] number of allocated pages
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static void vmm_stack_alloc( vmm_t  * vmm,
+                              ltid_t   ltid,
+                              vpn_t  * vpn_base,
+                              vpn_t  * vpn_size )
  {
- error_t error;
+     // check ltid argument
+     assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+             "slot index %d too large for an user stack vseg", ltid );
+
+     // get stack allocator pointer
+     stack_mgr_t * mgr = &vmm->stack_mgr;
+
+     // get lock on stack allocator
+     busylock_acquire( &mgr->lock );
+
+     // check requested slot is available
+     assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
+             "slot index %d already allocated", ltid );
+
+     // update bitmap
+     bitmap_set( &mgr->bitmap , ltid );
+
+     // release lock on stack allocator
+     busylock_release( &mgr->lock );
+
+     // returns vpn_base, vpn_size (first page non allocated)
+     *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
+     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
+
+ }  // end vmm_stack_alloc()
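The "+ 1" / "- 1" adjustments above leave the first page of every fixed-size slot unmapped, presumably acting as a guard page below the stack. A minimal sketch of the resulting arithmetic, with illustrative values for the two configuration constants (not the actual ALMOS-MKH configuration):

    #include <stdint.h>

    /* Illustration only: assumed values for the configuration constants. */
    #define STACK_ZONE_BASE  0x100000u  /* mgr->vpn_base : first VPN of the STACK zone */
    #define STACK_SLOT_SIZE  0x1000u    /* CONFIG_VMM_STACK_SIZE : pages per slot */

    /* vpn_base / vpn_size returned by vmm_stack_alloc() for slot ltid: the first
       page of each slot is never mapped, so a stack overflow faults instead of
       silently running into the neighbouring slot. */
    static inline uint32_t slot_vpn_base( uint32_t ltid )
    { return STACK_ZONE_BASE + ltid * STACK_SLOT_SIZE + 1; }

    static inline uint32_t slot_vpn_size( void )
    { return STACK_SLOT_SIZE - 1; }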
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_remove_vseg() function, and implements
+ // the VMM STACK specific desallocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm   : [in] pointer on VMM.
+ // @ vseg  : [in] pointer on released vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static void vmm_stack_free( vmm_t  * vmm,
+                             vseg_t * vseg )
+ {
+     // get stack allocator pointer
+     stack_mgr_t * mgr = &vmm->stack_mgr;
+
+     // compute slot index
+     uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
+
+     // check index
+     assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+             "slot index %d too large for an user stack vseg", index );
+
+     // check released slot is allocated
+     assert( (bitmap_state( &mgr->bitmap , index ) == true),
+             "released slot index %d non allocated", index );
+
+     // get lock on stack allocator
+     busylock_acquire( &mgr->lock );
+
+     // update stacks_bitmap
+     bitmap_clear( &mgr->bitmap , index );
+
+     // release lock on stack allocator
+     busylock_release( &mgr->lock );
+
+ }  // end vmm_stack_free()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_create_vseg() function, and implements
+ // the VMM MMAP specific allocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm      : [in]  pointer on VMM.
+ // @ npages   : [in]  requested number of pages.
+ // @ vpn_base : [out] first allocated page.
+ // @ vpn_size : [out] actual number of allocated pages.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static error_t vmm_mmap_alloc( vmm_t * vmm,
+                                vpn_t   npages,
+                                vpn_t * vpn_base,
+                                vpn_t * vpn_size )
+ {
+     uint32_t   order;
+     xptr_t     vseg_xp;
+     vseg_t   * vseg;
+     vpn_t      base;
+     vpn_t      size;
+     vpn_t      free;
+
+ #if DEBUG_VMM_MMAP_ALLOC
+     thread_t * this  = CURRENT_THREAD;
+     uint32_t   cycle = (uint32_t)hal_get_cycles();
+     if( DEBUG_VMM_MMAP_ALLOC < cycle )
+         printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
+                __FUNCTION__, this->process->pid, this->trdid, cycle );
+ #endif
+
+     // number of allocated pages must be power of 2
+     // compute actual size and order
+     size  = POW2_ROUNDUP( npages );
+     order = bits_log2( size );
+
+     // get mmap allocator pointer
+     mmap_mgr_t * mgr = &vmm->mmap_mgr;
+
+     // build extended pointer on root of zombi_list[order]
+     xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
+
+     // take lock protecting zombi_lists
+     busylock_acquire( &mgr->lock );
+
+     // get vseg from zombi_list or from mmap zone
+     if( xlist_is_empty( root_xp ) )          // from mmap zone
+     {
+         // check overflow
+         free = mgr->first_free_vpn;
+         if( (free + size) > mgr->vpn_size ) return -1;
+
+         // update MMAP allocator
+         mgr->first_free_vpn += size;
+
+         // compute base
+         base = free;
+     }
+     else                                     // from zombi_list
+     {
+         // get pointer on zombi vseg from zombi_list
+         vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
+         vseg    = GET_PTR( vseg_xp );
+
+         // remove vseg from free-list
+         xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+         // compute base
+         base = vseg->vpn_base;
+     }
+
+     // release lock
+     busylock_release( &mgr->lock );
+
+ #if DEBUG_VMM_MMAP_ALLOC
+     cycle = (uint32_t)hal_get_cycles();
+     if( DEBUG_VMM_DESTROY < cycle )
+         printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
+                __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
+ #endif
+
+     // returns vpn_base, vpn_size
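Requests are rounded up to a power of 2 so that a vseg later released by vmm_mmap_free() can be recycled for any request of the same order. A self-contained sketch of the rounding, with local stand-ins for the kernel's POW2_ROUNDUP and bits_log2 (whose exact implementations are not shown in this changeset):

    #include <stdint.h>

    static uint32_t pow2_roundup( uint32_t n )   /* smallest power of 2 >= n */
    {
        uint32_t p = 1;
        while( p < n ) p <<= 1;
        return p;
    }

    static uint32_t order_of( uint32_t p )       /* log2 of a power of 2 */
    {
        uint32_t order = 0;
        while( p > 1 ) { p >>= 1; order++; }
        return order;
    }

    /* e.g. npages == 5  ->  size == 8 , order == 3  ->  zombi_list[3] */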
+     *vpn_base = base;
+     *vpn_size = size;
+     return 0;
+
+ }  // end vmm_mmap_alloc()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_remove_vseg() function, and implements
+ // the VMM MMAP specific desallocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm   : [in] pointer on VMM.
+ // @ vseg  : [in] pointer on released vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static void vmm_mmap_free( vmm_t  * vmm,
+                            vseg_t * vseg )
+ {
+     // get pointer on mmap allocator
+     mmap_mgr_t * mgr = &vmm->mmap_mgr;
+
+     // compute zombi_list order
+     uint32_t order = bits_log2( vseg->vpn_size );
+
+     // take lock protecting zombi lists
+     busylock_acquire( &mgr->lock );
+
+     // update relevant zombi_list
+     xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
+                      XPTR( local_cxy , &vseg->xlist ) );
+
+     // release lock
+     busylock_release( &mgr->lock );
+
+ }  // end of vmm_mmap_free()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function registers one vseg in the VSL of a local process descriptor.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // vmm  : [in] pointer on VMM.
+ // vseg : [in] pointer on vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
+                              vseg_t * vseg )
+ {
+     // update vseg descriptor
+     vseg->vmm = vmm;
+
+     // increment vsegs number
+     vmm->vsegs_nr++;
+
+     // add vseg in vmm list
+     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
+                     XPTR( local_cxy , &vseg->xlist ) );
+
+ }  // end vmm_attach_vseg_from_vsl()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function removes one vseg from the VSL of a local process descriptor.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // vmm  : [in] pointer on VMM.
+ // vseg : [in] pointer on vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
+                                vseg_t * vseg )
+ {
+     // update vseg descriptor
+     vseg->vmm = NULL;
+
+     // decrement vsegs number
+     vmm->vsegs_nr--;
+
+     // remove vseg from VSL
+     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+ }  // end vmm_detach_from_vsl()
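With this changeset the attach/detach helpers no longer take the VSL rwlock themselves: every caller is expected to hold it in write mode, as the modified vmm_fork_copy(), vmm_create_vseg() and vmm_get_vseg() below all do. The expected calling pattern, extracted as a sketch:

    // Caller-side locking pattern assumed by the two helpers above.
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );

    remote_rwlock_wr_acquire( lock_xp );     // take the VSL lock in write mode
    vmm_attach_vseg_to_vsl( vmm , vseg );    // updates vseg->vmm, vsegs_nr and the xlist
    remote_rwlock_wr_release( lock_xp );     // release the VSL lock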
+
+ ////////////////////////////////////////////
+ error_t vmm_user_init( process_t * process )
+ {
  vseg_t  * vseg_args;
  vseg_t  * vseg_envs;
…
  uint32_t  i;

- #if DEBUG_VMM_INIT
+ #if DEBUG_VMM_USER_INIT
  thread_t * this  = CURRENT_THREAD;
  uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_INIT )
+ if( DEBUG_VMM_USER_INIT )
  printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
  __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
…
  vmm_t * vmm = &process->vmm;

- // initialize VSL (empty)
- vmm->vsegs_nr = 0;
- xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
- remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
-
+ // check UTILS zone
  assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
           (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
           "UTILS zone too small\n" );

+ // check STACK zone
  assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
           (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
           "STACK zone too small\n");

- // register args vseg in VSL
+ // register "args" vseg in VSL
  base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
  size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
…
           XPTR_NULL,      // mapper_xp unused
           local_cxy );
-
  if( vseg_args == NULL )
  {
…
  vmm->args_vpn_base = base;

- // register the envs vseg in VSL
+ // register "envs" vseg in VSL
  base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
  size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
…
           XPTR_NULL,      // mapper_xp unused
           local_cxy );
-
  if( vseg_envs == NULL )
  {
…
  vmm->envs_vpn_base = base;
-
- // create GPT (empty)
- error = hal_gpt_create( &vmm->gpt );
-
- if( error )
- {
-     printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
-     return -1;
- }
-
- // initialize GPT lock
- remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
-
- // update process VMM with kernel vsegs as required by the hardware architecture
- error = hal_vmm_kernel_update( process );
-
- if( error )
- {
-     printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
-     return -1;
- }

  // initialize STACK allocator
…
  vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
  busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
- for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
+ for( i = 0 ; i < 32 ; i++ )
+ {
+     xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
+ }

  // initialize instrumentation counters
…
  hal_fence();

- #if DEBUG_VMM_INIT
+ #if DEBUG_VMM_USER_INIT
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_INIT )
+ if( DEBUG_VMM_USER_INIT )
  printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
…
  return 0;

- }  // end vmm_init()
+ }  // end vmm_user_init()
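The bases and sizes handled by vmm_user_init() are expressed in pages and converted to byte addresses with CONFIG_PPM_PAGE_SHIFT. A small illustration of the "args"/"envs" layout, assuming 4 KB pages (CONFIG_PPM_PAGE_SHIFT == 12 is an assumption for this sketch):

    /* Assuming CONFIG_PPM_PAGE_SHIFT == 12 (4 KB pages) for illustration. */
    intptr_t args_base = CONFIG_VMM_UTILS_BASE << 12;   /* "args" vseg base (bytes) */
    uint32_t args_size = CONFIG_VMM_ARGS_SIZE  << 12;   /* "args" vseg size (bytes) */

    /* the "envs" vseg follows the "args" vseg immediately in the UTILS zone: */
    intptr_t envs_base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << 12;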
cycle %d\n", 175 385 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); … … 178 388 return 0; 179 389 180 } // end vmm_init() 181 390 } // end vmm_user_init() 182 391 183 392 ////////////////////////////////////////// 184 void vmm_attach_vseg_to_vsl( vmm_t * vmm, 185 vseg_t * vseg ) 393 void vmm_user_reset( process_t * process ) 186 394 { 187 // build extended pointer on rwlock protecting VSL 188 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 189 190 // get rwlock in write mode 191 remote_rwlock_wr_acquire( lock_xp ); 192 193 // update vseg descriptor 194 vseg->vmm = vmm; 195 196 // increment vsegs number 197 vmm->vsegs_nr++; 198 199 // add vseg in vmm list 200 xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ), 201 XPTR( local_cxy , &vseg->xlist ) ); 202 203 // release rwlock in write mode 204 remote_rwlock_wr_release( lock_xp ); 205 } 206 207 //////////////////////////////////////////// 208 void vmm_detach_vseg_from_vsl( vmm_t * vmm, 209 vseg_t * vseg ) 210 { 211 // get vseg type 212 uint32_t type = vseg->type; 213 214 // build extended pointer on rwlock protecting VSL 215 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); 216 217 // get rwlock in write mode 218 remote_rwlock_wr_acquire( lock_xp ); 219 220 // update vseg descriptor 221 vseg->vmm = NULL; 222 223 // remove vseg from VSL 224 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 225 226 // release rwlock in write mode 227 remote_rwlock_wr_release( lock_xp ); 228 229 // release the stack slot to VMM stack allocator if STACK type 230 if( type == VSEG_TYPE_STACK ) 231 { 232 // get pointer on stack allocator 233 stack_mgr_t * mgr = &vmm->stack_mgr; 234 235 // compute slot index 236 uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE); 237 238 // update stacks_bitmap 239 busylock_acquire( &mgr->lock ); 240 bitmap_clear( &mgr->bitmap , index ); 241 busylock_release( &mgr->lock ); 242 } 243 244 // release the vseg to VMM mmap allocator if MMAP type 245 if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) ) 246 { 247 // get pointer on mmap allocator 248 mmap_mgr_t * mgr = &vmm->mmap_mgr; 249 250 // compute zombi_list index 251 uint32_t index = bits_log2( vseg->vpn_size ); 252 253 // update zombi_list 254 busylock_acquire( &mgr->lock ); 255 list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); 256 busylock_release( &mgr->lock ); 257 } 258 259 // release physical memory allocated for vseg if no MMAP and no kernel type 260 if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) && 261 (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) ) 262 { 263 vseg_free( vseg ); 264 } 265 266 } // end vmm_remove_vseg_from_vsl() 395 xptr_t vseg_xp; 396 vseg_t * vseg; 397 vseg_type_t vseg_type; 398 399 #if DEBUG_VMM_USER_RESET 400 uint32_t cycle = (uint32_t)hal_get_cycles(); 401 thread_t * this = CURRENT_THREAD; 402 if( DEBUG_VMM_USER_RESET < cycle ) 403 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", 404 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle ); 405 #endif 406 407 #if (DEBUG_VMM_USER_RESET & 1 ) 408 if( DEBUG_VMM_USER_RESET < cycle ) 409 hal_vmm_display( process , true ); 410 #endif 411 412 // get pointer on local VMM 413 vmm_t * vmm = &process->vmm; 414 415 // build extended pointer on VSL root and VSL lock 416 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 417 xptr_t lock_xp = XPTR( local_cxy , 
&vmm->vsl_lock ); 418 419 // take the VSL lock 420 remote_rwlock_wr_acquire( lock_xp ); 421 422 // scan the VSL to delete all non kernel vsegs 423 // (we don't use a FOREACH in case of item deletion) 424 xptr_t iter_xp; 425 xptr_t next_xp; 426 for( iter_xp = hal_remote_l64( root_xp ) ; 427 iter_xp != root_xp ; 428 iter_xp = next_xp ) 429 { 430 // save extended pointer on next item in xlist 431 next_xp = hal_remote_l64( iter_xp ); 432 433 // get pointers on current vseg in VSL 434 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 435 vseg = GET_PTR( vseg_xp ); 436 vseg_type = vseg->type; 437 438 #if( DEBUG_VMM_USER_RESET & 1 ) 439 if( DEBUG_VMM_USER_RESET < cycle ) 440 printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n", 441 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); 442 #endif 443 // delete non kernel vseg 444 if( (vseg_type != VSEG_TYPE_KCODE) && 445 (vseg_type != VSEG_TYPE_KDATA) && 446 (vseg_type != VSEG_TYPE_KDEV ) ) 447 { 448 // remove vseg from VSL 449 vmm_remove_vseg( process , vseg ); 450 451 #if( DEBUG_VMM_USER_RESET & 1 ) 452 if( DEBUG_VMM_USER_RESET < cycle ) 453 printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n", 454 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); 455 #endif 456 } 457 else 458 { 459 460 #if( DEBUG_VMM_USER_RESET & 1 ) 461 if( DEBUG_VMM_USER_RESET < cycle ) 462 printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n", 463 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); 464 #endif 465 } 466 } // end loop on vsegs in VSL 467 468 // release the VSL lock 469 remote_rwlock_wr_release( lock_xp ); 470 471 // FIXME il faut gérer les process copies... 472 473 #if DEBUG_VMM_USER_RESET 474 cycle = (uint32_t)hal_get_cycles(); 475 if( DEBUG_VMM_USER_RESET < cycle ) 476 printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n", 477 __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle ); 478 #endif 479 480 } // end vmm_user_reset() 267 481 268 482 //////////////////////////////////////////////// … … 507 721 cxy_t page_cxy; 508 722 xptr_t forks_xp; // extended pointer on forks counter in page descriptor 509 xptr_t lock_xp; // extended pointer on lock protecting the forks counter510 723 xptr_t parent_root_xp; 511 724 bool_t mapped; … … 528 741 child_vmm = &child_process->vmm; 529 742 530 // get extended pointer on lock protecting the parent VSL 531 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock ); 532 533 // initialize the lock protecting the child VSL 534 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK ); 743 // initialize the locks protecting the child VSL and GPT 744 remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT ); 745 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL ); 535 746 536 747 // initialize the child VSL as empty … … 538 749 child_vmm->vsegs_nr = 0; 539 750 540 // create thechild GPT751 // create an empty child GPT 541 752 error = hal_gpt_create( &child_vmm->gpt ); 542 543 753 if( error ) 544 754 { … … 547 757 } 548 758 549 // build extended pointer on parent VSL 759 // build extended pointer on parent VSL root and lock 550 760 parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root ); 761 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock ); 551 762 552 763 // take the lock protecting the parent VSL in read mode … … 556 767 XLIST_FOREACH( parent_root_xp , iter_xp ) 557 768 { 558 // get local and 
  vmm_t * vmm = &process->vmm;

- // get extended pointer on VSL root and VSL lock
- xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+ // build extended pointer on VSL root, VSL lock and GPT lock
+ xptr_t vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+ xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+ xptr_t gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
+
+ // take the VSL lock
+ remote_rwlock_wr_acquire( vsl_lock_xp );

  // scan the VSL to delete all registered vsegs
- // (don't use a FOREACH for item deletion in xlist)
-
- while( !xlist_is_empty( root_xp ) )
+ // (we don't use a FOREACH in case of item deletion)
+ xptr_t  iter_xp;
+ xptr_t  next_xp;
+ for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
+      iter_xp != vsl_root_xp ;
+      iter_xp = next_xp )
  {
-     // get pointer on first vseg in VSL
-     vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
-     vseg    = GET_PTR( vseg_xp );
+     // save extended pointer on next item in xlist
+     next_xp = hal_remote_l64( iter_xp );
+
+     // get pointers on current vseg in VSL
+     vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
+     vseg    = GET_PTR( vseg_xp );

      // delete vseg and release physical pages
-     vmm_delete_vseg( process->pid , vseg->min );
+     vmm_remove_vseg( process , vseg );

  #if( DEBUG_VMM_DESTROY & 1 )
…
  }

+ // release the VSL lock
+ remote_rwlock_wr_release( vsl_lock_xp );
+
- // remove all vsegs from zombi_lists in MMAP allocator
+ // remove all registered MMAP vsegs
+ // from zombi_lists in MMAP allocator
  uint32_t i;
  for( i = 0 ; i<32 ; i++ )
  {
-     while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
+     // build extended pointer on zombi_list[i]
+     xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
+
+     // scan zombi_list[i]
+     while( !xlist_is_empty( root_xp ) )
      {
-         vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );
+         vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
+         vseg    = GET_PTR( vseg_xp );

  #if( DEBUG_VMM_DESTROY & 1 )
…
          vseg->vmm = NULL;

-         // remove vseg from xlist
+         // remove vseg from zombi_list
          xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
…
      }
  }

+ // take the GPT lock
+ remote_rwlock_wr_acquire( gpt_lock_xp );
+
  // release memory allocated to the GPT itself
  hal_gpt_destroy( &vmm->gpt );
+
+ // release the GPT lock
+ remote_rwlock_wr_release( gpt_lock_xp );

  #if DEBUG_VMM_DESTROY
…
&vmm->mmap_mgr.zombi_list[i] ); 995 996 // scan zombi_list[i] 997 while( !xlist_is_empty( root_xp ) ) 756 998 { 757 vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist ); 999 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); 1000 vseg = GET_PTR( vseg_xp ); 758 1001 759 1002 #if( DEBUG_VMM_DESTROY & 1 ) … … 765 1008 vseg->vmm = NULL; 766 1009 767 // remove vseg from xlist1010 // remove vseg from zombi_list 768 1011 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 769 1012 … … 779 1022 } 780 1023 1024 // take the GPT lock 1025 remote_rwlock_wr_acquire( gpt_lock_xp ); 1026 781 1027 // release memory allocated to the GPT itself 782 1028 hal_gpt_destroy( &vmm->gpt ); 1029 1030 // release the GPT lock 1031 remote_rwlock_wr_release( gpt_lock_xp ); 783 1032 784 1033 #if DEBUG_VMM_DESTROY … … 816 1065 } // end vmm_check_conflict() 817 1066 818 //////////////////////////////////////////////////////////////////////////////////////////// 819 // This static function is called by the vmm_create_vseg() function, and implements 820 // the VMM stack_vseg specific allocator. 821 //////////////////////////////////////////////////////////////////////////////////////////// 822 // @ vmm : pointer on VMM. 823 // @ vpn_base : (return value) first allocated page 824 // @ vpn_size : (return value) number of allocated pages 825 //////////////////////////////////////////////////////////////////////////////////////////// 826 static error_t vmm_stack_alloc( vmm_t * vmm, 827 vpn_t * vpn_base, 828 vpn_t * vpn_size ) 829 { 830 // get stack allocator pointer 831 stack_mgr_t * mgr = &vmm->stack_mgr; 832 833 // get lock on stack allocator 834 busylock_acquire( &mgr->lock ); 835 836 // get first free slot index in bitmap 837 int32_t index = bitmap_ffc( &mgr->bitmap , 4 ); 838 if( (index < 0) || (index > 31) ) 839 { 840 busylock_release( &mgr->lock ); 841 return 0xFFFFFFFF; 842 } 843 844 // update bitmap 845 bitmap_set( &mgr->bitmap , index ); 846 847 // release lock on stack allocator 848 busylock_release( &mgr->lock ); 849 850 // returns vpn_base, vpn_size (one page non allocated) 851 *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1; 852 *vpn_size = CONFIG_VMM_STACK_SIZE - 1; 853 return 0; 854 855 } // end vmm_stack_alloc() 856 857 //////////////////////////////////////////////////////////////////////////////////////////// 858 // This static function is called by the vmm_create_vseg() function, and implements 859 // the VMM MMAP specific allocator. 860 //////////////////////////////////////////////////////////////////////////////////////////// 861 // @ vmm : [in] pointer on VMM. 862 // @ npages : [in] requested number of pages. 863 // @ vpn_base : [out] first allocated page. 864 // @ vpn_size : [out] actual number of allocated pages. 
- ////////////////////////////////////////////////////////////////////////////////////////////
- static error_t vmm_mmap_alloc( vmm_t * vmm,
-                                vpn_t   npages,
-                                vpn_t * vpn_base,
-                                vpn_t * vpn_size )
- {
-     uint32_t   index;
-     vseg_t   * vseg;
-     vpn_t      base;
-     vpn_t      size;
-     vpn_t      free;
-
- #if DEBUG_VMM_MMAP_ALLOC
-     thread_t * this  = CURRENT_THREAD;
-     uint32_t   cycle = (uint32_t)hal_get_cycles();
-     if( DEBUG_VMM_MMAP_ALLOC < cycle )
-         printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
-                __FUNCTION__, this->process->pid, this->trdid, cycle );
- #endif
-
-     // vseg size must be power of 2
-     // compute actual size and index in zombi_list array
-     size  = POW2_ROUNDUP( npages );
-     index = bits_log2( size );
-
-     // get mmap allocator pointer
-     mmap_mgr_t * mgr = &vmm->mmap_mgr;
-
-     // get lock on mmap allocator
-     busylock_acquire( &mgr->lock );
-
-     // get vseg from zombi_list or from mmap zone
-     if( list_is_empty( &mgr->zombi_list[index] ) )   // from mmap zone
-     {
-         // check overflow
-         free = mgr->first_free_vpn;
-         if( (free + size) > mgr->vpn_size ) return -1;
-
-         // update MMAP allocator
-         mgr->first_free_vpn += size;
-
-         // compute base
-         base = free;
-     }
-     else                                             // from zombi_list
-     {
-         // get pointer on zombi vseg from zombi_list
-         vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
-
-         // remove vseg from free-list
-         list_unlink( &vseg->zlist );
-
-         // compute base
-         base = vseg->vpn_base;
-     }
-
-     // release lock on mmap allocator
-     busylock_release( &mgr->lock );
-
- #if DEBUG_VMM_MMAP_ALLOC
-     cycle = (uint32_t)hal_get_cycles();
-     if( DEBUG_VMM_DESTROY < cycle )
-         printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
-                __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
- #endif
-
-     // returns vpn_base, vpn_size
-     *vpn_base = base;
-     *vpn_size = size;
-     return 0;
-
- }  // end vmm_mmap_alloc()

  ////////////////////////////////////////////////
…
  {
      // get vpn_base and vpn_size from STACK allocator
-     error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
-     if( error )
-     {
-         printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
-                __FUNCTION__ , process->pid , local_cxy );
-         return NULL;
-     }
+     vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );

      // compute vseg base and size from vpn_base and vpn_size
…
                 cxy );

+ // build extended pointer on VSL lock
+ xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+ // take the VSL lock in write mode
+ remote_rwlock_wr_acquire( lock_xp );
+
  // attach vseg to VSL
  vmm_attach_vseg_to_vsl( vmm , vseg );
+
+ // release the VSL lock
+ remote_rwlock_wr_release( lock_xp );

  #if DEBUG_VMM_CREATE_VSEG
…
  }  // vmm_create_vseg()
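For a STACK vseg the base argument is now interpreted as the requesting thread's LTID and forwarded to vmm_stack_alloc() as the slot index, so the allocation can no longer fail once the assert passes. A hypothetical call site (the argument order is taken from the vmm_create_vseg() calls in vmm_user_init() above; the surrounding thread-creation code is not part of this changeset):

    // Hypothetical caller: allocate the private user-stack vseg of a new thread.
    vseg_t * stack_vseg = vmm_create_vseg( process,          // local process descriptor
                                           VSEG_TYPE_STACK,  // vseg type
                                           ltid,             // slot index, not an address
                                           0,                // size: fixed by the allocator
                                           0, 0,             // file offset / size: unused
                                           XPTR_NULL,        // no mapper
                                           local_cxy );      // target cluster
    if( stack_vseg == NULL ) { /* report error */ }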

- ///////////////////////////////////
- void vmm_delete_vseg( pid_t    pid,
-                       intptr_t vaddr )
+ //////////////////////////////////////////
+ void vmm_remove_vseg( process_t * process,
+                       vseg_t    * vseg )
  {
- process_t * process;    // local pointer on local process
- vmm_t     * vmm;        // local pointer on local process VMM
- vseg_t    * vseg;       // local pointer on local vseg containing vaddr
- gpt_t     * gpt;        // local pointer on local process GPT
+ vmm_t     * vmm;        // local pointer on process VMM
+ bool_t      is_ref;     // local process is reference process
+ uint32_t    vseg_type;  // vseg type
  vpn_t       vpn;        // VPN of current PTE
  vpn_t       vpn_min;    // VPN of first PTE
…
  cxy_t       page_cxy;   // page descriptor cluster
  page_t    * page_ptr;   // page descriptor pointer
- xptr_t      forks_xp;   // extended pointer on pending forks counter
- xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
- uint32_t    forks;      // actual number of pending forks
- uint32_t    vseg_type;  // vseg type
-
- #if DEBUG_VMM_DELETE_VSEG
- uint32_t   cycle = (uint32_t)hal_get_cycles();
- thread_t * this  = CURRENT_THREAD;
- if( DEBUG_VMM_DELETE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
- #endif
-
- // get local pointer on local process descriptor
- process = cluster_get_local_process_from_pid( pid );
-
- if( process == NULL )
- {
-     printk("\n[ERROR] in %s : cannot get local process descriptor\n",
-     __FUNCTION__ );
-     return;
- }
-
- // get pointers on local process VMM and GPT
+ xptr_t      count_xp;   // extended pointer on page refcount
+ uint32_t    count;      // current value of page refcount
+
+ // check arguments
+ assert( (process != NULL), "process argument is NULL" );
+ assert( (vseg    != NULL), "vseg argument is NULL" );
+
+ // compute is_ref
+ is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
+
+ // get pointers on local process VMM
  vmm = &process->vmm;
- gpt = &process->vmm.gpt;
-
- // get local pointer on vseg containing vaddr
- vseg = vmm_vseg_from_vaddr( vmm , vaddr );
-
- if( vseg == NULL )
- {
-     printk("\n[ERROR] in %s : cannot get vseg descriptor\n",
-     __FUNCTION__ );
-     return;
- }

  // get relevant vseg infos
…
  vpn_max = vpn_min + vseg->vpn_size;

+ #if DEBUG_VMM_REMOVE_VSEG
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ thread_t * this  = CURRENT_THREAD;
+ if( DEBUG_VMM_REMOVE_VSEG < cycle )
+ printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid,
+ process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
+ #endif
+
- // loop to invalidate all vseg PTEs in GPT
+ // loop on PTEs in GPT
  for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
  {
-     // get ppn and attr from GPT entry
-     hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
-
-     if( attr & GPT_MAPPED )  // entry is mapped
+     // get ppn and attr
+     hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
+
+     if( attr & GPT_MAPPED )  // PTE is mapped
      {

- #if( DEBUG_VMM_DELETE_VSEG & 1 )
- if( DEBUG_VMM_DELETE_VSEG < cycle )
- printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
+ #if( DEBUG_VMM_REMOVE_VSEG & 1 )
+ if( DEBUG_VMM_REMOVE_VSEG < cycle )
+ printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
  #endif
          // unmap GPT entry in local GPT
-         hal_gpt_reset_pte( gpt , vpn );
+         hal_gpt_reset_pte( &vmm->gpt , vpn );
+
+         // get pointers on physical page descriptor
+         page_xp  = ppm_ppn2page( ppn );
+         page_cxy = GET_CXY( page_xp );
+         page_ptr = GET_PTR( page_xp );
+
+         // decrement page refcount
+         count_xp = XPTR( page_cxy , &page_ptr->refcount );
+         count    = hal_remote_atomic_add( count_xp , -1 );
-         // the allocated page is not released for kernel vsegs
-         if( (vseg_type != VSEG_TYPE_KCODE) &&
-             (vseg_type != VSEG_TYPE_KDATA) &&
-             (vseg_type != VSEG_TYPE_KDEV ) )
-         {
-             // get extended pointer on physical page descriptor
-             page_xp  = ppm_ppn2page( ppn );
-             page_cxy = GET_CXY( page_xp );
-             page_ptr = GET_PTR( page_xp );
-
-             // FIXME This code must be re-written, as the actual release depends on vseg type,
-             // the reference cluster, the page refcount and/or the forks counter...
-
-             // get extended pointers on forks and lock fields
-             forks_xp = XPTR( page_cxy , &page_ptr->forks );
-             lock_xp  = XPTR( page_cxy , &page_ptr->lock );
-
-             // get the lock protecting the page
-             remote_busylock_acquire( lock_xp );
-
-             // get pending forks counter
-             forks = hal_remote_l32( forks_xp );
-
-             if( forks )  // decrement pending forks counter
-             {
-                 // update forks counter
-                 hal_remote_atomic_add( forks_xp , -1 );
-
-                 // release the lock protecting the page
-                 remote_busylock_release( lock_xp );
-             }
-             else         // release physical page to relevant cluster
-             {
-                 // release the lock protecting the page
-                 remote_busylock_release( lock_xp );
-
-                 // release the page to kmem
-                 if( page_cxy == local_cxy )  // local cluster
-                 {
-                     req.type = KMEM_PAGE;
-                     req.ptr  = page_ptr;
-                     kmem_free( &req );
-                 }
-                 else                         // remote cluster
-                 {
-                     rpc_pmem_release_pages_client( page_cxy , page_ptr );
-                 }
-
- #if( DEBUG_VMM_DELETE_VSEG & 1 )
- if( DEBUG_VMM_DELETE_VSEG < cycle )
- printk("- release ppn %x\n", ppn );
- #endif
-             }
-         }
+         // compute the ppn_release condition depending on vseg type
+         bool_t ppn_release;
+         if( (vseg_type == VSEG_TYPE_FILE)  ||
+             (vseg_type == VSEG_TYPE_KCODE) ||
+             (vseg_type == VSEG_TYPE_KDATA) ||
+             (vseg_type == VSEG_TYPE_KDEV) )
+         {
+             // no physical page release for FILE and KERNEL
+             ppn_release = false;
+         }
+         else if( (vseg_type == VSEG_TYPE_CODE) ||
+                  (vseg_type == VSEG_TYPE_STACK) )
+         {
+             // always release physical page for private vsegs
+             ppn_release = true;
+         }
+         else if( (vseg_type == VSEG_TYPE_ANON) ||
+                  (vseg_type == VSEG_TYPE_REMOTE) )
+         {
+             // release physical page if reference cluster
+             ppn_release = is_ref;
+         }
+         else if( is_ref )   // vseg_type == DATA in reference cluster
+         {
+             // get extended pointers on forks and lock field in page descriptor
+             xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
+             xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+             // take lock protecting "forks" counter
+             remote_busylock_acquire( lock_xp );
+
+             // get number of pending forks from page descriptor
+             uint32_t forks = hal_remote_l32( forks_xp );
+
+             // decrement pending forks counter if required
+             if( forks ) hal_remote_atomic_add( forks_xp , -1 );
+
+             // release lock protecting "forks" counter
+             remote_busylock_release( lock_xp );
+
+             // release physical page if forks == 0
+             ppn_release = (forks == 0);
+         }
+         else                // vseg_type == DATA not in reference cluster
+         {
+             // no physical page release if not in reference cluster
+             ppn_release = false;
+         }
+
+         // release physical page to relevant kmem when required
+         if( ppn_release )
+         {
+             if( page_cxy == local_cxy )
+             {
+                 req.type = KMEM_PAGE;
+                 req.ptr  = page_ptr;
+                 kmem_free( &req );
+             }
+             else
+             {
+                 rpc_pmem_release_pages_client( page_cxy , page_ptr );
+             }
+         }
+
+ #if( DEBUG_VMM_REMOVE_VSEG & 1 )
+ if( DEBUG_VMM_REMOVE_VSEG < cycle )
+ {
+     if( ppn_release ) printk(" / released to kmem\n" );
+     else              printk("\n");
+ }
+ #endif
      }
  }

- // remove vseg from VSL and release vseg descriptor (if not MMAP)
+ // remove vseg from VSL
  vmm_detach_vseg_from_vsl( vmm , vseg );

- #if DEBUG_VMM_DELETE_VSEG
+ // release vseg descriptor depending on vseg type
+ if( vseg_type == VSEG_TYPE_STACK )
+ {
+     // release slot to local stack allocator
+     vmm_stack_free( vmm , vseg );
+
+     // release vseg descriptor to local kmem
+     vseg_free( vseg );
+ }
+ else if( (vseg_type == VSEG_TYPE_ANON) ||
+          (vseg_type == VSEG_TYPE_FILE) ||
+          (vseg_type == VSEG_TYPE_REMOTE) )
+ {
+     // release vseg to local mmap allocator
+     vmm_mmap_free( vmm , vseg );
+ }
+ else
+ {
+     // release vseg descriptor to local kmem
+     vseg_free( vseg );
+ }
+
+ #if DEBUG_VMM_REMOVE_VSEG
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_DELETE_VSEG < cycle )
- printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
- #endif
-
- }  // end vmm_delete_vseg()
+ if( DEBUG_VMM_REMOVE_VSEG < cycle )
+ printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid,
+ process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
+ #endif
+
+ }  // end vmm_remove_vseg()
+
+ ///////////////////////////////////
+ void vmm_delete_vseg( pid_t    pid,
+                       intptr_t vaddr )
+ {
+ process_t * process;    // local pointer on local process
+ vseg_t    * vseg;       // local pointer on local vseg containing vaddr
+
+ // get local pointer on local process descriptor
+ process = cluster_get_local_process_from_pid( pid );
+
+ if( process == NULL )
+ {
+     printk("\n[WARNING] in %s : cannot get local process descriptor\n",
+     __FUNCTION__ );
+     return;
+ }
+
+ // get local pointer on local vseg containing vaddr
+ vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
+
+ if( vseg == NULL )
+ {
+     printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
+     __FUNCTION__ );
+     return;
+ }
+
+ // call relevant function
+ vmm_remove_vseg( process , vseg );
+
+ }  // end vmm_delete_vseg

  /////////////////////////////////////////////
…
                          intptr_t vaddr )
  {
- xptr_t   iter_xp;
  xptr_t   vseg_xp;
  vseg_t * vseg;
+ xptr_t   iter_xp;

  // get extended pointers on VSL lock and root
- xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
+ xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
  xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
…
  XLIST_FOREACH( root_xp , iter_xp )
  {
+     // get pointers on vseg
      vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
      vseg    = GET_PTR( vseg_xp );

+     // return success when match
      if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
      {
…
  // return failure
  remote_rwlock_rd_release( lock_xp );
-
  return NULL;
…
  vseg_init_from_ref( vseg , vseg_xp );

+ // build extended pointer on VSL lock
+ xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+ // take the VSL lock in write mode
+ remote_rwlock_wr_acquire( lock_xp );
+
  // register local vseg in local VSL
  vmm_attach_vseg_to_vsl( vmm , vseg );
+
+ // release the VSL lock
+ remote_rwlock_wr_release( lock_xp );
  }
…
  uint32_t   cycle = (uint32_t)hal_get_cycles();
  thread_t * this  = CURRENT_THREAD;
- xptr_t     this_xp = XPTR( local_cxy , this );
  if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
  printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
…
  error_t    error;    // value returned by called functions

+ #if DEBUG_VMM_HANDLE_PAGE_FAULT
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ thread_t * this  = CURRENT_THREAD;
+ if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
+ printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
+ hal_vmm_display( process , true );
+ #endif
+
  // get local vseg (access to reference VSL can be required)
  error = vmm_get_vseg( process,
…
  if( error )
  {
-     printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
-     __FUNCTION__ , vpn , process->pid );
+     printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n",
+     __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );

      return EXCP_USER_ERROR;
  }

  #if DEBUG_VMM_HANDLE_PAGE_FAULT
- uint32_t cycle = (uint32_t)hal_get_cycles();
- thread_t * this = CURRENT_THREAD;
+ cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
- printk("\n[%s] threadr[%x,%x] enter for vpn %x / %s / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
+ printk("\n[%s] threadr[%x,%x] found vseg %s / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
  #endif
…
  error_t          error;

+ thread_t * this = CURRENT_THREAD;
+
  #if DEBUG_VMM_HANDLE_COW
  uint32_t   cycle = (uint32_t)hal_get_cycles();
- thread_t * this  = CURRENT_THREAD;
- xptr_t     this_xp = XPTR( local_cxy , this );
  if( DEBUG_VMM_HANDLE_COW < cycle )
  printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
+ hal_vmm_display( process , true );
  #endif
…
  if( error )
  {
-     printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
-     __FUNCTION__, vpn, process->pid );
+     printk("\n[PANIC] in %s vpn %x in thread[%x,%x] not in a registered vseg\n",
+     __FUNCTION__, vpn, process->pid, this->trdid );

      return EXCP_KERNEL_PANIC;