Changeset 651 for trunk/kernel/mm
- Timestamp:
- Nov 14, 2019, 11:50:09 AM (5 years ago)
- Location:
- trunk/kernel/mm
- Files:
-
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/mm/mapper.c
r637 r651 442 442 if ( page_xp == XPTR_NULL ) return -1; 443 443 444 // compute extended pointer in kernel mapper444 // compute extended pointer on kernel mapper 445 445 xptr_t map_xp = ppm_page2base( page_xp ) + page_offset; 446 446 … … 448 448 if( DEBUG_MAPPER_MOVE_USER < cycle ) 449 449 printk("\n[%s] thread[%x,%x] : get buffer(%x,%x) in mapper\n", 450 __FUNCTION__, this->process->pid, this->trdid, map_cxy, map_ptr);450 __FUNCTION__, this->process->pid, this->trdid, GET_CXY(map_xp), GET_PTR(map_xp) ); 451 451 #endif 452 452 // compute pointer in user buffer -
trunk/kernel/mm/ppm.c
r637 r651 296 296 current_size >>= 1; 297 297 298 // update order fiel s in new free block298 // update order fields in new free block 299 299 current_block = found_block + current_size; 300 300 current_block->order = current_order; -
trunk/kernel/mm/vmm.c
r641 r651 55 55 56 56 //////////////////////////////////////////////////////////////////////////////////////////// 57 // This static function is called by the vmm_user_init() function. 58 // It initialises the free lists of vsegs used by the VMM MMAP allocator. 59 // It makes the assumption that HEAP_BASE == 1 Gbytes and HEAP_SIZE == 2 Gbytes. 60 //////////////////////////////////////////////////////////////////////////////////////////// 61 static void vmm_stack_init( vmm_t * vmm ) 62 { 63 64 // check STACK zone 65 assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <= 66 (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n"); 67 68 // get pointer on STACK allocator 69 stack_mgr_t * mgr = &vmm->stack_mgr; 70 71 mgr->bitmap = 0; 72 mgr->vpn_base = CONFIG_VMM_STACK_BASE; 73 busylock_init( &mgr->lock , LOCK_VMM_STACK ); 74 75 } 76 77 //////////////////////////////////////////////////////////////////////////////////////////// 57 78 // This static function is called by the vmm_create_vseg() function, and implements 58 // the VMM STACK specific allocator. 79 // the VMM STACK specific allocator. Depending on the local thread index <ltid>, 80 // it ckeks availability of the corresponding slot in the process STACKS region, 81 // allocates a vseg descriptor, and initializes the "vpn_base" and "vpn_size" fields. 59 82 //////////////////////////////////////////////////////////////////////////////////////////// 60 83 // @ vmm : [in] pointer on VMM. 61 84 // @ ltid : [in] requested slot == local user thread identifier. 
62 // @ vpn_base : [out] first allocated page63 // @ vpn_size : [out] number of allocated pages64 85 //////////////////////////////////////////////////////////////////////////////////////////// 65 static void vmm_stack_alloc( vmm_t * vmm, 66 ltid_t ltid, 67 vpn_t * vpn_base, 68 vpn_t * vpn_size ) 86 static vseg_t * vmm_stack_alloc( vmm_t * vmm, 87 ltid_t ltid ) 69 88 { 70 89 … … 76 95 stack_mgr_t * mgr = &vmm->stack_mgr; 77 96 78 // get lock onstack allocator97 // get lock protecting stack allocator 79 98 busylock_acquire( &mgr->lock ); 80 99 … … 83 102 "slot index %d already allocated", ltid ); 84 103 104 // allocate a vseg descriptor 105 vseg_t * vseg = vseg_alloc(); 106 107 if( vseg == NULL ) 108 { 109 // release lock protecting free lists 110 busylock_release( &mgr->lock ); 111 112 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n", 113 __FUNCTION__ , local_cxy ); 114 115 return NULL; 116 } 117 85 118 // update bitmap 86 119 bitmap_set( &mgr->bitmap , ltid ); … … 89 122 busylock_release( &mgr->lock ); 90 123 91 // returns vpn_base, vpn_size (first page non allocated) 92 *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1; 93 *vpn_size = CONFIG_VMM_STACK_SIZE - 1; 124 // set "vpn_base" & "vpn_size" fields (first page non allocated) 125 vseg->vpn_base = mgr->vpn_base + (ltid * CONFIG_VMM_STACK_SIZE) + 1; 126 vseg->vpn_size = CONFIG_VMM_STACK_SIZE - 1; 127 128 return vseg; 94 129 95 130 } // end vmm_stack_alloc() … … 98 133 // This static function is called by the vmm_remove_vseg() function, and implements 99 134 // the VMM STACK specific desallocator. 135 // It updates the bitmap to release the corresponding slot in the process STACKS region, 136 // and releases memory allocated to vseg descriptor. 100 137 //////////////////////////////////////////////////////////////////////////////////////////// 101 138 // @ vmm : [in] pointer on VMM. 
… … 128 165 busylock_release( &mgr->lock ); 129 166 167 // release memory allocated to vseg descriptor 168 vseg_free( vseg ); 169 130 170 } // end vmm_stack_free() 171 172 173 174 //////////////////////////////////////////////////////////////////////////////////////////// 175 // This function display the current state of the VMM MMAP allocator of a process VMM 176 // identified by the <vmm> argument. 177 //////////////////////////////////////////////////////////////////////////////////////////// 178 void vmm_mmap_display( vmm_t * vmm ) 179 { 180 uint32_t order; 181 xptr_t root_xp; 182 xptr_t iter_xp; 183 184 // get pointer on process 185 process_t * process = (process_t *)(((char*)vmm) - OFFSETOF( process_t , vmm )); 186 187 // get process PID 188 pid_t pid = process->pid; 189 190 // get pointer on VMM MMAP allocator 191 mmap_mgr_t * mgr = &vmm->mmap_mgr; 192 193 // display header 194 printk("***** VMM MMAP allocator / process %x *****\n", pid ); 195 196 // scan the array of free lists of vsegs 197 for( order = 0 ; order <= CONFIG_VMM_HEAP_MAX_ORDER ; order++ ) 198 { 199 root_xp = XPTR( local_cxy , &mgr->free_list_root[order] ); 200 201 if( !xlist_is_empty( root_xp ) ) 202 { 203 printk(" - %d (%x pages) : ", order , 1<<order ); 204 205 XLIST_FOREACH( root_xp , iter_xp ) 206 { 207 xptr_t vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 208 vseg_t * vseg = GET_PTR( vseg_xp ); 209 210 printk("%x | ", vseg->vpn_base ); 211 } 212 213 printk("\n"); 214 } 215 } 216 } // end vmm_mmap_display() 217 218 //////////////////////////////////////////////////////////////////////////////////////////// 219 // This static function is called by the vmm_user_init() function. 220 // It initialises the free lists of vsegs used by the VMM MMAP allocator. 
221 // TODO this function is only valid for 32 bits cores, and makes three assumptions: 222 // HEAP_BASE == 1 Gbytes / HEAP_SIZE == 2 Gbytes / MMAP_MAX_SIZE == 1 Gbytes 223 //////////////////////////////////////////////////////////////////////////////////////////// 224 void vmm_mmap_init( vmm_t * vmm ) 225 { 226 227 // check HEAP base and size 228 assert( (CONFIG_VMM_HEAP_BASE == 0x40000) & (CONFIG_VMM_STACK_BASE == 0xc0000), 229 "CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" ); 230 231 // check MMAP vseg max order 232 assert( (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" ); 233 234 // get pointer on MMAP allocator 235 mmap_mgr_t * mgr = &vmm->mmap_mgr; 236 237 // initialize HEAP base and size 238 mgr->vpn_base = CONFIG_VMM_HEAP_BASE; 239 mgr->vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; 240 241 // initialize lock 242 busylock_init( &mgr->lock , LOCK_VMM_MMAP ); 243 244 // initialize free lists 245 uint32_t i; 246 for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ ) 247 { 248 xlist_root_init( XPTR( local_cxy , &mgr->free_list_root[i] ) ); 249 } 250 251 // allocate and register first 1 Gbytes vseg 252 vseg_t * vseg0 = vseg_alloc(); 253 254 assert( (vseg0 != NULL) , "cannot allocate vseg" ); 255 256 vseg0->vpn_base = CONFIG_VMM_HEAP_BASE; 257 vseg0->vpn_size = CONFIG_VMM_HEAP_BASE; 258 259 xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ), 260 XPTR( local_cxy , &vseg0->xlist ) ); 261 262 // allocate and register second 1 Gbytes vseg 263 vseg_t * vseg1 = vseg_alloc(); 264 265 assert( (vseg1 != NULL) , "cannot allocate vseg" ); 266 267 vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1; 268 vseg1->vpn_size = CONFIG_VMM_HEAP_BASE; 269 270 xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[CONFIG_VMM_HEAP_MAX_ORDER] ), 271 XPTR( local_cxy , &vseg1->xlist ) ); 272 273 #if DEBUG_VMM_MMAP 274 thread_t * this = CURRENT_THREAD; 275 uint32_t cycle = (uint32_t)hal_get_cycles(); 276 
printk("\n[%s] thread[%x,%x] / cycle %d\n", 277 __FUNCTION__, this->process->pid, this->trdid, cycle ); 278 vmm_mmap_display( vmm ); 279 #endif 280 281 } // end vmm_mmap_init() 131 282 132 283 //////////////////////////////////////////////////////////////////////////////////////////// 133 284 // This static function is called by the vmm_create_vseg() function, and implements 134 // the VMM MMAP specific allocator. 285 // the VMM MMAP specific allocator. Depending on the requested number of pages <npages>, 286 // it get a free vseg from the relevant free_list, and initializes the "vpn_base" and 287 // "vpn_size" fields. 135 288 //////////////////////////////////////////////////////////////////////////////////////////// 136 289 // @ vmm : [in] pointer on VMM. 137 290 // @ npages : [in] requested number of pages. 138 // @ vpn_base : [out] first allocated page. 139 // @ vpn_size : [out] actual number of allocated pages. 291 // @ returns local pointer on vseg if success / returns NULL if failure. 
140 292 //////////////////////////////////////////////////////////////////////////////////////////// 141 static error_t vmm_mmap_alloc( vmm_t * vmm, 142 vpn_t npages, 143 vpn_t * vpn_base, 144 vpn_t * vpn_size ) 293 static vseg_t * vmm_mmap_alloc( vmm_t * vmm, 294 vpn_t npages ) 145 295 { 146 uint32_t order; 147 xptr_t vseg_xp; 148 vseg_t * vseg; 149 vpn_t base; 150 vpn_t size; 151 vpn_t free; 152 153 #if DEBUG_VMM_MMAP_ALLOC 296 297 #if DEBUG_VMM_MMAP 154 298 thread_t * this = CURRENT_THREAD; 155 299 uint32_t cycle = (uint32_t)hal_get_cycles(); 156 if( DEBUG_VMM_MMAP _ALLOC< cycle )157 printk("\n[%s] thread[%x,%x] enter/ cycle %d\n",158 __FUNCTION__, this->process->pid, this->trdid, cycle );300 if( DEBUG_VMM_MMAP < cycle ) 301 printk("\n[%s] thread[%x,%x] for %x pages / cycle %d\n", 302 __FUNCTION__, this->process->pid, this->trdid, npages, cycle ); 159 303 #endif 160 304 161 305 // number of allocated pages must be power of 2 162 306 // compute actual size and order 163 size= POW2_ROUNDUP( npages );164 order = bits_log2(size );307 vpn_t required_vpn_size = POW2_ROUNDUP( npages ); 308 uint32_t required_order = bits_log2( required_vpn_size ); 165 309 166 310 // get mmap allocator pointer 167 311 mmap_mgr_t * mgr = &vmm->mmap_mgr; 168 312 169 // build extended pointer on root of zombi_list[order] 170 xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] ); 171 172 // take lock protecting zombi_lists 313 // take lock protecting free lists in MMAP allocator 173 314 busylock_acquire( &mgr->lock ); 174 315 175 // get vseg from zombi_list or from mmap zone 176 if( xlist_is_empty( root_xp ) ) // from mmap zone 177 { 178 // check overflow 179 free = mgr->first_free_vpn; 180 if( (free + size) > mgr->vpn_size ) return -1; 181 182 // update MMAP allocator 183 mgr->first_free_vpn += size; 184 185 // compute base 186 base = free; 187 } 188 else // from zombi_list 189 { 190 // get pointer on zombi vseg from zombi_list 191 vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); 
192 vseg = GET_PTR( vseg_xp ); 193 194 // remove vseg from free-list 195 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 196 197 // compute base 198 base = vseg->vpn_base; 199 } 200 201 // release lock 202 busylock_release( &mgr->lock ); 203 204 #if DEBUG_VMM_MMAP_ALLOC 205 cycle = (uint32_t)hal_get_cycles(); 206 if( DEBUG_VMM_DESTROY < cycle ) 207 printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n", 208 __FUNCTION__, this->process->pid, this->trdid, base, size, cycle ); 209 #endif 210 211 // returns vpn_base, vpn_size 212 *vpn_base = base; 213 *vpn_size = size; 214 return 0; 316 // initialises the while loop variables 317 uint32_t current_order = required_order; 318 vseg_t * current_vseg = NULL; 319 320 // search a free vseg equal or larger than requested size 321 while( current_order <= CONFIG_VMM_HEAP_MAX_ORDER ) 322 { 323 // build extended pointer on free_pages_root[current_order] 324 xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[current_order] ); 325 326 if( !xlist_is_empty( root_xp ) ) 327 { 328 // get extended pointer on first vseg in this free_list 329 xptr_t current_vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist ); 330 current_vseg = GET_PTR( current_vseg_xp ); 331 332 // build extended pointer on xlist field in vseg descriptor 333 xptr_t list_entry_xp = XPTR( local_cxy , ¤t_vseg->xlist ); 334 335 // remove this vseg from the free_list 336 xlist_unlink( list_entry_xp ); 337 338 break; 339 } 340 341 // increment loop index 342 current_order++; 343 344 } // end while loop 345 346 if( current_vseg == NULL ) // return failure 347 { 348 // release lock protecting free lists 349 busylock_release( &mgr->lock ); 350 351 printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n", 352 __FUNCTION__, npages , local_cxy ); 353 354 return NULL; 355 } 356 357 // split recursively the found vseg in smaller vsegs 358 // if required, and update the free-lists accordingly 359 while( current_order > required_order ) 360 { 361 // 
get found vseg base and size 362 vpn_t vpn_base = current_vseg->vpn_base; 363 vpn_t vpn_size = current_vseg->vpn_size; 364 365 // allocate a new vseg for the upper half of current vseg 366 vseg_t * new_vseg = vseg_alloc(); 367 368 if( new_vseg == NULL ) 369 { 370 // release lock protecting free lists 371 busylock_release( &mgr->lock ); 372 373 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n", 374 __FUNCTION__ , local_cxy ); 375 376 return NULL; 377 } 378 379 // initialise new vseg (upper half of found vseg) 380 new_vseg->vmm = vmm; 381 new_vseg->vpn_base = vpn_base + (vpn_size >> 1); 382 new_vseg->vpn_size = vpn_size >> 1; 383 384 // insert new vseg in relevant free_list 385 xlist_add_first( XPTR( local_cxy , &mgr->free_list_root[current_order-1] ), 386 XPTR( local_cxy , &new_vseg->xlist ) ); 387 388 // update found vseg 389 current_vseg->vpn_size = vpn_size>>1; 390 391 // update order 392 current_order --; 393 } 394 395 // release lock protecting free lists 396 busylock_release( &mgr->lock ); 397 398 #if DEBUG_VMM_MMAP 399 vmm_mmap_display( vmm ); 400 #endif 401 402 return current_vseg; 215 403 216 404 } // end vmm_mmap_alloc() … … 219 407 // This static function implements the VMM MMAP specific desallocator. 220 408 // It is called by the vmm_remove_vseg() function. 409 // It releases the vseg to the relevant free_list, after trying (recursively) to 410 // merge it to the buddy vseg. 221 411 //////////////////////////////////////////////////////////////////////////////////////////// 222 412 // @ vmm : [in] pointer on VMM. 
… … 226 416 vseg_t * vseg ) 227 417 { 228 // get pointer on mmap allocator 418 419 #if DEBUG_VMM_MMAP 420 thread_t * this = CURRENT_THREAD; 421 uint32_t cycle = (uint32_t)hal_get_cycles(); 422 if( DEBUG_VMM_MMAP < cycle ) 423 printk("\n[%s] thread[%x,%x] for vpn_base %x / vpn_size %x / cycle %d\n", 424 __FUNCTION__, this->process->pid, this->trdid, vseg->vpn_base, vseg->vpn_size, cycle ); 425 #endif 426 427 vseg_t * buddy_vseg; 428 429 // get mmap allocator pointer 229 430 mmap_mgr_t * mgr = &vmm->mmap_mgr; 230 431 231 // compute zombi_list order 232 uint32_t order = bits_log2( vseg->vpn_size ); 233 234 // take lock protecting zombi lists 432 // take lock protecting free lists 235 433 busylock_acquire( &mgr->lock ); 236 434 237 // update relevant zombi_list 238 xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ), 239 XPTR( local_cxy , &vseg->xlist ) ); 435 // initialise loop variables 436 // released_vseg is the currently released vseg 437 vseg_t * released_vseg = vseg; 438 uint32_t released_order = bits_log2( vseg->vpn_size ); 439 440 // iteratively merge the released vseg to the buddy vseg 441 // release the current page and exit when buddy not found 442 while( released_order <= CONFIG_VMM_HEAP_MAX_ORDER ) 443 { 444 // compute buddy_vseg vpn_base 445 vpn_t buddy_vpn_base = released_vseg->vpn_base ^ (1 << released_order); 446 447 // build extended pointer on free_pages_root[current_order] 448 xptr_t root_xp = XPTR( local_cxy , &mgr->free_list_root[released_order] ); 449 450 // scan this free list to find the buddy vseg 451 xptr_t iter_xp; 452 buddy_vseg = NULL; 453 XLIST_FOREACH( root_xp , iter_xp ) 454 { 455 xptr_t current_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 456 vseg_t * current_vseg = GET_PTR( current_vseg_xp ); 457 458 if( current_vseg->vpn_base == buddy_vpn_base ) 459 { 460 buddy_vseg = current_vseg; 461 break; 462 } 463 } 464 465 if( buddy_vseg != NULL ) // buddy found => merge released & buddy 466 { 467 // update released vseg 
fields 468 released_vseg->vpn_size = buddy_vseg->vpn_size<<1; 469 if( released_vseg->vpn_base > buddy_vseg->vpn_base) 470 released_vseg->vpn_base = buddy_vseg->vpn_base; 471 472 // remove buddy vseg from free_list 473 xlist_unlink( XPTR( local_cxy , &buddy_vseg->xlist ) ); 474 475 // release memory allocated to buddy descriptor 476 vseg_free( buddy_vseg ); 477 } 478 else // buddy not found => register & exit 479 { 480 // register released vseg in free list 481 xlist_add_first( root_xp , XPTR( local_cxy , &released_vseg->xlist ) ); 482 483 // exit while loop 484 break; 485 } 486 487 // increment released_order 488 released_order++; 489 } 240 490 241 491 // release lock 242 492 busylock_release( &mgr->lock ); 243 493 244 } // end of vmm_mmap_free() 494 #if DEBUG_VMM_MMAP 495 vmm_mmap_display( vmm ); 496 #endif 497 498 } // end vmm_mmap_free() 245 499 246 500 //////////////////////////////////////////////////////////////////////////////////////////// … … 288 542 error_t vmm_user_init( process_t * process ) 289 543 { 290 uint32_t i;291 544 292 545 #if DEBUG_VMM_USER_INIT … … 306 559 "UTILS zone too small\n" ); 307 560 308 // check STACK zone 309 assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <= 310 (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , 311 "STACK zone too small\n"); 312 313 // initialize the lock protecting the VSL 561 // initialize lock protecting the VSL 314 562 remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 315 563 564 565 // initialize STACK allocator 566 vmm_stack_init( vmm ); 567 568 // initialize MMAP allocator 569 vmm_mmap_init( vmm ); 570 571 // initialize instrumentation counters 572 vmm->false_pgfault_nr = 0; 573 vmm->local_pgfault_nr = 0; 574 vmm->global_pgfault_nr = 0; 575 vmm->false_pgfault_cost = 0; 576 vmm->local_pgfault_cost = 0; 577 vmm->global_pgfault_cost = 0; 316 578 317 579 /* … … 356 618 vmm->envs_vpn_base = base; 357 619 */ 358 // initialize STACK allocator359 vmm->stack_mgr.bitmap = 
0;360 vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;361 busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );362 363 // initialize MMAP allocator364 vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE;365 vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;366 vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;367 busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );368 for( i = 0 ; i < 32 ; i++ )369 {370 xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );371 }372 373 // initialize instrumentation counters374 vmm->false_pgfault_nr = 0;375 vmm->local_pgfault_nr = 0;376 vmm->global_pgfault_nr = 0;377 vmm->false_pgfault_cost = 0;378 vmm->local_pgfault_cost = 0;379 vmm->global_pgfault_cost = 0;380 381 620 hal_fence(); 382 621 … … 1158 1397 remote_queuelock_release( parent_lock_xp ); 1159 1398 1399 /* deprecated [AG] : this is already done by the vmm_user_init() funcfion 1400 1160 1401 // initialize the child VMM STACK allocator 1161 child_vmm->stack_mgr.bitmap = 0; 1162 child_vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE; 1402 vmm_stack_init( child_vmm ); 1163 1403 1164 1404 // initialize the child VMM MMAP allocator 1165 uint32_t i; 1166 child_vmm->mmap_mgr.vpn_base = CONFIG_VMM_HEAP_BASE; 1167 child_vmm->mmap_mgr.vpn_size = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE; 1168 child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE; 1169 for( i = 0 ; i < 32 ; i++ ) 1170 { 1171 xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) ); 1172 } 1405 vmm_mmap_init( child_vmm ); 1173 1406 1174 1407 // initialize instrumentation counters … … 1179 1412 child_vmm->local_pgfault_cost = 0; 1180 1413 child_vmm->global_pgfault_cost = 0; 1181 1414 */ 1182 1415 // copy base addresses from parent VMM to child VMM 1183 1416 child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); … … 1260 1493 remote_queuelock_release( vsl_lock_xp ); 1261 1494 1262 // remove all registered MMAP vsegs 1263 // 
from zombi_lists in MMAP allocator 1495 // remove all registered MMAP vsegs from free_lists in MMAP allocator 1264 1496 uint32_t i; 1265 for( i = 0 ; i <32; i++ )1266 { 1267 // build extended pointer on zombi_list[i]1268 xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr. zombi_list[i] );1497 for( i = 0 ; i <= CONFIG_VMM_HEAP_MAX_ORDER ; i++ ) 1498 { 1499 // build extended pointer on free list root 1500 xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.free_list_root[i] ); 1269 1501 1270 1502 // scan zombi_list[i] … … 1343 1575 cxy_t cxy ) 1344 1576 { 1345 vseg_t * vseg; // created vseg pointer 1346 vpn_t vpn_base; // first page index 1347 vpn_t vpn_size; // number of pages covered by vseg 1348 error_t error; 1577 vseg_t * vseg; // pointer on allocated vseg descriptor 1349 1578 1350 1579 #if DEBUG_VMM_CREATE_VSEG … … 1364 1593 vmm_t * vmm = &process->vmm; 1365 1594 1366 // compute base, size, vpn_base, vpn_size, depending on vseg type 1367 // we use the VMM specific allocators for "stack", "file", "anon", & "remote" vsegs 1368 1595 // allocate a vseg descriptor and initialize it, depending on type 1596 // we use specific allocators for "stack" and "mmap" types 1597 1598 ///////////////////////////// 1369 1599 if( type == VSEG_TYPE_STACK ) 1370 1600 { 1371 // get vpn_base and vpn_size from STACK allocator 1372 vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size ); 1373 1374 // compute vseg base and size from vpn_base and vpn_size 1375 base = vpn_base << CONFIG_PPM_PAGE_SHIFT; 1376 size = vpn_size << CONFIG_PPM_PAGE_SHIFT; 1377 } 1378 else if( type == VSEG_TYPE_FILE ) 1379 { 1380 // compute page index (in mapper) for first byte 1381 vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT; 1382 1383 // compute page index (in mapper) for last byte 1384 vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT; 1385 1386 // compute offset in first page 1387 uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK; 1388 1389 // compute number of pages required in virtual 
space 1390 vpn_t npages = vpn_max - vpn_min + 1; 1391 1392 // get vpn_base and vpn_size from MMAP allocator 1393 error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size ); 1394 if( error ) 1601 // get vseg from STACK allocator 1602 vseg = vmm_stack_alloc( vmm , base ); // base == ltid 1603 1604 if( vseg == NULL ) 1395 1605 { 1396 printk("\n[ERROR] in %s : no vspace for mmap vseg /process %x in cluster %x\n",1397 __FUNCTION__, process->pid , local_cxy );1606 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1607 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1398 1608 return NULL; 1399 1609 } 1400 1610 1401 // set the vseg base (not always aligned for FILE) 1402 base = (vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset; 1403 } 1404 else if( (type == VSEG_TYPE_ANON) || 1405 (type == VSEG_TYPE_REMOTE) ) 1611 // initialize vseg 1612 vseg->type = type; 1613 vseg->vmm = vmm; 1614 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT; 1615 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT); 1616 vseg->cxy = cxy; 1617 1618 vseg_init_flags( vseg , type ); 1619 } 1620 ///////////////////////////////// 1621 else if( type == VSEG_TYPE_FILE ) 1622 { 1623 // compute page index (in mapper) for first and last byte 1624 vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_SHIFT; 1625 vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT; 1626 1627 // compute offset in first page and number of pages 1628 uint32_t offset = file_offset & CONFIG_PPM_PAGE_MASK; 1629 vpn_t npages = vpn_max - vpn_min + 1; 1630 1631 // get vseg from MMAP allocator 1632 vseg = vmm_mmap_alloc( vmm , npages ); 1633 1634 if( vseg == NULL ) 1635 { 1636 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1637 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1638 return NULL; 1639 } 1640 1641 // initialize vseg 1642 vseg->type = type; 1643 vseg->vmm = vmm; 1644 vseg->min = (vseg->vpn_base << 
CONFIG_PPM_PAGE_SHIFT) + offset; 1645 vseg->max = vseg->min + size; 1646 vseg->file_offset = file_offset; 1647 vseg->file_size = file_size; 1648 vseg->mapper_xp = mapper_xp; 1649 vseg->cxy = cxy; 1650 1651 vseg_init_flags( vseg , type ); 1652 } 1653 ///////////////////////////////////////////////////////////////// 1654 else if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) ) 1406 1655 { 1407 1656 // compute number of required pages in virtual space … … 1409 1658 if( size & CONFIG_PPM_PAGE_MASK) npages++; 1410 1659 1411 // get vpn_base and vpn_size from MMAP allocator 1412 error = vmm_mmap_alloc( vmm , npages , &vpn_base , &vpn_size ); 1413 if( error ) 1660 // allocate vseg from MMAP allocator 1661 vseg = vmm_mmap_alloc( vmm , npages ); 1662 1663 if( vseg == NULL ) 1414 1664 { 1415 printk("\n[ERROR] in %s : no vspace for mmap vseg /process %x in cluster %x\n",1416 __FUNCTION__, process->pid , local_cxy );1665 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1666 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1417 1667 return NULL; 1418 1668 } 1419 1669 1420 // set vseg base (always aligned for ANON or REMOTE) 1421 base = vpn_base << CONFIG_PPM_PAGE_SHIFT; 1422 } 1670 // initialize vseg 1671 vseg->type = type; 1672 vseg->vmm = vmm; 1673 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT; 1674 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT); 1675 vseg->cxy = cxy; 1676 1677 vseg_init_flags( vseg , type ); 1678 } 1679 ///////////////////////////////////////////////////////////////// 1423 1680 else // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg 1424 1681 { … … 1426 1683 uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT; 1427 1684 1428 vpn_base = vpn_min; 1429 vpn_size = vpn_max - vpn_min + 1; 1685 // allocate vseg descriptor 1686 vseg = vseg_alloc(); 1687 1688 if( vseg == NULL ) 1689 { 1690 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1691 __FUNCTION__ 
, vseg_type_str( type ) , process->pid , local_cxy ); 1692 return NULL; 1693 } 1694 // initialize vseg 1695 vseg->type = type; 1696 vseg->vmm = vmm; 1697 vseg->min = base; 1698 vseg->max = base + size; 1699 vseg->vpn_base = base >> CONFIG_PPM_PAGE_SHIFT; 1700 vseg->vpn_size = vpn_max - vpn_min + 1; 1701 vseg->file_offset = file_offset; 1702 vseg->file_size = file_size; 1703 vseg->mapper_xp = mapper_xp; 1704 vseg->cxy = cxy; 1705 1706 vseg_init_flags( vseg , type ); 1430 1707 } 1431 1708 1432 1709 // check collisions 1433 vseg = vmm_check_conflict( process , vpn_base , vpn_size ); 1434 1435 if( vseg != NULL ) 1436 { 1437 printk("\n[ERROR] in %s for process %x : new vseg [vpn_base %x / vpn_size %x]\n" 1438 " overlap existing vseg [vpn_base %x / vpn_size %x]\n", 1439 __FUNCTION__ , process->pid, vpn_base, vpn_size, vseg->vpn_base, vseg->vpn_size ); 1710 vseg_t * existing_vseg = vmm_check_conflict( process , vseg->vpn_base , vseg->vpn_size ); 1711 1712 if( existing_vseg != NULL ) 1713 { 1714 printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n" 1715 " overlap existing vseg %s [vpn_base %x / vpn_size %x]\n", 1716 __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 1717 vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size ); 1718 vseg_free( vseg ); 1440 1719 return NULL; 1441 1720 } 1442 1443 // allocate physical memory for vseg descriptor1444 vseg = vseg_alloc();1445 if( vseg == NULL )1446 {1447 printk("\n[ERROR] in %s for process %x : cannot allocate memory for vseg\n",1448 __FUNCTION__ , process->pid );1449 return NULL;1450 }1451 1452 #if (DEBUG_VMM_CREATE_VSEG & 1)1453 if( DEBUG_VMM_CREATE_VSEG < cycle )1454 printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n",1455 __FUNCTION__, this->process->pid, this->trdid, base, size, vpn_base, vpn_size );1456 #endif1457 1458 // initialize vseg descriptor1459 vseg_init( vseg,1460 type,1461 base,1462 
size,1463 vpn_base,1464 vpn_size,1465 file_offset,1466 file_size,1467 mapper_xp,1468 cxy );1469 1721 1470 1722 // build extended pointer on VSL lock … … 1480 1732 remote_queuelock_release( lock_xp ); 1481 1733 1482 #if DEBUG_VMM_CREATE_VSEG 1734 #if DEBUG_VMM_CREATE_VSEG 1483 1735 cycle = (uint32_t)hal_get_cycles(); 1484 // if( DEBUG_VMM_CREATE_VSEG < cycle ) 1485 if( type == VSEG_TYPE_REMOTE ) 1486 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n", 1487 __FUNCTION__, this->process->pid, this->trdid, 1488 process->pid, vseg_type_str(type), base, cxy, cycle ); 1736 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1737 printk("\n[%s] thread[%x,%x] exit / %s / vpn_base %x / vpn_size %x / cycle %d\n", 1738 __FUNCTION__, this->process->pid, this->trdid, 1739 vseg_type_str(type), vseg->vpn_base, vseg->vpn_size, cycle ); 1489 1740 #endif 1490 1741 … … 1654 1905 // release slot to local stack allocator 1655 1906 vmm_stack_free( vmm , vseg ); 1656 1657 // release vseg descriptor to local kmem1658 vseg_free( vseg );1659 1907 } 1660 1908 else if( (vseg_type == VSEG_TYPE_ANON) || -
trunk/kernel/mm/vmm.h
r640 r651 64 64 65 65 /********************************************************************************************* 66 * This structure defines the MMAP allocator used by the VMM to dynamically handle 67 * MMAP vsegs requested or released by an user process. 68 * This allocator should be only used in the reference cluster. 69 * - allocation policy : all allocated vsegs occupy an integer number of pages that is 70 * power of 2, and are aligned on a page boundary. The requested number of pages is 71 * rounded if required. The first_free_vpn variable defines completely the MMAP zone state. 72 * It is never decremented, as the released vsegs are simply registered in a zombi_list. 73 * The relevant zombi_list is checked first for each allocation request. 74 * - release policy : a released MMAP vseg is registered in an array of zombi_lists. 75 * This array is indexed by ln(number of pages), and each entry contains the root of 76 * a local list of zombi vsegs that have the same size. The physical memory allocated 77 * for a zombi vseg descriptor is not released, to use the "list" field. 78 * This physical memory allocated for MMAP vseg descriptors is actually released 79 * when the VMM is destroyed. 66 * This structure defines the MMAP allocator used by the VMM to dynamically handle MMAP vsegs 67 * requested or released by an user process. It must be called in the reference cluster. 68 * - allocation policy : 69 * This allocator implements the buddy algorithm. All allocated vsegs occupy an integer 70 * number of pages, that is power of 2, and are aligned (vpn_base is multiple of vpn_size). 71 * The requested number of pages is rounded if required. The global allocator state is 72 * completely defined by the free_pages_root[] array indexed by the vseg order. 73 * These free lists are local, but are implemented as xlist because we use the existing 74 * vseg.xlist to register a free vseg in its free list. 
75 * - release policy : 76 * A released vseg is recursively merged with the "buddy" vseg when it is free, in 77 * order to build the largest possible aligned free vsegs. The resulting vseg.vpn_size 78 * field is updated. 79 * Implementation note: 80 * The only significant (and documented) fiels in the vsegs registered in the MMAP allocator 81 * free lists are "xlist", "vpn_base", and "vpn_size". 80 82 ********************************************************************************************/ 81 83 … … 85 87 vpn_t vpn_base; /*! first page of MMAP zone */ 86 88 vpn_t vpn_size; /*! number of pages in MMAP zone */ 87 vpn_t first_free_vpn; /*! first free page in MMAP zone */ 88 xlist_entry_t zombi_list[32]; /*! array of roots of released vsegs lists */ 89 xlist_entry_t free_list_root[CONFIG_VMM_HEAP_MAX_ORDER + 1]; /* roots of free lists */ 89 90 } 90 91 mmap_mgr_t; … … 103 104 * 2. The VSL contains only local vsegs, but it is implemented as an xlist, and protected by 104 105 * a remote_rwlock, because it can be accessed by a thread running in a remote cluster. 105 * An ex emple is the vmm_fork_copy() function.106 * An example is the vmm_fork_copy() function. 106 107 * 3. The GPT in the reference cluster can be directly accessed by remote threads to handle 107 108 * false page-fault (page is mapped in the reference GPT, but the PTE copy is missing … … 119 120 120 121 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 122 121 123 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 122 124 … … 156 158 * call to the vmm_user_init() function after an exec() syscall. 157 159 * It removes from the VMM of the process identified by the <process> argument all 158 * non kernel vsegs (i.e. all user vsegs), by calling the vmm_remove_vseg() function.160 * all user vsegs, by calling the vmm_remove_vseg() function. 159 161 * - the vsegs are removed from the VSL. 160 162 * - the corresponding GPT entries are removed from the GPT. 
… … 279 281 /********************************************************************************************* 280 282 * This function allocates memory for a vseg descriptor, initialises it, and register it 281 * in the VSL of the local process descriptor , that must be the reference process.282 * - For the FILE, ANON, & REMOTE types, it does not use the <base> a nd <size> arguments,283 * but uses the specific MMAP virtual memoryallocator.283 * in the VSL of the local process descriptor. 284 * - For the FILE, ANON, & REMOTE types, it does not use the <base> argument, but uses 285 * the specific VMM MMAP allocator. 284 286 * - For the STACK type, it does not use the <base> and <size> arguments, but uses the 285 * and the <base> argument the specific STACK virtual memoryallocator.287 * specific VMM STACK allocator. 286 288 * It checks collision with pre-existing vsegs. 287 289 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT, -
trunk/kernel/mm/vseg.c
r635 r651 81 81 } 82 82 83 /////////////////////////////////// 84 void vseg_init( vseg_t * vseg, 85 vseg_type_t type, 86 intptr_t base, 87 uint32_t size, 88 vpn_t vpn_base, 89 vpn_t vpn_size, 90 uint32_t file_offset, 91 uint32_t file_size, 92 xptr_t mapper_xp, 93 cxy_t cxy ) 83 ///////////////////////////////////////// 84 void vseg_init_flags( vseg_t * vseg, 85 vseg_type_t type ) 94 86 { 95 vseg->type = type;96 vseg->min = base;97 vseg->max = base + size;98 vseg->vpn_base = vpn_base;99 vseg->vpn_size = vpn_size;100 vseg->file_offset = file_offset;101 vseg->file_size = file_size;102 vseg->mapper_xp = mapper_xp;103 vseg->cxy = cxy;104 105 87 // set vseg flags depending on type 106 if 88 if( type == VSEG_TYPE_CODE ) 107 89 { 108 90 vseg->flags = VSEG_USER | -
trunk/kernel/mm/vseg.h
r640 r651 41 41 typedef enum 42 42 { 43 VSEG_TYPE_CODE = 0, /*! executable user code / private / localized */44 VSEG_TYPE_DATA = 1, /*! initialized user data / public / distributed */45 VSEG_TYPE_STACK = 2, /*! execution user stack / private / localized */46 VSEG_TYPE_ANON = 3, /*! anonymous mmap / public / localized */47 VSEG_TYPE_FILE = 4, /*! file mmap / public / localized */48 VSEG_TYPE_REMOTE = 5, /*! remote mmap / public / localized */43 VSEG_TYPE_CODE = 1, /*! executable user code / private / localized */ 44 VSEG_TYPE_DATA = 2, /*! initialized user data / public / distributed */ 45 VSEG_TYPE_STACK = 3, /*! execution user stack / private / localized */ 46 VSEG_TYPE_ANON = 4, /*! anonymous mmap / public / localized */ 47 VSEG_TYPE_FILE = 5, /*! file mmap / public / localized */ 48 VSEG_TYPE_REMOTE = 6, /*! remote mmap / public / localized */ 49 49 50 VSEG_TYPE_KCODE = 6, /*! executable kernel code / private / localized */51 VSEG_TYPE_KDATA = 7, /*! initialized kernel data / private / localized */52 VSEG_TYPE_KDEV = 8, /*! kernel peripheral device / public / localized */50 VSEG_TYPE_KCODE = 7, /*! executable kernel code / private / localized */ 51 VSEG_TYPE_KDATA = 8, /*! initialized kernel data / private / localized */ 52 VSEG_TYPE_KDEV = 9, /*! kernel peripheral device / public / localized */ 53 53 } 54 54 vseg_type_t; … … 115 115 116 116 /******************************************************************************************* 117 * This function initializes a local vseg descriptor, from the arguments values.118 * It does NOT register the vseg in the local VMM.117 * This function initializes the "flags" field for a local <vseg> descriptor, 118 * depending on the vseg <type>. 119 119 ******************************************************************************************* 120 120 * @ vseg : pointer on the vseg descriptor. 
121 * @ base : vseg base address.122 * @ size : vseg size (bytes).123 * @ vpn_base : first page index.124 * @ vpn_size : number of pages.125 121 * @ type : vseg type. 126 * @ cxy : target cluster for physical mapping.127 122 ******************************************************************************************/ 128 void vseg_init( vseg_t * vseg, 129 vseg_type_t type, 130 intptr_t base, 131 uint32_t size, 132 vpn_t vpn_base, 133 vpn_t vpn_size, 134 uint32_t file_offset, 135 uint32_t file_size, 136 xptr_t mapper_xp, 137 cxy_t cxy ); 123 void vseg_init_flags( vseg_t * vseg, 124 vseg_type_t type ); 138 125 139 126 /*******************************************************************************************
Note: See TracChangeset
for help on using the changeset viewer.