Changeset 21 for trunk/kernel/mm
- Timestamp: Jun 3, 2017, 6:56:47 PM (8 years ago)
- File: 1 edited (trunk/kernel/mm/vmm.c)
Legend:
- Unmodified (no prefix)
- Added (lines prefixed with +)
- Removed (lines prefixed with -)
trunk/kernel/mm/vmm.c
--- trunk/kernel/mm/vmm.c (r14)
+++ trunk/kernel/mm/vmm.c (r21)

@@ lines 5-9 @@
  * Mohamed Lamine Karaoui (2015)
  * Alain Greiner (2016)
- * 
+ *
  * Copyright (c) UPMC Sorbonne Universites
  *

@@ lines 52-56 @@
 ////////////////////////////////////
 void vmm_init( process_t * process )
-{ 
+{
     error_t   error;
     vseg_t  * vseg_kentry;

@@ lines 72-80 @@
         hal_core_sleep();
     }
 
     // check max number of stacks slots
     if( CONFIG_THREAD_MAX_PER_CLUSTER > 32 )
     {
-        printk("\n[PANIC] in %s : max number ot threads per cluster for a sihgle process"
+        printk("\n[PANIC] in %s : max number of threads per cluster for a single process"
               " cannot be larger than 32\n", __FUNCTION__ );
         hal_core_sleep();

@@ lines 82-86 @@
 
     // check STACK zone size
-    if( (CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) > 
+    if( (CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) >
         (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) )
     {

@@ lines 90-94 @@
     }
 
-    // initialise the rwlock protecting the vsegs list
+    // initialize the rwlock protecting the vsegs list
     rwlock_init( &vmm->vsegs_lock );
 

@@ lines 102-106 @@
     if( error )
     {
-        printk("\n[PANIC] in %s : cannot initialise radix tree for process %x\n",
+        printk("\n[PANIC] in %s : cannot initialize radix tree for process %x\n",
                __FUNCTION__ , process->pid );
         hal_core_sleep();

@@ lines 111-115 @@
     size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
     vseg_kentry = vmm_create_vseg( process , base , size , VSEG_TYPE_CODE );
-    if( vseg_kentry == NULL ) 
+    if( vseg_kentry == NULL )
     {
         printk("\n[PANIC] in %s : cannot register kent vseg for process %x\n",

@@ lines 123-127 @@
     size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     vseg_args = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA );
-    if( vseg_args == NULL ) 
+    if( vseg_args == NULL )
     {
         printk("\n[PANIC] in %s : cannot register args vseg for process %x\n",

@@ lines 135-139 @@
     size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
     vseg_envs = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA );
-    if( vseg_envs == NULL ) 
+    if( vseg_envs == NULL )
     {
         printk("\n[PANIC] in %s : cannot register envs vseg for process %x\n",

@@ lines 147-151 @@
     size = (CONFIG_VMM_MMAP_BASE-CONFIG_VMM_HEAP_BASE) << CONFIG_PPM_PAGE_SHIFT;
     vseg_heap = vmm_create_vseg( process , base , size , VSEG_TYPE_HEAP );
-    if( vseg_heap == NULL ) 
+    if( vseg_heap == NULL )
     {
         printk("\n[PANIC] in %s : cannot register heap vseg in for process %x\n",

@@ lines 174-178 @@
     for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
 
-    // initialise instrumentation counters
+    // initialize instrumentation counters
     vmm->pgfault_nr = 0;
     vmm->u_err_nr   = 0;

@@ lines 227-231 @@
 /////////////////////////////////////////////////
 vseg_t * vmm_check_conflict( process_t * process,
-                             vpn_t       vpn_base, 
+                             vpn_t       vpn_base,
                              vpn_t       vpn_size )
 {

@@ lines 238-243 @@
     {
         vseg = LIST_ELEMENT( iter , vseg_t , list );
-        if( ((vpn_base + vpn_size) > vseg->vpn_base) && 
-            (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg; 
+        if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
+            (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
     }
     return NULL;

@@ lines 249-253 @@
 ////////////////////////////////////////////////////////////////////////////////////////////
 // @ vmm      : pointer on VMM.
-// @ vpn_base : (return value) first alocated page
+// @ vpn_base : (return value) first allocated page
 // @ vpn_size : (return value) number of allocated pages
 ////////////////////////////////////////////////////////////////////////////////////////////

@@ lines 268-276 @@
     // update bitmap
     bitmap_set( &mgr->bitmap , index );
 
     // release lock on stack allocator
     spinlock_unlock( &mgr->lock );
 
-    // returns vpn_base, vpn_size (one page non allocated) 
+    // returns vpn_base, vpn_size (one page non allocated)
     *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;

@@ lines 285-289 @@
 // @ vmm      : [in]  pointer on VMM.
 // @ npages   : [in]  requested number of pages.
-// @ vpn_base : [out] first allocated page. 
+// @ vpn_base : [out] first allocated page.
 // @ vpn_size : [out] actual number of allocated pages.
 ////////////////////////////////////////////////////////////////////////////////////////////

@@ lines 297-303 @@
     vpn_t   base;
     vpn_t   size;
-    vpn_t   free; 
-
-    // mmap vseg size must be power of 2 
+    vpn_t   free;
+
+    // mmap vseg size must be power of 2
     // compute actual size and index in zombi_list array
     size = POW2_ROUNDUP( npages );

@@ lines 333-338 @@
         // compute base
         base = vseg->vpn_base;
-    } 
-
+    }
+
     // release lock on mmap allocator
     spinlock_unlock( &mgr->lock );

@@ lines 343-356 @@
     return 0;
 
-}  // end vmm_mmap_allocator()
+}  // end vmm_mmap_alloc()
 
 //////////////////////////////////////////////
 vseg_t * vmm_create_vseg( process_t   * process,
-                          intptr_t      base, 
-                          intptr_t      size, 
+                          intptr_t      base,
+                          intptr_t      size,
                           uint32_t      type )
 {
     vseg_t * vseg;          // created vseg pointer
-    vpn_t    vpn_base;      // vseg first page 
+    vpn_t    vpn_base;      // vseg first page
     vpn_t    vpn_size;      // number of pages
     error_t  error;

@@ lines 358-365 @@
     // get pointer on VMM
     vmm_t * vmm = &process->vmm;
 
     vmm_dmsg("\n[INFO] %s enter for process %x / base = %x / size = %x / type = %s\n",
              __FUNCTION__ , process->pid , base , size , vseg_type_name[type] );
 
     // compute base, size, vpn_base, vpn_size, depending on type
     // we use the VMM specific allocators for STACK and MMAP vsegs

@@ lines 379-384 @@
         size = vpn_size << CONFIG_PPM_PAGE_SHIFT;
     }
-    else if( (type == VSEG_TYPE_ANON) || 
-             (type == VSEG_TYPE_FILE) || 
+    else if( (type == VSEG_TYPE_ANON) ||
+             (type == VSEG_TYPE_FILE) ||
              (type == VSEG_TYPE_REMOTE) )
     {

@@ lines 407-414 @@
     if( vseg != NULL )
     {
-        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n" 
+        printk("\n[ERROR] in %s for process %x : new vseg [vpn_base = %x / vpn_size = %x]\n"
               " overlap existing vseg [vpn_base = %x / vpn_size = %x]\n",
-               __FUNCTION__ , process->pid, vpn_base, vpn_size, 
-               vseg->vpn_base, vseg->vpn_size ); 
+               __FUNCTION__ , process->pid, vpn_base, vpn_size,
+               vseg->vpn_base, vseg->vpn_size );
         return NULL;
     }

@@ lines 434-440 @@
     rwlock_wr_unlock( &vmm->vsegs_lock );
 
-    vmm_dmsg("\n[INFO] : %s exit for process %x, vseg [%x, %x] has been mapped\n", 
+    vmm_dmsg("\n[INFO] : %s exit for process %x, vseg [%x, %x] has been mapped\n",
              __FUNCTION__ , process->pid , vseg->min , vseg->max );
 
     return vseg;
 

@@ lines 517-521 @@
     gpt_t * gpt = &process_zero.vmm.gpt;
 
-    // define number of small pages per PTE 
+    // define number of small pages per PTE
     if( attr & GPT_SMALL ) order = 0;   // 1 small page
     else                   order = 9;   // 512 small pages

@@ lines 531-535 @@
         req.flags = AF_KERNEL | AF_ZERO;
         page      = (page_t *)kmem_alloc( &req );
-        if( page == NULL ) 
+        if( page == NULL )
         {
             printk("\n[ERROR] in %s : cannot allocate physical memory\n", __FUNCTION__ );

@@ lines 540-550 @@
         ppn   = ppm_page2ppn( page );
         error = hal_gpt_set_pte( gpt , vpn , ppn , attr );
-        if( error ) 
+        if( error )
         {
             printk("\n[ERROR] in %s : cannot register PPE\n", __FUNCTION__ );
         }
     }
 
     return 0;
 }  // end vmm_map_vseg()

@@ lines 554-559 @@
                      vseg_t    * vseg )
 {
-    vpn_t       vpn;        // VPN of current PTE 
-    vpn_t       vpn_min;    // VPN of first PTE 
+    vpn_t       vpn;        // VPN of current PTE
+    vpn_t       vpn_min;    // VPN of first PTE
     vpn_t       vpn_max;    // VPN of last PTE (excluded)
 

@@ lines 583-587 @@
     intptr_t addr_min = base;
     intptr_t addr_max = base + size;
-    uint32_t shift    = CONFIG_PPM_PAGE_SHIFT; 
+    uint32_t shift    = CONFIG_PPM_PAGE_SHIFT;
 
     // get pointer on vseg

@@ lines 589-596 @@
 
     if( vseg == NULL) return EINVAL;
 
     // get VMM lock protecting vsegs list
     rwlock_wr_lock( &vmm->vsegs_lock );
 
     if( (vseg->min > addr_min) || (vseg->max < addr_max) )   // region not included in vseg
     {

@@ lines 659-663 @@
     error_t  error;
 
-    // this function must be called by in the reference cluster 
+    // this function must be called by in the reference cluster
     if( process->is_ref == false );
     {

@@ lines 673-677 @@
 
     // if PTE unmapped => allocate one small physical page to map it
-    if( (attr & GPT_MAPPED) == 0 ) 
+    if( (attr & GPT_MAPPED) == 0 )
     {
         // get vseg pointer

@@ lines 702-706 @@
         page_t * page;
         if( target_cxy == local_cxy )    // target cluster is the local cluster
-        { 
+        {
             req.type = KMEM_PAGE;
             req.size = 0;

@@ lines 723-727 @@
         }
 
-        // define gGPT attributes from vseg flags
+        // define GPT attributes from vseg flags
         attr = GPT_MAPPED | GPT_SMALL;
         if( vseg->flags & VSEG_USER ) attr |= GPT_USER;

@@ lines 741-745 @@
 
     *ret_ppn  = ppn;
-    *ret_attr = attr; 
+    *ret_attr = attr;
     return 0;
 }  // end vmm_get_pte()

@@ lines 751-755 @@
 {
     uint32_t   attr;     // missing page attributes
-    ppn_t      ppn;      // missing page PPN 
+    ppn_t      ppn;      // missing page PPN
     error_t    error;    // return value
 

@@ lines 766-770 @@
         rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , &attr , &ppn , &error );
     }
-    else                       // local cluster is the reference cluster 
+    else                       // local cluster is the reference cluster
     {
         error = vmm_get_pte( process , vpn , &attr , &ppn );

@@ lines 775-779 @@
     {
         printk("\n[ERROR] in %s : cannot allocate memory / process = %x / vpn = %x\n",
-               __FUNCTION__ , process->pid , vpn ); 
+               __FUNCTION__ , process->pid , vpn );
         return ENOMEM;
     }

@@ lines 784-791 @@
     {
         printk("\n[ERROR] in %s : cannot register PTE / process = %x / vpn = %x\n",
-               __FUNCTION__ , process->pid , vpn ); 
+               __FUNCTION__ , process->pid , vpn );
         return ENOMEM;
     }
 
     return 0;
 }  // end vmm_handle_page_fault()

@@ lines 807-811 @@
     }
 
-    // access page table 
+    // access page table
     error_t    error;
     vpn_t      vpn;

@@ lines 834-842 @@
         return error;
     }
 
     // return paddr
     *paddr = (((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT) | offset;
     return 0;
 
 }  // end vmm_v2p_translate()
 
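The stack-allocator hunk above (lines 268-276) hands out fixed-size stack slots and deliberately leaves the first page of each slot unallocated, as the "one page non allocated" comment notes. Below is a minimal standalone sketch of that arithmetic only; CONFIG_VMM_STACK_BASE and CONFIG_VMM_STACK_SIZE are hypothetical placeholder values, and in the kernel the zone base actually comes from the stack manager's vpn_base field rather than a constant.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t vpn_t;

#define CONFIG_VMM_STACK_BASE  0x300   /* hypothetical zone base, in pages */
#define CONFIG_VMM_STACK_SIZE  16      /* hypothetical pages per stack slot */

/* same arithmetic as the vmm_stack_alloc() hunk:
   the first page of each slot is skipped (not allocated) */
static void stack_slot( uint32_t index , vpn_t * vpn_base , vpn_t * vpn_size )
{
    *vpn_base = CONFIG_VMM_STACK_BASE + index * CONFIG_VMM_STACK_SIZE + 1;
    *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
}

int main( void )
{
    vpn_t    base;
    vpn_t    size;
    uint32_t index;

    for( index = 0 ; index < 3 ; index++ )
    {
        stack_slot( index , &base , &size );
        printf("slot %u : vpn_base = 0x%x / vpn_size = %u\n", index, base, size);
    }
    return 0;
}

Running this prints one (vpn_base, vpn_size) pair per slot and makes the one-page hole between consecutive slots visible: slot 0 starts at 0x301, slot 1 at 0x311, slot 2 at 0x321, each covering 15 of the 16 pages of its slot.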
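The final hunk (lines 834-842) shows how vmm_v2p_translate() rebuilds a physical address: the PPN is shifted left by the page shift and OR-ed with the offset inside the page. The following self-contained sketch illustrates that page-number / offset arithmetic under two stated assumptions: 4 Kbyte pages, and a stub lookup in place of the real vmm_get_pte() page-table access.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t vpn_t;
typedef uint32_t ppn_t;
typedef uint64_t paddr_t;

#define PPM_PAGE_SHIFT  12                      /* assumes 4 Kbyte pages */
#define PPM_PAGE_SIZE   (1u << PPM_PAGE_SHIFT)

/* stand-in for the GPT lookup done by vmm_get_pte(): fake mapping vpn -> vpn + 0x100 */
static ppn_t fake_get_ppn( vpn_t vpn )
{
    return vpn + 0x100;
}

static paddr_t v2p( uintptr_t vaddr )
{
    vpn_t    vpn    = (vpn_t)(vaddr >> PPM_PAGE_SHIFT);        /* virtual page number   */
    uint32_t offset = (uint32_t)(vaddr & (PPM_PAGE_SIZE - 1)); /* offset inside the page */
    ppn_t    ppn    = fake_get_ppn( vpn );                     /* physical page number  */

    /* same recomposition formula as the vmm_v2p_translate() hunk */
    return (((paddr_t)ppn) << PPM_PAGE_SHIFT) | offset;
}

int main( void )
{
    uintptr_t vaddr = 0x00403A10;
    printf("vaddr = 0x%lx -> paddr = 0x%llx\n",
           (unsigned long)vaddr, (unsigned long long)v2p( vaddr ));
    return 0;
}

With the stub mapping, 0x00403A10 splits into vpn 0x403 and offset 0xA10, and recombines into paddr 0x503A10.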