Changeset 204 for trunk/kernel/mm
Timestamp: Jul 17, 2017, 8:42:59 AM
Location:  trunk/kernel/mm
Files:     3 edited
Legend: unmodified context lines are unprefixed; added lines are prefixed with "+"; removed lines are prefixed with "-".
trunk/kernel/mm/mapper.c
(diff r183 -> r204)

      return mapper;

 -}
 +
 +} // end mapper_create()

  ///////////////////////////////////////////
...
      return 0;

 -}
 +
 +} // end mapper_destroy()

  ////////////////////////////////////////////
...
      error_t error;

 +    mapper_dmsg("\n[INFO] %s : enter for page %d / mapper = %x\n",
 +                __FUNCTION__ , index , mapper );
 +
      thread_t * this = CURRENT_THREAD;
...
      if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
      {
 +
          // release the lock in READ_MODE and take it in WRITE_MODE
          rwlock_rd_unlock( &mapper->lock );
...
          if ( page == NULL )  // missing page => load it from file system
          {
 +            mapper_dmsg("\n[INFO] %s : missing page => load from FS\n", __FUNCTION__ );
 +
              // allocate one page from PPM
              req.type = KMEM_PAGE;
...
              sched_yield();
          }
 -
 -        }
 -
 -        return page;
 -    }
 -    else
 -    {
 -        // release lock from READ_MODE
 -        rwlock_rd_unlock( &mapper->lock );
 -
 -        return page;
 -    }
 -}
 +        }
 +    }
 +    else   // page available in mapper
 +    {
 +
 +        rwlock_rd_unlock( &mapper->lock );
 +    }
 +
 +    mapper_dmsg("\n[INFO] %s : exit for page %d / page desc = %x\n",
 +                __FUNCTION__ , index , page );
 +
 +    return page;
 +
 +} // end mapper_get_page()
...
      return 0;

 -}
 +
 +} // end mapper_release_page()
...
      uint8_t * map_ptr;    // current mapper address
      uint8_t * buf_ptr;    // current buffer address
 +
 +    mapper_dmsg("\n[INFO] %s : enter / to_buf = %d / buffer = %x\n",
 +                __FUNCTION__ , to_buffer , buffer );

      // compute offsets of first and last bytes in file
...
      }

 +    mapper_dmsg("\n[INFO] %s : exit for buffer %x\n",
 +                __FUNCTION__, buffer );
 +
      return 0;
 -}

 +} // end mapper_move()
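The mapper_dmsg() calls added above are conditional debug traces. The sketch below shows one common way such a macro is wired to a build-time switch; the CONFIG_MAPPER_DEBUG flag and the exact definition are assumptions for illustration, not the actual kernel configuration.

    /* hedged sketch: conditional debug trace, assuming a hypothetical      */
    /* CONFIG_MAPPER_DEBUG switch; the real kernel definition may differ    */
    #if CONFIG_MAPPER_DEBUG
    #define mapper_dmsg( ... )   printk( __VA_ARGS__ )
    #else
    #define mapper_dmsg( ... )
    #endif

With such a definition, the traces added in this changeset compile to nothing when the switch is disabled, so the hot path of mapper_get_page() and mapper_move() is unaffected in normal builds.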
trunk/kernel/mm/mapper.h
(diff r23 -> r204)

  *   readers, and only one writer. This lock implement a busy waiting policy.
  * - The two functions vfs_move_page_to_mapper() and vfs_move_page_from_mapper() define
 -*   the generic API used to move pages to or from the relevant file system on IOC device.
 +*   the generic API used to move pages to or from the relevant file system.
  * - the mapper_move() function is used to move data to or from a, possibly distributed
  *   user buffer in user space.
...
  * The offset in the file descriptor is not modified by this function.
  *******************************************************************************************
 -* @ mapper      : extended pointer on local mapper.
 +* @ mapper      : local pointer on local mapper.
  * @ to_buffer   : move data from mapper to buffer if true.
  * @ file_offset : first byte to move in file.
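For reference, a hedged usage sketch of mapper_move() built from the parameter comments above: the first three arguments (mapper, to_buffer, file_offset) come from the documented interface, while the destination buffer and the byte count are assumed trailing parameters and may not match the exact prototype in mapper.h.

    uint8_t   buf[512];        // hypothetical destination buffer
    error_t   error;

    // copy 512 bytes, starting at byte 1024 of the file, from the mapper to buf
    error = mapper_move( mapper,      // local pointer on local mapper
                         true,        // to_buffer : move data from mapper to buffer
                         1024,        // file_offset : first byte to move in file
                         buf,         // assumed : destination buffer
                         512 );       // assumed : number of bytes to move

    if( error ) printk("\n[ERROR] in %s : mapper_move failed\n", __FUNCTION__ );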
trunk/kernel/mm/vmm.c
(diff r179 -> r204)

      intptr_t size;

 +    vmm_dmsg("\n[INFO] %s : enter for process %x\n", __FUNCTION__ , process->pid );
 +
      // get pointer on VMM
      vmm_t * vmm = &process->vmm;

 -    // check UTILS zone size
 -    if( (CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE ) >
 -        CONFIG_VMM_ELF_BASE )
 -    {
 -        printk("\n[PANIC] in %s : UTILS zone too small for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 -
 -    // check max number of stacks slots
 -    if( CONFIG_THREAD_MAX_PER_CLUSTER > 32 )
 -    {
 -        printk("\n[PANIC] in %s : max number of threads per cluster for a single process"
 -               " cannot be larger than 32\n", __FUNCTION__ );
 -        hal_core_sleep();
 -    }
 -
 -    // check STACK zone size
 -    if( (CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) >
 -        (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) )
 -    {
 -        printk("\n[PANIC] in %s : STACK zone too small for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 +    assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
 +             <= CONFIG_VMM_ELF_BASE) , __FUNCTION__ , "UTILS zone too small\n" );
 +
 +    assert( (CONFIG_THREAD_MAX_PER_CLUSTER <= 32) , __FUNCTION__ ,
 +            "no more than 32 threads per cluster for a single process\n");
 +
 +    assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) <=
 +             (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , __FUNCTION__ ,
 +            "STACK zone too small\n");

      // initialize the rwlock protecting the vsegs list
...
                   CONFIG_VMM_GRDXT_W2,
                   CONFIG_VMM_GRDXT_W3 );
 -    if( error )
 -    {
 -        printk("\n[PANIC] in %s : cannot initialize radix tree for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 +
 +    assert( (error == 0) , __FUNCTION__ , "cannot initialize radix tree\n" );

      // register kentry vseg in VMM
...
      size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
      vseg_kentry = vmm_create_vseg( process , base , size , VSEG_TYPE_CODE );
 -    if( vseg_kentry == NULL )
 -    {
 -        printk("\n[PANIC] in %s : cannot register kent vseg for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 +
 +    assert( (vseg_kentry != NULL) , __FUNCTION__ , "cannot register kentry vseg\n" );
 +
      vmm->kent_vpn_base = 1;
...
      size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
      vseg_args = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA );
 -    if( vseg_args == NULL )
 -    {
 -        printk("\n[PANIC] in %s : cannot register args vseg for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 +
 +    assert( (vseg_args != NULL) , __FUNCTION__ , "cannot register args vseg\n" );
 +
      vmm->args_vpn_base = CONFIG_VMM_KENTRY_SIZE + 1;
...
      size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
      vseg_envs = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA );
 -    if( vseg_envs == NULL )
 -    {
 -        printk("\n[PANIC] in %s : cannot register envs vseg for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 +
 +    assert( (vseg_envs != NULL) , __FUNCTION__ , "cannot register envs vseg\n" );
 +
      vmm->envs_vpn_base = CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + 1;
...
      size = (CONFIG_VMM_MMAP_BASE-CONFIG_VMM_HEAP_BASE) << CONFIG_PPM_PAGE_SHIFT;
      vseg_heap = vmm_create_vseg( process , base , size , VSEG_TYPE_HEAP );
 -    if( vseg_heap == NULL )
 -    {
 -        printk("\n[PANIC] in %s : cannot register heap vseg for process %x\n",
 -               __FUNCTION__ , process->pid );
 -        hal_core_sleep();
 -    }
 +
 +    assert( (vseg_heap != NULL) , __FUNCTION__ , "cannot register heap vseg\n" );
 +
      vmm->heap_vpn_base = CONFIG_VMM_HEAP_BASE;

      // initialize generic page table
      error = hal_gpt_create( &vmm->gpt );
 -    if( error )
 -    {
 -        printk("PANIC in %s : cannot initialize page table\n", __FUNCTION__ );
 -        hal_core_sleep();
 -    }
 +
 +    assert( (error == 0) , __FUNCTION__ , "cannot initialize page table\n");

      // initialize STACK allocator
...
      hal_fence();
 -}
 +
 +    vmm_dmsg("\n[INFO] %s : exit for process %x\n", __FUNCTION__ , process->pid );
 +
 +} // end vmm_init()

  //////////////////////////////////////////
...
      return 0;
 -}
 +
 +} // vmm_copy()

  ///////////////////////////////////////
...
      // release memory allocated to the local page table
      hal_gpt_destroy( &vmm->gpt );
 -}
 +
 +} // end vmm_destroy()

  /////////////////////////////////////////////////
...
      {
          vseg = LIST_ELEMENT( iter , vseg_t , list );
 +
          if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
              (vpn_base < (vseg->vpn_base + vseg->vpn_size)) ) return vseg;
      }
      return NULL;
 -}
 +
 +} // end vmm_check_conflict()
...
      *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
      return 0;
 -}
 +
 +} // end vmm_stack_alloc()
...
      *vpn_size = size;
      return 0;
 -}
 +
 +} // end vmm_mmap_alloc()
...
  {
      vseg_t  * vseg;          // created vseg pointer
 -    vpn_t     vpn_base;      // vseg first page
 +    vpn_t     vpn_base;      // first page index
      vpn_t     vpn_size;      // number of pages
      error_t   error;
...
      vmm_t * vmm = &process->vmm;

 -    vmm_dmsg("\n[INFO] %s enter for process %x / base = %x / size = %x / type = %s\n",
 +    vmm_dmsg("\n[INFO] %s : enter for process %x / base = %x / size = %x / type = %s\n",
              __FUNCTION__ , process->pid , base , size , vseg_type_str(type) );

 -    // compute base, size, vpn_base, vpn_size, depending on type
 +    // compute base, size, vpn_base, vpn_size, depending on vseg type
      // we use the VMM specific allocators for STACK and MMAP vsegs
      if( type == VSEG_TYPE_STACK )
...
      else
      {
 -        vpn_base = ARROUND_DOWN( base , CONFIG_PPM_PAGE_SIZE ) >> CONFIG_PPM_PAGE_SHIFT;
 -        vpn_size = ARROUND_UP( base + size , CONFIG_PPM_PAGE_SIZE ) >> CONFIG_PPM_PAGE_SHIFT;
 +        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
 +        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
 +
 +        vpn_base = vpn_min;
 +        vpn_size = vpn_max - vpn_min + 1;
      }
...
      rwlock_wr_unlock( &vmm->vsegs_lock );

 -    vmm_dmsg("\n[INFO] : %s exit for process %x,vseg [%x, %x] has been mapped\n",
 +    vmm_dmsg("\n[INFO] %s : exit for process %x / vseg [%x, %x] has been mapped\n",
              __FUNCTION__ , process->pid , vseg->min , vseg->max );
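The new vpn_base / vpn_size computation in vmm_create_vseg() can be sanity-checked with a small worked example. This is a standalone sketch, assuming 4-Kbyte pages (page shift 12); the base and size values are illustrative and not taken from the kernel configuration.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  12   /* stands in for CONFIG_PPM_PAGE_SHIFT (assumed 4K pages) */

    int main( void )
    {
        uint32_t base = 0x1800;   /* vseg starts in the middle of page 1           */
        uint32_t size = 0x2000;   /* 8 Kbytes => last byte is 0x37FF, in page 3     */

        uint32_t vpn_min  = base >> PAGE_SHIFT;               /* 0x1800 >> 12 = 1   */
        uint32_t vpn_max  = (base + size - 1) >> PAGE_SHIFT;  /* 0x37FF >> 12 = 3   */
        uint32_t vpn_base = vpn_min;                          /* first page  = 1    */
        uint32_t vpn_size = vpn_max - vpn_min + 1;            /* 3 pages : 1, 2, 3  */

        printf( "vpn_base = %u / vpn_size = %u\n", vpn_base, vpn_size );
        return 0;
    }

Note that the removed formula derived vpn_size from the rounded-up end address alone (ARROUND_UP( base + size , ... )), which equals the page count measured from address zero rather than from base; the new formula counts only the pages between the first and last byte actually covered by the vseg.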