Changeset 672
- Timestamp: Nov 19, 2020, 11:49:01 PM (4 years ago)
- Location: trunk/kernel/mm
- Files: 9 edited
trunk/kernel/mm/kcm.c
r657 → r672

   * kcm.c - Kernel Cache Manager implementation.
   *
-  * Author  Alain Greiner (2016,2017,2018,2019)
+  * Author  Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
…
      uint64_t status = kcm_page->status;

-     assert( (count < max) , "kcm_page should not be full" );
+     assert( __FUNCTION__, (count < max) , "kcm_page should not be full" );

      uint32_t index = 1;
…
      void * ptr = (void *)((intptr_t)kcm_page + (index * size) );

- #if (DEBUG_KCM & 1)
+ #if DEBUG_KCM
      thread_t * this  = CURRENT_THREAD;
      uint32_t   cycle = (uint32_t)hal_get_cycles();
…
      uint64_t mask = ((uint64_t)0x1) << index;

-     assert( (status & mask) , "released block not allocated : status (%x,%x) / mask(%x,%x)",
-     GET_CXY(status), GET_PTR(status), GET_CXY(mask ), GET_PTR(mask ) );
+     if( (status & mask) == 0 )
+     {
+         printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n",
+                __FUNCTION__, local_cxy, block_ptr, kcm, kcm_page );
+         printk("    status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );
+         kcm_remote_display( local_cxy , kcm );
+         return;
+     }

      // update status & count in kcm_page
…
- #if (DEBUG_KCM & 1)
+ #if DEBUG_KCM
      thread_t * this  = CURRENT_THREAD;
      uint32_t   cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_KCM < cycle )
-     printk("\n[%s] thread[%x,%x] released block %x in page %x / size %d / count %d / cycle %d\n",
+     printk("\n[%s] thread[%x,%x] block %x / page %x / size %d / count %d / cycle %d\n",
             __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );
  #endif
…
-     assert( ((order > 5) && (order < 12)) , "order must be in [6,11]" );
+     assert( __FUNCTION__, ((order > 5) && (order < 12)) , "order must be in [6,11]" );
+
+     assert( __FUNCTION__, (CONFIG_PPM_PAGE_SHIFT == 12) , "check status bit_vector width" );

      // initialize lock
…
      if( order < 6 ) order = 6;

-     assert( (order < 12) , "order = %d / must be less than 12" , order );
+     assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );

      // get local pointer on relevant KCM allocator
…
      thread_t * this  = CURRENT_THREAD;
      uint32_t   cycle = (uint32_t)hal_get_cycles();
-     if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
+     if( DEBUG_KCM < cycle )
      {
          printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n",
…
  #if DEBUG_KCM
-     if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
-     {
+     if( DEBUG_KCM < cycle )
      printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n",
             __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr,
             GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
-     kcm_remote_display( local_cxy , kcm_ptr );
-     }
  #endif
…
      // check argument
-     assert( (block_ptr != NULL) , "block pointer cannot be NULL" );
+     assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );

      // get local pointer on KCM page
…
  }  // end kcm_free()

+
  /////////////////////////////////////////////////////////////////////////////////////
  //        Remote access functions
…
      uint32_t size = 1 << order;

-     assert( (count < max) , "kcm_page should not be full" );
+     assert( __FUNCTION__, (count < max) , "kcm_page should not be full" );

      uint32_t index = 1;
…
      // compute mask in bit vector
-     uint64_t mask = 1 << index;
-
-     assert( (status & mask) , "released page not allocated" );
+     uint64_t mask = ((uint64_t)0x1) << index;
+
+     if( (status & mask) == 0 )
+     {
+         printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n",
+                __FUNCTION__, kcm_cxy, block_ptr, kcm_ptr, kcm_page );
+         printk("    status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );
+         kcm_remote_display( kcm_cxy , kcm_ptr );
+         return;
+     }

      // update status & count in kcm_page
…
      uint32_t cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_KCM_REMOTE < cycle )
-     printk("\n[%s] thread[%x,%x] released block %x in page %x / cluster %x / size %x / count %d\n",
+     printk("\n[%s] thread[%x,%x] block %x / page %x / cluster %x / size %x / count %d\n",
             __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 )
  #endif
…
  }  // end kcm_remote_get_page()

- /////////////////////////////////////////
+ //////////////////////////////////////////
  void * kcm_remote_alloc( cxy_t    kcm_cxy,
                           uint32_t order )
…
      if( order < 6 ) order = 6;

-     assert( (order < 12) , "order = %d / must be less than 12" , order );
+     assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );

      // get local pointer on relevant KCM allocator
…
      // check argument
-     assert( (block_ptr != NULL) , "block pointer cannot be NULL" );
+     assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );

      // get local pointer on remote KCM page
trunk/kernel/mm/kcm.h
r657 → r672

   * kcm.h - Kernel Cache Manager definition.
   *
-  * Authors  Alain Greiner (2016,2017,2018,2019)
+  * Authors  Alain Greiner (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
…
      list_entry_t   active_root;      /*! root of active pages list    */

-     uint32_t       full_pages_nr;    /*! number of busy pages         */
+     uint32_t       full_pages_nr;    /*! number of full pages         */
      uint32_t       active_pages_nr;  /*! number of active pages       */
…
  /****************************************************************************************
   * This structure defines a KCM-page descriptor.
-  * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) slots,
-  * and each slot contains one block. The kcm page descriptor is stored in first slot.
+  * A KCM-page contains (CONFIG_PPM_PAGE_SIZE / block_size) slots.
+  * Each slot contains one block, but the kcm page descriptor is stored in first slot.
   * The current allocation status is defined by the 64 bits "status" bit vector: each
-  * non zero bit defines an allocated block / "count" is the number of allocated blocks.
-  * Each kcm_page is registered in one of the two following page_list:
+  * non zero bit defines an allocated block / "count" is the number of allocated blocks.
+  * Each kcm_page is registered in one of the two following lists, rooted in the kcm:
   *  - full   : when count == max
   *  - active : count < max
…
  /****************************************************************************************
-  * This debug function can be called by any thread running in any cluster.
-  * It diplays on TXT0 the current state of a local KCM allocator.
+  * This debug function can be called by any thread running in any cluster. It displays
+  * on TXT0 the state of a KCM, identified by the <kcm_cxy> & <kcm_ptr> arguments.
   ****************************************************************************************
   * @ kcm_cxy : remote KCM cluster identifier.
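The updated kcm.h comment states the sizing rule: a KCM page holds (CONFIG_PPM_PAGE_SIZE / block_size) slots, the first slot holds the kcm_page descriptor itself, and a page moves to the "full" list when count reaches max. The short stand-alone computation below works through these quantities for the supported orders; the 4 Kbytes page size and the "one descriptor slot" assumption are illustrative, not the kernel configuration.

/* Back-of-the-envelope sketch of the sizing rule stated in the updated comment.
 * Assumptions for the example: a 4 Kbytes page, and a descriptor that fits in
 * exactly one slot, so max = slots - 1 blocks are available per page. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096u                      /* assumed CONFIG_PPM_PAGE_SIZE */

int main( void )
{
    for( uint32_t order = 6 ; order < 12 ; order++ )    /* blocks of 64 ... 2048 bytes */
    {
        uint32_t block_size = 1u << order;
        uint32_t slots      = PAGE_SIZE / block_size;   /* total slots in the page     */
        uint32_t max        = slots - 1;                /* slot 0 holds the descriptor */

        printf("order %2u : block %4u bytes / %2u slots / max %2u blocks per page\n",
               order, block_size, slots, max );
    }
    return 0;
}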
trunk/kernel/mm/khm.c
r567 → r672

  {
      // check config parameters
-     assert( ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) ,
+     assert( __FUNCTION__, ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) ,
              "CONFIG_PPM_HEAP_ORDER too large" );
…
      busylock_acquire(&khm->lock);

-     assert( (current->busy == 1) , "page already freed" );
+     assert( __FUNCTION__, (current->busy == 1) , "page already freed" );

      // release block
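Most hunks in this changeset follow a single pattern: every assert() call site now passes the calling function name as an explicit first argument. The stand-alone sketch below only illustrates the shape of a macro with that calling convention; the real ALMOS-MKH assert() is defined elsewhere in the kernel and may differ, so the macro and helper names here are assumptions for the example.

/* Illustrative only: an assert-like macro taking the caller's name explicitly,
 * matching the call sites updated in this changeset (GNU C, as used by the kernel). */
#include <stdio.h>
#include <stdlib.h>

#define my_assert( func, cond, fmt, ... )                                   \
do                                                                          \
{                                                                           \
    if( !(cond) )                                                           \
    {                                                                       \
        printf("\n[PANIC] in %s : " fmt "\n", func, ##__VA_ARGS__ );        \
        exit( EXIT_FAILURE );                                               \
    }                                                                       \
} while( 0 )

static void khm_free_sketch( int busy )
{
    // mirrors the updated call site: the function name is passed explicitly
    my_assert( __FUNCTION__, (busy == 1) , "page already freed" );
}

int main( void )
{
    khm_free_sketch( 1 );   // passes
    khm_free_sketch( 0 );   // triggers the failure path and exits
    return 0;
}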
trunk/kernel/mm/kmem.c
r657 → r672

  #include <printk.h>
  #include <cluster.h>
+ #include <thread.h>
  #include <memcpy.h>
  #include <khm.h>
trunk/kernel/mm/mapper.c
r657 → r672

      // - it is a directory mapper
      // - it is a file mapper, and it exist data on IOC device for this page
-     if( (inode == NULL) || (inode_type == INODE_TYPE_DIR) || (inode_size > (page_id << 10) ) )
+     if( (inode == NULL) || (inode_type == FILE_TYPE_DIR) || (inode_size > (page_id << 10) ) )
      {
          error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
…
      cxy_t mapper_cxy = GET_CXY( mapper_xp );

-     assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ),
+     assert( __FUNCTION__, (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ),
              "should not be used for the FAT mapper");
…
      cxy_t mapper_cxy = GET_CXY( mapper_xp );

-     assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ),
+     assert( __FUNCTION__, (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ),
              "should be used for the FAT mapper");
…
      char name[CONFIG_VFS_MAX_NAME_LENGTH];

-     assert( (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
-     assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
+     assert( __FUNCTION__, (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
+     assert( __FUNCTION__, (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");

      // get mapper cluster and local pointer
…
      mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );

-     assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
-     assert( (mapper_ptr == mapper   ) , "unconsistent mapper field in page descriptor");
-     assert( (page_id    == index    ) , "unconsistent index field in page descriptor");
+     assert( __FUNCTION__, (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
+     assert( __FUNCTION__, (mapper_ptr == mapper   ) , "unconsistent mapper field in page descriptor");
+     assert( __FUNCTION__, (page_id    == index    ) , "unconsistent index field in page descriptor");

      // get inode
trunk/kernel/mm/ppm.c
r657 → r672

      page_t * pages_tbl = ppm->pages_tbl;

-     assert( !page_is_flag( page , PG_FREE ) ,
+     assert( __FUNCTION__, !page_is_flag( page , PG_FREE ) ,
              "page already released : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );

-     assert( !page_is_flag( page , PG_RESERVED ) ,
+     assert( __FUNCTION__, !page_is_flag( page , PG_RESERVED ) ,
              "reserved page : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
…
      // check order
-     assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
+     assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

      // build extended pointer on lock protecting remote PPM
…
      // check order
-     assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
+     assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

      // get local pointer on PPM (same in all clusters)
…
      remote_busylock_acquire( lock_xp );

-     assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
+     assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_FREE ) ,
              "page already released : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );

-     assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
+     assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_RESERVED ) ,
              "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
trunk/kernel/mm/vmm.c
r665 → r672

   * vmm.c - virtual memory manager related operations definition.
   *
-  * Authors  Ghassan Almaless (2008,2009,2010,2011,
-  *          Alain Greiner    (2016,2017,2018,2019,2020)
+  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
+  *          Alain Greiner    (2016,2017,2018,2019,2020)
   *
   * Copyright (c) UPMC Sorbonne Universites
…
      // check STACK zone
-     assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
+     assert( __FUNCTION__, ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
              (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) , "STACK zone too small\n");
…
      // check ltid argument
-     assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+     assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
              "slot index %d too large for an user stack vseg", ltid );
…
      // check requested slot is available
-     assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
+     assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , ltid ) == false),
              "slot index %d already allocated", ltid );
…
      // check index
-     assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+     assert( __FUNCTION__, (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
              "slot index %d too large for an user stack vseg", index );

      // check released slot is allocated
-     assert( (bitmap_state( &mgr->bitmap , index ) == true),
+     assert( __FUNCTION__, (bitmap_state( &mgr->bitmap , index ) == true),
              "released slot index %d non allocated", index );
…
      // check HEAP base and size
-     assert( (CONFIG_VMM_HEAP_BASE == 0x40000) & (CONFIG_VMM_STACK_BASE == 0xc0000),
+     assert( __FUNCTION__, (CONFIG_VMM_HEAP_BASE == 0x40000) & (CONFIG_VMM_STACK_BASE == 0xc0000),
              "CONFIG_VMM_HEAP_BASE != 0x40000 or CONFIG_VMM_STACK_BASE != 0xc0000" );

      // check MMAP vseg max order
-     assert( (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" );
+     assert( __FUNCTION__, (CONFIG_VMM_HEAP_MAX_ORDER == 18), "max mmap vseg size is 256K pages" );

      // get pointer on MMAP allocator
…
      vseg_t * vseg0 = vseg_alloc();

-     assert( (vseg0 != NULL) , "cannot allocate vseg" );
+     assert( __FUNCTION__, (vseg0 != NULL) , "cannot allocate vseg" );

      vseg0->vpn_base = CONFIG_VMM_HEAP_BASE;
…
      vseg_t * vseg1 = vseg_alloc();

-     assert( (vseg1 != NULL) , "cannot allocate vseg" );
+     assert( __FUNCTION__, (vseg1 != NULL) , "cannot allocate vseg" );

      vseg1->vpn_base = CONFIG_VMM_HEAP_BASE << 1;
…
      // check UTILS zone
-     assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
-             (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
-             "UTILS zone too small\n" );
+     assert( __FUNCTION__ , ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
+             (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) , "UTILS zone too small\n" );

      // initialize lock protecting the VSL
      remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
-

      // initialize STACK allocator
…
      vmm->global_pgfault_cost = 0;

-     /*
-     // register "args" vseg in VSL
-     base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
-     size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
-
-     vseg_args = vmm_create_vseg( process,
-                                  VSEG_TYPE_DATA,
-                                  base,
-                                  size,
-                                  0,             // file_offset unused
-                                  0,             // file_size unused
-                                  XPTR_NULL,     // mapper_xp unused
-                                  local_cxy );
-     if( vseg_args == NULL )
-     {
-         printk("\n[ERROR] in %s : cannot register args vseg\n", __FUNCTION__ );
-         return -1;
-     }
-
-     vmm->args_vpn_base = base;
-
-     // register "envs" vseg in VSL
-     base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
-     size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
-
-     vseg_envs = vmm_create_vseg( process,
-                                  VSEG_TYPE_DATA,
-                                  base,
-                                  size,
-                                  0,             // file_offset unused
-                                  0,             // file_size unused
-                                  XPTR_NULL,     // mapper_xp unused
-                                  local_cxy );
-     if( vseg_envs == NULL )
-     {
-         printk("\n[ERROR] in %s : cannot register envs vseg\n", __FUNCTION__ );
-         return -1;
-     }
-
-     vmm->envs_vpn_base = base;
-     */
      hal_fence();
…
      // FIXME il faut gérer les process copies...

+     // re-initialise VMM
+     vmm_user_init( process );
+
  #if DEBUG_VMM_USER_RESET
      cycle = (uint32_t)hal_get_cycles();
…
      // check cluster is reference
-     assert( (XPTR( local_cxy , process ) == process->ref_xp),
+     assert( __FUNCTION__, (XPTR( local_cxy , process ) == process->ref_xp),
              "local cluster must be process reference cluster\n");
…
      // check arguments
-     assert( (process != NULL), "process argument is NULL" );
-     assert( (vseg    != NULL), "vseg argument is NULL" );
+     assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
+     assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );

      // get pointers on local process VMM
…
      // check arguments
-     assert( (process != NULL), "process argument is NULL" );
-     assert( (vseg    != NULL), "vseg argument is NULL" );
+     assert( __FUNCTION__, (process != NULL), "process argument is NULL" );
+     assert( __FUNCTION__, (vseg    != NULL), "vseg argument is NULL" );

  #if DEBUG_VMM_RESIZE_VSEG
…
      // check vseg type
-     assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
+     assert( __FUNCTION__, ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );

      // compute target cluster identifier
…
      xptr_t mapper_xp = vseg->mapper_xp;

-     assert( (mapper_xp != XPTR_NULL),
+     assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
              "mapper not defined for a FILE vseg\n" );
…
      xptr_t mapper_xp = vseg->mapper_xp;

-     assert( (mapper_xp != XPTR_NULL),
+     assert( __FUNCTION__, (mapper_xp != XPTR_NULL),
              "mapper not defined for a CODE or DATA vseg\n" );
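The asserts updated in vmm_stack_alloc() / vmm_stack_free() document the stack-slot policy: each local thread index maps to one fixed slot of the STACK zone, the allocator checks the bound and sets the corresponding bitmap bit, and the de-allocator checks the bit before clearing it. The stand-alone sketch below models that policy with an ordinary uint32_t bitmap; the slot count and function names are assumptions for the example, not the kernel API.

/* Stand-alone model (not the kernel code) of the per-thread stack-slot bitmap
 * checked by the asserts above: set the bit on allocation, verify it on release. */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define STACK_SLOTS  32u                      /* assumed number of stack slots */

static uint32_t stack_bitmap;                 /* bit i == 1 : slot i allocated */

static bool stack_alloc( uint32_t ltid )      /* mirrors the "slot available" checks */
{
    if( ltid >= STACK_SLOTS )                return false;   /* slot index too large  */
    if( stack_bitmap & (1u << ltid) )        return false;   /* slot already allocated */
    stack_bitmap |= (1u << ltid);
    return true;
}

static bool stack_free( uint32_t ltid )       /* mirrors the "slot is allocated" check */
{
    if( ltid >= STACK_SLOTS )                return false;
    if( (stack_bitmap & (1u << ltid)) == 0 ) return false;   /* slot non allocated    */
    stack_bitmap &= ~(1u << ltid);
    return true;
}

int main( void )
{
    printf("alloc slot 3 : %d\n", stack_alloc( 3 ) );   /* 1                       */
    printf("alloc slot 3 : %d\n", stack_alloc( 3 ) );   /* 0 : already allocated   */
    printf("free  slot 3 : %d\n", stack_free ( 3 ) );   /* 1                       */
    printf("free  slot 3 : %d\n", stack_free ( 3 ) );   /* 0 : not allocated       */
    return 0;
}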
trunk/kernel/mm/vmm.h
r657 → r672

   *
   * Authors  Ghassan Almaless (2008,2009,2010,2011, 2012)
-  *          Alain Greiner    (2016,2017,2018,2019,2020))
+  *          Alain Greiner (2016,2017,2018,2019,2020))
   *
   * Copyright (c) UPMC Sorbonne Universites
…
   *   corresponding bit in the bitmap.
   * - The de-allocator reset the corresponding bit in the bitmap.
+  *
+  * An architecture dependant hal_vmm_display() function is defined in <hal_vmm.h> file.
   ********************************************************************************************/
…
  /*********************************************************************************************
   * This structure defines the Virtual Memory Manager for a given process in a given cluster.
-  * This local VMM implements four main services:
-  * 1) It contains the local copy of vseg list (VSL), only complete in referrence.
+  * This VMM implements four main services:
+  * 1) It contains the local copy of the vseg list (VSL), only complete in reference cluster.
   * 2) It contains the local copy of the generic page table (GPT), only complete in reference.
   * 3) The stack manager dynamically allocates virtual memory space for the STACK vsegs.
…
  /*********************************************************************************************
-  * This function makes only a partial initialisation of the VMM attached to an user
-  * process: It initializes the STACK and MMAP allocators, and the VSL lock.
-  * - The GPT has been previously created, with the hal_gpt_create() function.
-  * - The "kernel" vsegs are previously registered, by the hal_vmm_kernel_update() function.
-  * - The "code" and "data" vsegs are registered by the elf_load_process() function.
-  * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
+  * This function makes a partial initialisation of the VMM attached to an user process.
+  * It initializes the STACK and MMAP allocators, the VSL lock, and the instrumentation
+  * counters, but it does not register any vseg in the VSL:
+  * - The "kernel" vsegs are registered by the hal_vmm_kernel_update() function.
+  * - The "args" & "envs" vsegs are registered by the process_make_exec() function.
+  * - The "code" and "data" vsegs are registered by the elf_load_process() function.
+  * - The "stack" vsegs are registered by process_make_exec() / thread_user_create().
   * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall.
…
  /*********************************************************************************************
-  * This function re-initialises the VMM attached to an user process to prepare a new
-  * call to the vmm_user_init() function after an exec() syscall.
-  * It removes from the VMM of the process identified by the <process> argument all
-  * user vsegs, by calling the vmm_remove_vseg() function.
-  * - the vsegs are removed from the VSL.
-  * - the corresponding GPT entries are removed from the GPT.
-  * - the physical pages are released to the relevant kmem when they are not shared.
-  * The VSL and the GPT are not modified for the kernel vsegs.
+  * This function is called by the process_make_exec() function to re-initialise the VMM
+  * attached to an user process:
+  * - It removes from the VMM of the process identified by the <process> argument all user
+  *   vsegs, by calling the vmm_remove_vseg() function: the vsegs are removed from the VSL,
+  *   the corresponding GPT entries are removed from the GPT, and the physical pages are
+  *   released to the relevant kmem when they are not shared.
+  * - The VSL and the GPT are not modified for the kernel vsegs.
+  * - Finally, it calls the vmm_user_init() function to re-initialize the STACK and MMAP
+  *   allocators, and the lock protecting the GPT.
   *********************************************************************************************
   * @ process : pointer on process descriptor.
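The rewritten vmm.h comments describe the exec-time flow that the vmm.c change completes: vmm_user_reset() removes all user vsegs and then calls vmm_user_init() to rebuild the STACK and MMAP allocators, while the "args" and "envs" vsegs are registered afterwards by process_make_exec(). A toy model of that split of responsibilities, using invented names and holding no real kernel state:

/* Toy model (invented names, no kernel state) of the exec-time sequence described
 * in the updated vmm.h comments: reset strips the user vsegs and re-initialises the
 * allocators via vmm_user_init(), then the caller registers "args" and "envs". */
#include <stdio.h>
#include <stdbool.h>

typedef struct
{
    unsigned user_vsegs;        /* number of registered user vsegs            */
    bool     allocators_ready;  /* STACK and MMAP allocators initialised      */
} vmm_model_t;

static void model_user_init( vmm_model_t * vmm )   /* registers no vseg at all        */
{
    vmm->allocators_ready = true;
}

static void model_user_reset( vmm_model_t * vmm )  /* strip user vsegs, then re-init  */
{
    vmm->user_vsegs = 0;                           /* vmm_remove_vseg() on each vseg  */
    model_user_init( vmm );                        /* the call added by this changeset */
}

static void model_make_exec( vmm_model_t * vmm )   /* caller registers args & envs    */
{
    model_user_reset( vmm );
    vmm->user_vsegs += 2;                          /* the "args" and "envs" vsegs     */
}

int main( void )
{
    vmm_model_t vmm = { .user_vsegs = 7 , .allocators_ready = true };
    model_make_exec( &vmm );
    printf("user vsegs %u / allocators ready %d\n", vmm.user_vsegs, vmm.allocators_ready );
    return 0;
}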
trunk/kernel/mm/vseg.c
r657 → r672

      else
      {
-         assert( false , "illegal vseg type\n" );
+         assert( __FUNCTION__, false , "illegal vseg type\n" );

      }
…
          default:
          {
-             assert( false, "Illegal vseg type" );
+             assert( __FUNCTION__, false, "Illegal vseg type" );
              break;
          }