Changeset 625 for trunk/kernel/mm
Timestamp: Apr 10, 2019, 10:09:39 AM
Location:  trunk/kernel/mm
Files:     9 edited
Legend: unchanged context lines are unmarked, added lines are prefixed with "+", removed lines with "-". Hunks are separated by "…".
trunk/kernel/mm/mapper.c
r624 → r625

…
  #if DEBUG_MAPPER_GET_PAGE
- uint32_t cycle = (uint32_t)hal_get_cycles();
+ vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
+ uint32_t      cycle = (uint32_t)hal_get_cycles();
  char          name[CONFIG_VFS_MAX_NAME_LENGTH];
- vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
- vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
- if( DEBUG_MAPPER_GET_PAGE < cycle )
- printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
+ if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )   // FAT mapper
+ {
+     printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
+ }
+ if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )   // file mapper
+ {
+     vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
+     printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
+ }
  #endif
…
  #if DEBUG_MAPPER_GET_PAGE
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_MAPPER_GET_PAGE < cycle )
- printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid,
- page_id, name, ppm_page2ppn( page_xp ), cycle );
+ if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
+ {
+     printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id,
+     name, ppm_page2ppn(page_xp), cycle );
+ }
+ if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
+ {
+     printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id,
+     ppm_page2ppn(page_xp), cycle );
+ }
  #endif
…
  #if DEBUG_MAPPER_HANDLE_MISS
- uint32_t cycle = (uint32_t)hal_get_cycles();
+ uint32_t      cycle = (uint32_t)hal_get_cycles();
  char          name[CONFIG_VFS_MAX_NAME_LENGTH];
  vfs_inode_t * inode = mapper->inode;
- vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
- if( DEBUG_MAPPER_HANDLE_MISS < cycle )
- printk("\n[%s] enter for page %d in <%s> / cycle %d",
- __FUNCTION__, page_id, name, cycle );
- if( DEBUG_MAPPER_HANDLE_MISS & 1 )
- grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
+ if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
+ {
+     vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
+     printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
+     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
+ }
+ if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
+ {
+     printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
+     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
+ }
  #endif
…
  #if DEBUG_MAPPER_HANDLE_MISS
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_MAPPER_HANDLE_MISS < cycle )
- printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
- __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
- if( DEBUG_MAPPER_HANDLE_MISS & 1 )
- grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
+ if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
+ {
+     printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
+     __FUNCTION__, this->process->pid, this->trdid,
+     page_id, name, ppm_page2ppn( *page_xp ), cycle );
+     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
+ }
+ if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
+ {
+     printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
+     __FUNCTION__, this->process->pid, this->trdid,
+     page_id, ppm_page2ppn( *page_xp ), cycle );
+     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
+ }
  #endif
…
  #if DEBUG_MAPPER_MOVE_KERNEL
- uint32_t cycle = (uint32_t)hal_get_cycles();
- thread_t * this = CURRENT_THREAD;
+ char          name[CONFIG_VFS_MAX_NAME_LENGTH];
+ uint32_t      cycle  = (uint32_t)hal_get_cycles();
+ thread_t    * this   = CURRENT_THREAD;
+ mapper_t    * mapper = GET_PTR( mapper_xp );
+ vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
+ vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
  if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
- printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x/ cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
+ printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
  #endif
…
  uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
  uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
-
- #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
- if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
- printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
- #endif

  // compute source and destination clusters
…
  else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
  else                         page_count = CONFIG_PPM_PAGE_SIZE;
-
- #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
- if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
- printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
- __FUNCTION__ , page_id , page_offset , page_count );
- #endif

  // get extended pointer on page descriptor
…
  #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
  if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
- printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
- __FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
+ {
+     if( to_buffer )
+     printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
+     __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_count );
+     else
+     printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
+     __FUNCTION__, src_cxy, src_ptr, name, page_id, page_count );
+ }
  #endif
…
  #if DEBUG_MAPPER_MOVE_KERNEL
- cycle = (uint32_t)hal_get_cycles();
+ cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
- printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x /cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
+ printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, cycle );
  #endif
…
  // get pointer on radix tree
- rt = &mapper->rt;
+ rt = &mapper->rt;

  // initialise loop variable
…
  if( page == NULL ) break;

- assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
- assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );
+ assert( (page->index == found_key ), "wrong page descriptor index" );
+ assert( (page->order == 0),          "mapper page order must be 0" );

  // build extended pointer on page descriptor
…
  char       buffer[4096];   // local buffer
  uint32_t * tabi;           // pointer on uint32_t to scan buffer
- char     * tabc;           // pointer on char to scan buffer
  uint32_t   line;           // line index
  uint32_t   word;           // word index
- uint32_t   n;              // char index
  cxy_t      mapper_cxy;     // mapper cluster identifier
  mapper_t * mapper_ptr;     // mapper local pointer
…
  // display 8 words per line
  tabi = (uint32_t *)buffer;
- tabc = (char *)buffer;
  printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
  for( line = 0 ; line < (nbytes >> 5) ; line++ )
  {
-     printk("%X : ", line );
+     printk("%X : ", line << 5 );
      for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
-     printk(" | ");
-     for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
      printk("\n");
  }
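The last hunk above also fixes the row labels of the hex dump: each displayed row covers 8 words of 4 bytes, so the byte offset of row line is line << 5, not the bare row index. A minimal user-space sketch of the corrected loop, with printf() standing in for the kernel printk() and the dump_words() wrapper being illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the fixed display loop: 8 words (32 bytes) per row, each row
     * labeled with its byte offset (line << 5). The buffer is assumed to be
     * 4-byte aligned, as the kernel page buffer is. */
    static void dump_words( const uint32_t * tabi , uint32_t nbytes )
    {
        for( uint32_t line = 0 ; line < (nbytes >> 5) ; line++ )
        {
            printf("%X : ", line << 5 );     /* byte offset, not row index */
            for( uint32_t word = 0 ; word < 8 ; word++ )
                printf("%X ", tabi[(line << 3) + word] );
            printf("\n");
        }
    }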
trunk/kernel/mm/mapper.h
r623 → r625

…
  /*******************************************************************************************
- * This function move data between a remote mapper, dentified by the <mapper_xp> argument,
+ * This function moves data between a remote mapper, identified by the <mapper_xp> argument,
  * and a distributed user buffer. It can be called by a thread running in any cluster.
  * It is called by the vfs_user_move() to implement sys_read() and sys_write() syscalls.
…
  /********************************************************************************************
- * This function move data between a remote mapper and a remote kernel buffer.
- * It can be called by a thread running any cluster.
+ * This function moves data between a remote mapper, identified by the <mapper_xp> argument,
+ * and a localised remote kernel buffer. It can be called by a thread running in any cluster.
  * If required, the data transfer is split in "fragments", where one fragment contains
  * contiguous bytes in the same mapper page.
…
  /*******************************************************************************************
  * This function allows to write a single word to a mapper seen as an array of uint32_t.
- * It has bee designed to support remote access tho the FAT mapper of the FATFS.
+ * It has been designed to support remote access to the FAT mapper of the FATFS.
  * It can be called by any thread running in any cluster.
  * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
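The "fragment" notion used in these comments is pure arithmetic on the file offset. Assuming 4 Kbytes pages, a hypothetical stand-alone helper (the names fragment(), PAGE_SHIFT, PAGE_MASK and PAGE_SIZE are illustrative stand-ins for the CONFIG_PPM_* constants, not kernel API) computes, for one page touched by a transfer of size > 0 bytes, the offset and byte count handled in one step:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    /* A transfer of <size> bytes starting at <file_offset> touches pages
     * [first..last]; the fragment in page <page_id> covers <*page_count>
     * bytes starting at <*page_offset> inside that page. */
    static void fragment( uint32_t file_offset, uint32_t size, uint32_t page_id,
                          uint32_t * page_offset, uint32_t * page_count )
    {
        uint32_t min_byte = file_offset;
        uint32_t max_byte = file_offset + size - 1;
        uint32_t first    = min_byte >> PAGE_SHIFT;
        uint32_t last     = max_byte >> PAGE_SHIFT;

        if( page_id == first ) *page_offset = min_byte & PAGE_MASK;
        else                   *page_offset = 0;

        if( (page_id == first) && (page_id == last) ) *page_count = size;
        else if( page_id == last )  *page_count = (max_byte & PAGE_MASK) + 1;
        else if( page_id == first ) *page_count = PAGE_SIZE - *page_offset;
        else                        *page_count = PAGE_SIZE;
    }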
trunk/kernel/mm/page.h
r623 → r625

…
  *   test/modify the forks counter or the page flags.
  * - The list entry is used to register the page in a free list or in dirty list.
- * NOTE: Size is 48 bytes for a 32 bits core.
- * TODO : the refcount use is not defined [AG]
+ * The refcount is used for page release to KMEM.
+ * NOTE: the size is 48 bytes for a 32 bits core.
  ************************************************************************************/
…
  uint32_t          index;       /*! page index in mapper                 (4)  */
  list_entry_t      list;        /*! for both dirty pages and free pages  (8)  */
- uint32_t          refcount;    /*! reference counter TODO ??? [AG]      (4)  */
+ int32_t           refcount;    /*! references counter for page release  (4)  */
  uint32_t          forks;       /*! number of pending forks              (4)  */
  remote_busylock_t lock;        /*! protect forks or flags modifs        (16) */
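The clarified refcount semantics (a counter of references that gates the release of the page to KMEM) follow the usual atomic reference-counting pattern. A minimal single-cluster sketch, with C11 atomics standing in for hal_remote_atomic_add() and the type and helper names being illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct page_sketch_s
    {
        atomic_int refcount;   /* references counter for page release */
    } page_sketch_t;

    /* Returns true when the caller dropped the last reference and must
     * therefore release the page to the allocator. atomic_fetch_sub()
     * returns the value held *before* the decrement. */
    static bool page_release_ref( page_sketch_t * page )
    {
        return ( atomic_fetch_sub( &page->refcount , 1 ) == 1 );
    }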
trunk/kernel/mm/ppm.c
r611 → r625

…
  }  // end ppm_free_pages()

- ///////////////////////////////
- void ppm_print( char * string )
+ ////////////////////////
+ void ppm_display( void )
  {
  uint32_t order;
…
  busylock_acquire( &ppm->free_lock );

- printk("\n*** PPM in cluster %x / %s / %d pages ***\n",
- local_cxy , string, ppm->pages_nr );
+ printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );

  for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
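ppm_display() walks the per-order free lists of the buddy allocator; since a block in the order-k list spans 2^k pages, the per-order counters it prints also determine the total number of free pages. A small illustrative helper (free_count and MAX_ORDER are stand-ins, not kernel names):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ORDER 10   /* stand-in for CONFIG_PPM_MAX_ORDER */

    /* With free_count[k] blocks in the order-k free list, the total number
     * of free pages is the sum of free_count[k] << k over all orders. */
    static uint32_t total_free_pages( const uint32_t free_count[MAX_ORDER] )
    {
        uint32_t total = 0;
        for( uint32_t order = 0 ; order < MAX_ORDER ; order++ )
        {
            printf("- order %u : %u free blocks\n", order, free_count[order] );
            total += free_count[order] << order;
        }
        return total;
    }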
trunk/kernel/mm/ppm.h
r623 → r625

…
  * string   : character string printed in header
  ****************************************************************************************/
- void ppm_print( char * string );
+ void ppm_display( void );

  /*****************************************************************************************
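After this rename, call sites simply drop the header-string argument; a hypothetical before/after (the string shown is illustrative, not taken from the repository):

    // before (r623) : ppm_print( "after kernel_init" );
    // after  (r625) : ppm_display();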
trunk/kernel/mm/vmm.c
r624 → r625

…
  * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
  *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  extern process_t process_zero;    // allocated in cluster.c

- ///////////////////////////////////////
- error_t vmm_init( process_t * process )
- {
-     error_t   error;
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_create_vseg() function, and implements
+ // the VMM STACK specific allocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm      : [in]  pointer on VMM.
+ // @ ltid     : [in]  requested slot == local user thread identifier.
+ // @ vpn_base : [out] first allocated page
+ // @ vpn_size : [out] number of allocated pages
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static void vmm_stack_alloc( vmm_t  * vmm,
+                              ltid_t   ltid,
+                              vpn_t  * vpn_base,
+                              vpn_t  * vpn_size )
+ {
+     // check ltid argument
+     assert( (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+     "slot index %d too large for an user stack vseg", ltid );
+
+     // get stack allocator pointer
+     stack_mgr_t * mgr = &vmm->stack_mgr;
+
+     // get lock on stack allocator
+     busylock_acquire( &mgr->lock );
+
+     // check requested slot is available
+     assert( (bitmap_state( &mgr->bitmap , ltid ) == false),
+     "slot index %d already allocated", ltid );
+
+     // update bitmap
+     bitmap_set( &mgr->bitmap , ltid );
+
+     // release lock on stack allocator
+     busylock_release( &mgr->lock );
+
+     // returns vpn_base, vpn_size (first page not allocated)
+     *vpn_base = mgr->vpn_base + ltid * CONFIG_VMM_STACK_SIZE + 1;
+     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
+
+ }  // end vmm_stack_alloc()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_remove_vseg() function, and implements
+ // the VMM STACK specific deallocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm  : [in] pointer on VMM.
+ // @ vseg : [in] pointer on released vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static void vmm_stack_free( vmm_t  * vmm,
+                             vseg_t * vseg )
+ {
+     // get stack allocator pointer
+     stack_mgr_t * mgr = &vmm->stack_mgr;
+
+     // compute slot index
+     uint32_t index = (vseg->vpn_base - 1 - mgr->vpn_base) / CONFIG_VMM_STACK_SIZE;
+
+     // check index
+     assert( (index <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
+     "slot index %d too large for an user stack vseg", index );
+
+     // check released slot is allocated
+     assert( (bitmap_state( &mgr->bitmap , index ) == true),
+     "released slot index %d non allocated", index );
+
+     // get lock on stack allocator
+     busylock_acquire( &mgr->lock );
+
+     // update stacks_bitmap
+     bitmap_clear( &mgr->bitmap , index );
+
+     // release lock on stack allocator
+     busylock_release( &mgr->lock );
+
+ }  // end vmm_stack_free()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_create_vseg() function, and implements
+ // the VMM MMAP specific allocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm      : [in]  pointer on VMM.
+ // @ npages   : [in]  requested number of pages.
+ // @ vpn_base : [out] first allocated page.
+ // @ vpn_size : [out] actual number of allocated pages.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static error_t vmm_mmap_alloc( vmm_t * vmm,
+                                vpn_t   npages,
+                                vpn_t * vpn_base,
+                                vpn_t * vpn_size )
+ {
+     uint32_t   order;
+     xptr_t     vseg_xp;
+     vseg_t   * vseg;
+     vpn_t      base;
+     vpn_t      size;
+     vpn_t      free;
+
+ #if DEBUG_VMM_MMAP_ALLOC
+     thread_t * this  = CURRENT_THREAD;
+     uint32_t   cycle = (uint32_t)hal_get_cycles();
+     if( DEBUG_VMM_MMAP_ALLOC < cycle )
+     printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, cycle );
+ #endif
+
+     // number of allocated pages must be power of 2
+     // compute actual size and order
+     size  = POW2_ROUNDUP( npages );
+     order = bits_log2( size );
+
+     // get mmap allocator pointer
+     mmap_mgr_t * mgr = &vmm->mmap_mgr;
+
+     // build extended pointer on root of zombi_list[order]
+     xptr_t root_xp = XPTR( local_cxy , &mgr->zombi_list[order] );
+
+     // take lock protecting zombi_lists
+     busylock_acquire( &mgr->lock );
+
+     // get vseg from zombi_list or from mmap zone
+     if( xlist_is_empty( root_xp ) )    // from mmap zone
+     {
+         // check overflow
+         free = mgr->first_free_vpn;
+         if( (free + size) > mgr->vpn_size ) return -1;
+
+         // update MMAP allocator
+         mgr->first_free_vpn += size;
+
+         // compute base
+         base = free;
+     }
+     else                               // from zombi_list
+     {
+         // get pointer on zombi vseg from zombi_list
+         vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
+         vseg    = GET_PTR( vseg_xp );
+
+         // remove vseg from free-list
+         xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+         // compute base
+         base = vseg->vpn_base;
+     }
+
+     // release lock
+     busylock_release( &mgr->lock );
+
+ #if DEBUG_VMM_MMAP_ALLOC
+     cycle = (uint32_t)hal_get_cycles();
+     if( DEBUG_VMM_DESTROY < cycle )
+     printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
+ #endif
+
+     // returns vpn_base, vpn_size
+     *vpn_base = base;
+     *vpn_size = size;
+     return 0;
+
+ }  // end vmm_mmap_alloc()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function is called by the vmm_remove_vseg() function, and implements
+ // the VMM MMAP specific deallocator.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm  : [in] pointer on VMM.
+ // @ vseg : [in] pointer on released vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ static void vmm_mmap_free( vmm_t  * vmm,
+                            vseg_t * vseg )
+ {
+     // get pointer on mmap allocator
+     mmap_mgr_t * mgr = &vmm->mmap_mgr;
+
+     // compute zombi_list order
+     uint32_t order = bits_log2( vseg->vpn_size );
+
+     // take lock protecting zombi lists
+     busylock_acquire( &mgr->lock );
+
+     // update relevant zombi_list
+     xlist_add_first( XPTR( local_cxy , &mgr->zombi_list[order] ),
+                      XPTR( local_cxy , &vseg->xlist ) );
+
+     // release lock
+     busylock_release( &mgr->lock );
+
+ }  // end of vmm_mmap_free()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function registers one vseg in the VSL of a local process descriptor.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm  : [in] pointer on VMM.
+ // @ vseg : [in] pointer on vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
+                              vseg_t * vseg )
+ {
+     // update vseg descriptor
+     vseg->vmm = vmm;
+
+     // increment vsegs number
+     vmm->vsegs_nr++;
+
+     // add vseg in vmm list
+     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
+                     XPTR( local_cxy , &vseg->xlist ) );
+
+ }  // end vmm_attach_vseg_from_vsl()
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // This static function removes one vseg from the VSL of a local process descriptor.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // @ vmm  : [in] pointer on VMM.
+ // @ vseg : [in] pointer on vseg.
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
+                                vseg_t * vseg )
+ {
+     // update vseg descriptor
+     vseg->vmm = NULL;
+
+     // decrement vsegs number
+     vmm->vsegs_nr--;
+
+     // remove vseg from VSL
+     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+ }  // end vmm_detach_from_vsl()
+
+ ////////////////////////////////////////////
+ error_t vmm_user_init( process_t * process )
+ {
  vseg_t  * vseg_args;
  vseg_t  * vseg_envs;
…
  uint32_t  i;

- #if DEBUG_VMM_INIT
+ #if DEBUG_VMM_USER_INIT
  thread_t * this  = CURRENT_THREAD;
  uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_INIT )
+ if( DEBUG_VMM_USER_INIT )
  printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
  __FUNCTION__ , this->process->pid, this->trdid, process->pid, local_cxy, cycle );
…
  vmm_t * vmm = &process->vmm;

- // initialize VSL (empty)
- vmm->vsegs_nr = 0;
- xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
- remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
-
+ // check UTILS zone
  assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
           (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
           "UTILS zone too small\n" );

+ // check STACK zone
  assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
           (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
           "STACK zone too small\n");

- // register args vseg in VSL
+ // register "args" vseg in VSL
  base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
  size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
…
            XPTR_NULL,      // mapper_xp unused
            local_cxy );
-
  if( vseg_args == NULL )
  {
…
  vmm->args_vpn_base = base;

- // register the envs vseg in VSL
+ // register "envs" vseg in VSL
  base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
  size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
…
            XPTR_NULL,      // mapper_xp unused
            local_cxy );
-
  if( vseg_envs == NULL )
  {
…
  vmm->envs_vpn_base = base;
-
- // create GPT (empty)
- error = hal_gpt_create( &vmm->gpt );
-
- if( error )
- {
-     printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
-     return -1;
- }
-
- // initialize GPT lock
- remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
-
- // update process VMM with kernel vsegs as required by the hardware architecture
- error = hal_vmm_kernel_update( process );
-
- if( error )
- {
-     printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
-     return -1;
- }

  // initialize STACK allocator
…
  vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;
  busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );
- for( i = 0 ; i < 32 ; i++ ) list_root_init( &vmm->mmap_mgr.zombi_list[i] );
+ for( i = 0 ; i < 32 ; i++ )
+ {
+     xlist_root_init( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] ) );
+ }

  // initialize instrumentation counters
…
  hal_fence();

- #if DEBUG_VMM_INIT
+ #if DEBUG_VMM_USER_INIT
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_INIT )
+ if( DEBUG_VMM_USER_INIT )
  printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
…
  return 0;

- }  // end vmm_init()
+ }  // end vmm_user_init()

- //////////////////////////////////////////
- void vmm_attach_vseg_to_vsl( vmm_t  * vmm,
-                              vseg_t * vseg )
- {
-     // build extended pointer on rwlock protecting VSL
-     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-     // get rwlock in write mode
-     remote_rwlock_wr_acquire( lock_xp );
-
-     // update vseg descriptor
-     vseg->vmm = vmm;
-
-     // increment vsegs number
-     vmm->vsegs_nr++;
-
-     // add vseg in vmm list
-     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
-                     XPTR( local_cxy , &vseg->xlist ) );
-
-     // release rwlock in write mode
-     remote_rwlock_wr_release( lock_xp );
- }
-
- ////////////////////////////////////////////
- void vmm_detach_vseg_from_vsl( vmm_t  * vmm,
-                                vseg_t * vseg )
- {
-     // get vseg type
-     uint32_t type = vseg->type;
-
-     // build extended pointer on rwlock protecting VSL
-     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-     // get rwlock in write mode
-     remote_rwlock_wr_acquire( lock_xp );
-
-     // update vseg descriptor
-     vseg->vmm = NULL;
-
-     // remove vseg from VSL
-     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
-
-     // release rwlock in write mode
-     remote_rwlock_wr_release( lock_xp );
-
-     // release the stack slot to VMM stack allocator if STACK type
-     if( type == VSEG_TYPE_STACK )
-     {
-         // get pointer on stack allocator
-         stack_mgr_t * mgr = &vmm->stack_mgr;
-
-         // compute slot index
-         uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE);
-
-         // update stacks_bitmap
-         busylock_acquire( &mgr->lock );
-         bitmap_clear( &mgr->bitmap , index );
-         busylock_release( &mgr->lock );
-     }
-
-     // release the vseg to VMM mmap allocator if MMAP type
-     if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) )
-     {
-         // get pointer on mmap allocator
-         mmap_mgr_t * mgr = &vmm->mmap_mgr;
-
-         // compute zombi_list index
-         uint32_t index = bits_log2( vseg->vpn_size );
-
-         // update zombi_list
-         busylock_acquire( &mgr->lock );
-         list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
-         busylock_release( &mgr->lock );
-     }
-
-     // release physical memory allocated for vseg if no MMAP and no kernel type
-     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
-         (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
-     {
-         vseg_free( vseg );
-     }
-
- }  // end vmm_remove_vseg_from_vsl()
+ //////////////////////////////////////////
+ void vmm_user_reset( process_t * process )
+ {
+     xptr_t       vseg_xp;
+     vseg_t     * vseg;
+     vseg_type_t  vseg_type;
+
+ #if DEBUG_VMM_USER_RESET
+     uint32_t   cycle = (uint32_t)hal_get_cycles();
+     thread_t * this  = CURRENT_THREAD;
+     if( DEBUG_VMM_USER_RESET < cycle )
+     printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy, cycle );
+ #endif
+
+ #if (DEBUG_VMM_USER_RESET & 1 )
+     if( DEBUG_VMM_USER_RESET < cycle )
+     hal_vmm_display( process , true );
+ #endif
+
+     // get pointer on local VMM
+     vmm_t * vmm = &process->vmm;
+
+     // build extended pointer on VSL root and VSL lock
+     xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+     // take the VSL lock
+     remote_rwlock_wr_acquire( lock_xp );
+
+     // scan the VSL to delete all non kernel vsegs
+     // (we don't use a FOREACH in case of item deletion)
+     xptr_t   iter_xp;
+     xptr_t   next_xp;
+     for( iter_xp = hal_remote_l64( root_xp ) ;
+          iter_xp != root_xp ;
+          iter_xp = next_xp )
+     {
+         // save extended pointer on next item in xlist
+         next_xp = hal_remote_l64( iter_xp );
+
+         // get pointers on current vseg in VSL
+         vseg_xp   = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
+         vseg      = GET_PTR( vseg_xp );
+         vseg_type = vseg->type;
+
+ #if( DEBUG_VMM_USER_RESET & 1 )
+         if( DEBUG_VMM_USER_RESET < cycle )
+         printk("\n[%s] found %s vseg / vpn_base %x / vpn_size %d\n",
+         __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
+ #endif
+         // delete non kernel vseg
+         if( (vseg_type != VSEG_TYPE_KCODE) &&
+             (vseg_type != VSEG_TYPE_KDATA) &&
+             (vseg_type != VSEG_TYPE_KDEV ) )
+         {
+             // remove vseg from VSL
+             vmm_remove_vseg( process , vseg );
+
+ #if( DEBUG_VMM_USER_RESET & 1 )
+             if( DEBUG_VMM_USER_RESET < cycle )
+             printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n",
+             __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
+ #endif
+         }
+         else
+         {
+
+ #if( DEBUG_VMM_USER_RESET & 1 )
+             if( DEBUG_VMM_USER_RESET < cycle )
+             printk("\n[%s] keep %s vseg / vpn_base %x / vpn_size %d\n",
+             __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
+ #endif
+         }
+     }  // end loop on vsegs in VSL
+
+     // release the VSL lock
+     remote_rwlock_wr_release( lock_xp );
+
+     // FIXME the process copies must also be handled...
+
+ #if DEBUG_VMM_USER_RESET
+     cycle = (uint32_t)hal_get_cycles();
+     if( DEBUG_VMM_USER_RESET < cycle )
+     printk("\n[%s] thread[%x,%x] exit for process %x in cluster %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, process->pid, local_cxy , cycle );
+ #endif
+
+ }  // end vmm_user_reset()
…
  cxy_t       page_cxy;
  xptr_t      forks_xp;         // extended pointer on forks counter in page descriptor
- xptr_t      lock_xp;          // extended pointer on lock protecting the forks counter
  xptr_t      parent_root_xp;
  bool_t      mapped;
…
  child_vmm = &child_process->vmm;

- // get extended pointer on lock protecting the parent VSL
- parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsegs_lock );
-
- // initialize the lock protecting the child VSL
- remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ), LOCK_VMM_STACK );
+ // initialize the locks protecting the child VSL and GPT
+ remote_rwlock_init( XPTR( local_cxy , &child_vmm->gpt_lock ) , LOCK_VMM_GPT );
+ remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );

  // initialize the child VSL as empty
…
  child_vmm->vsegs_nr = 0;

- // create the child GPT
+ // create an empty child GPT
  error = hal_gpt_create( &child_vmm->gpt );
-
  if( error )
  {
…
  }

- // build extended pointer on parent VSL
+ // build extended pointer on parent VSL root and lock
  parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );
+ parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );

  // take the lock protecting the parent VSL in read mode
…
  XLIST_FOREACH( parent_root_xp , iter_xp )
  {
-     // get local and extended pointers on current parent vseg
+     // get pointers on current parent vseg
      parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
      parent_vseg    = GET_PTR( parent_vseg_xp );
…
      vseg_init_from_ref( child_vseg , parent_vseg_xp );

+     // build extended pointer on VSL lock
+     xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
+
+     // take the VSL lock in write mode
+     remote_rwlock_wr_acquire( lock_xp );
+
      // register child vseg in child VSL
      vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
+
+     // release the VSL lock
+     remote_rwlock_wr_release( lock_xp );

  #if DEBUG_VMM_FORK_COPY
…
      hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
  #endif
-
-     // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
+     // copy DATA, ANON, REMOTE, FILE parent GPT entries to child GPT
      if( type != VSEG_TYPE_CODE )
      {
-         // activate the COW for DATA, MMAP, REMOTE vsegs only
+         // activate the COW for DATA, ANON, REMOTE vsegs only
          cow = ( type != VSEG_TYPE_FILE );
…
          {
              error = hal_gpt_pte_copy( &child_vmm->gpt,
+                                       vpn,
                                        XPTR( parent_cxy , &parent_vmm->gpt ),
                                        vpn,
…
  child_vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
  child_vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
- for( i = 0 ; i < 32 ; i++ ) list_root_init( &child_vmm->mmap_mgr.zombi_list[i] );
+ for( i = 0 ; i < 32 ; i++ )
+ {
+     xlist_root_init( XPTR( local_cxy , &child_vmm->mmap_mgr.zombi_list[i] ) );
+ }

  // initialize instrumentation counters
…
  vmm_t * vmm = &process->vmm;

- // get extended pointer on VSL root and VSL lock
- xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+ // build extended pointer on VSL root, VSL lock and GPT lock
+ xptr_t vsl_root_xp = XPTR( local_cxy , &vmm->vsegs_root );
+ xptr_t vsl_lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+ xptr_t gpt_lock_xp = XPTR( local_cxy , &vmm->gpt_lock );
+
+ // take the VSL lock
+ remote_rwlock_wr_acquire( vsl_lock_xp );

  // scan the VSL to delete all registered vsegs
- // (don't use a FOREACH for item deletion in xlist)
-
- while( !xlist_is_empty( root_xp ) )
+ // (we don't use a FOREACH in case of item deletion)
+ xptr_t   iter_xp;
+ xptr_t   next_xp;
+ for( iter_xp = hal_remote_l64( vsl_root_xp ) ;
+      iter_xp != vsl_root_xp ;
+      iter_xp = next_xp )
  {
-     // get pointer on first vseg in VSL
-     vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
-     vseg    = GET_PTR( vseg_xp );
+     // save extended pointer on next item in xlist
+     next_xp = hal_remote_l64( iter_xp );
+
+     // get pointers on current vseg in VSL
+     vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
+     vseg    = GET_PTR( vseg_xp );

      // delete vseg and release physical pages
-     vmm_delete_vseg( process->pid , vseg->min );
+     vmm_remove_vseg( process , vseg );

  #if( DEBUG_VMM_DESTROY & 1 )
…
  }

- // remove all vsegs from zombi_lists in MMAP allocator
+ // release the VSL lock
+ remote_rwlock_wr_release( vsl_lock_xp );
+
+ // remove all registered MMAP vsegs
+ // from zombi_lists in MMAP allocator
  uint32_t i;
  for( i = 0 ; i<32 ; i++ )
  {
-     while( !list_is_empty( &vmm->mmap_mgr.zombi_list[i] ) )
+     // build extended pointer on zombi_list[i]
+     xptr_t root_xp = XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[i] );
+
+     // scan zombi_list[i]
+     while( !xlist_is_empty( root_xp ) )
      {
-         vseg = LIST_FIRST( &vmm->mmap_mgr.zombi_list[i] , vseg_t , zlist );
+         vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
+         vseg    = GET_PTR( vseg_xp );

  #if( DEBUG_VMM_DESTROY & 1 )
…
          vseg->vmm = NULL;

-         // remove vseg from xlist
+         // remove vseg from zombi_list
          xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
…
  }

+ // take the GPT lock
+ remote_rwlock_wr_acquire( gpt_lock_xp );
+
  // release memory allocated to the GPT itself
  hal_gpt_destroy( &vmm->gpt );
+
+ // release the GPT lock
+ remote_rwlock_wr_release( gpt_lock_xp );

  #if DEBUG_VMM_DESTROY
…
  }  // end vmm_check_conflict()

- ////////////////////////////////////////////////////////////////////////////////////////////
- // This static function is called by the vmm_create_vseg() function, and implements
- // the VMM stack_vseg specific allocator.
- ////////////////////////////////////////////////////////////////////////////////////////////
- // @ vmm      : pointer on VMM.
- // @ vpn_base : (return value) first allocated page
- // @ vpn_size : (return value) number of allocated pages
- ////////////////////////////////////////////////////////////////////////////////////////////
- static error_t vmm_stack_alloc( vmm_t * vmm,
-                                 vpn_t * vpn_base,
-                                 vpn_t * vpn_size )
- {
-     // get stack allocator pointer
-     stack_mgr_t * mgr = &vmm->stack_mgr;
-
-     // get lock on stack allocator
-     busylock_acquire( &mgr->lock );
-
-     // get first free slot index in bitmap
-     int32_t index = bitmap_ffc( &mgr->bitmap , 4 );
-     if( (index < 0) || (index > 31) )
-     {
-         busylock_release( &mgr->lock );
-         return 0xFFFFFFFF;
-     }
-
-     // update bitmap
-     bitmap_set( &mgr->bitmap , index );
-
-     // release lock on stack allocator
-     busylock_release( &mgr->lock );
-
-     // returns vpn_base, vpn_size (one page non allocated)
-     *vpn_base = mgr->vpn_base + index * CONFIG_VMM_STACK_SIZE + 1;
-     *vpn_size = CONFIG_VMM_STACK_SIZE - 1;
-     return 0;
-
- }  // end vmm_stack_alloc()
-
- ////////////////////////////////////////////////////////////////////////////////////////////
- // This static function is called by the vmm_create_vseg() function, and implements
- // the VMM MMAP specific allocator.
- ////////////////////////////////////////////////////////////////////////////////////////////
- // @ vmm      : [in]  pointer on VMM.
- // @ npages   : [in]  requested number of pages.
- // @ vpn_base : [out] first allocated page.
- // @ vpn_size : [out] actual number of allocated pages.
- ////////////////////////////////////////////////////////////////////////////////////////////
- static error_t vmm_mmap_alloc( vmm_t * vmm,
-                                vpn_t   npages,
-                                vpn_t * vpn_base,
-                                vpn_t * vpn_size )
- {
-     uint32_t   index;
-     vseg_t   * vseg;
-     vpn_t      base;
-     vpn_t      size;
-     vpn_t      free;
-
- #if DEBUG_VMM_MMAP_ALLOC
-     thread_t * this  = CURRENT_THREAD;
-     uint32_t   cycle = (uint32_t)hal_get_cycles();
-     if( DEBUG_VMM_MMAP_ALLOC < cycle )
-     printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
-     __FUNCTION__, this->process->pid, this->trdid, cycle );
- #endif
-
-     // vseg size must be power of 2
-     // compute actual size and index in zombi_list array
-     size  = POW2_ROUNDUP( npages );
-     index = bits_log2( size );
-
-     // get mmap allocator pointer
-     mmap_mgr_t * mgr = &vmm->mmap_mgr;
-
-     // get lock on mmap allocator
-     busylock_acquire( &mgr->lock );
-
-     // get vseg from zombi_list or from mmap zone
-     if( list_is_empty( &mgr->zombi_list[index] ) )    // from mmap zone
-     {
-         // check overflow
-         free = mgr->first_free_vpn;
-         if( (free + size) > mgr->vpn_size ) return -1;
-
-         // update MMAP allocator
-         mgr->first_free_vpn += size;
-
-         // compute base
-         base = free;
-     }
-     else                                              // from zombi_list
-     {
-         // get pointer on zombi vseg from zombi_list
-         vseg = LIST_FIRST( &mgr->zombi_list[index] , vseg_t , zlist );
-
-         // remove vseg from free-list
-         list_unlink( &vseg->zlist );
-
-         // compute base
-         base = vseg->vpn_base;
-     }
-
-     // release lock on mmap allocator
-     busylock_release( &mgr->lock );
-
- #if DEBUG_VMM_MMAP_ALLOC
-     cycle = (uint32_t)hal_get_cycles();
-     if( DEBUG_VMM_DESTROY < cycle )
-     printk("\n[%s] thread[%x,%x] exit / vpn_base %x / vpn_size %x / cycle %d\n",
-     __FUNCTION__, this->process->pid, this->trdid, base, size, cycle );
- #endif
-
-     // returns vpn_base, vpn_size
-     *vpn_base = base;
-     *vpn_size = size;
-     return 0;
-
- }  // end vmm_mmap_alloc()
…
  {
      // get vpn_base and vpn_size from STACK allocator
-     error = vmm_stack_alloc( vmm , &vpn_base , &vpn_size );
-     if( error )
-     {
-         printk("\n[ERROR] in %s : no space for stack vseg / process %x in cluster %x\n",
-         __FUNCTION__ , process->pid , local_cxy );
-         return NULL;
-     }
+     vmm_stack_alloc( vmm , base , &vpn_base , &vpn_size );

      // compute vseg base and size from vpn_base and vpn_size
…
            cxy );

+ // build extended pointer on VSL lock
+ xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+ // take the VSL lock in write mode
+ remote_rwlock_wr_acquire( lock_xp );
+
  // attach vseg to VSL
  vmm_attach_vseg_to_vsl( vmm , vseg );
+
+ // release the VSL lock
+ remote_rwlock_wr_release( lock_xp );

  #if DEBUG_VMM_CREATE_VSEG
…
  }  // vmm_create_vseg()

- ///////////////////////////////////
- void vmm_delete_vseg( pid_t    pid,
-                       intptr_t vaddr )
+ //////////////////////////////////////////
+ void vmm_remove_vseg( process_t * process,
+                       vseg_t    * vseg )
  {
-     process_t * process;    // local pointer on local process
-     vmm_t     * vmm;        // local pointer on local process VMM
-     vseg_t    * vseg;       // local pointer on local vseg containing vaddr
-     gpt_t     * gpt;        // local pointer on local process GPT
+     vmm_t     * vmm;        // local pointer on process VMM
+     bool_t      is_ref;     // local process is reference process
+     uint32_t    vseg_type;  // vseg type
      vpn_t       vpn;        // VPN of current PTE
      vpn_t       vpn_min;    // VPN of first PTE
…
      cxy_t       page_cxy;   // page descriptor cluster
      page_t    * page_ptr;   // page descriptor pointer
-     xptr_t      forks_xp;   // extended pointer on pending forks counter
-     xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
-     uint32_t    forks;      // actual number of pendinf forks
-     uint32_t    vseg_type;  // vseg type
-
- #if DEBUG_VMM_DELETE_VSEG
-     uint32_t cycle = (uint32_t)hal_get_cycles();
-     thread_t * this = CURRENT_THREAD;
-     if( DEBUG_VMM_DELETE_VSEG < cycle )
-     printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n",
-     __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle );
- #endif
-
-     // get local pointer on local process descriptor
-     process = cluster_get_local_process_from_pid( pid );
-
-     if( process == NULL )
-     {
-         printk("\n[ERRORR] in %s : cannot get local process descriptor\n",
-         __FUNCTION__ );
-         return;
-     }
-
-     // get pointers on local process VMM an GPT
+     xptr_t      count_xp;   // extended pointer on page refcount
+     uint32_t    count;      // current value of page refcount
+
+     // check arguments
+     assert( (process != NULL), "process argument is NULL" );
+     assert( (vseg    != NULL), "vseg argument is NULL" );
+
+     // compute is_ref
+     is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
+
+     // get pointers on local process VMM
      vmm = &process->vmm;
-     gpt = &process->vmm.gpt;
-
-     // get local pointer on vseg containing vaddr
-     vseg = vmm_vseg_from_vaddr( vmm , vaddr );
-
-     if( vseg == NULL )
-     {
-         printk("\n[ERRORR] in %s : cannot get vseg descriptor\n",
-         __FUNCTION__ );
-         return;
-     }

      // get relevant vseg infos
…
      vpn_max = vpn_min + vseg->vpn_size;

-     // loop to invalidate all vseg PTEs in GPT
+ #if DEBUG_VMM_REMOVE_VSEG
+     uint32_t cycle = (uint32_t)hal_get_cycles();
+     thread_t * this = CURRENT_THREAD;
+     if( DEBUG_VMM_REMOVE_VSEG < cycle )
+     printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid,
+     process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
+ #endif
+
+     // loop on PTEs in GPT
      for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
      {
-         // get ppn and attr from GPT entry
-         hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
-
-         if( attr & GPT_MAPPED )    // entry is mapped
+         // get ppn and attr
+         hal_gpt_get_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , &attr , &ppn );
+
+         if( attr & GPT_MAPPED )    // PTE is mapped
          {

- #if( DEBUG_VMM_DELETE_VSEG & 1 )
-             if( DEBUG_VMM_DELETE_VSEG < cycle )
-             printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
+ #if( DEBUG_VMM_REMOVE_VSEG & 1 )
+             if( DEBUG_VMM_REMOVE_VSEG < cycle )
+             printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
  #endif
              // unmap GPT entry in local GPT
-             hal_gpt_reset_pte( gpt , vpn );
-
-             // the allocated page is not released to for kernel vseg
-             if( (vseg_type != VSEG_TYPE_KCODE) &&
-                 (vseg_type != VSEG_TYPE_KDATA) &&
-                 (vseg_type != VSEG_TYPE_KDEV ) )
-             {
-                 // get extended pointer on physical page descriptor
-                 page_xp  = ppm_ppn2page( ppn );
-                 page_cxy = GET_CXY( page_xp );
-                 page_ptr = GET_PTR( page_xp );
-
-                 // FIXME This code must be re-written, as the actual release depends on vseg type,
-                 // the reference cluster, the page refcount and/or the forks counter...
-
-                 // get extended pointers on forks and lock fields
-                 forks_xp = XPTR( page_cxy , &page_ptr->forks );
-                 lock_xp  = XPTR( page_cxy , &page_ptr->lock );
-
-                 // get the lock protecting the page
-                 remote_busylock_acquire( lock_xp );
-
-                 // get pending forks counter
-                 forks = hal_remote_l32( forks_xp );
-
-                 if( forks )    // decrement pending forks counter
-                 {
-                     // update forks counter
-                     hal_remote_atomic_add( forks_xp , -1 );
-
-                     // release the lock protecting the page
-                     remote_busylock_release( lock_xp );
-                 }
-                 else           // release physical page to relevant cluster
-                 {
-                     // release the lock protecting the page
-                     remote_busylock_release( lock_xp );
-
-                     // release the page to kmem
-                     if( page_cxy == local_cxy )    // local cluster
-                     {
-                         req.type = KMEM_PAGE;
-                         req.ptr  = page_ptr;
-                         kmem_free( &req );
-                     }
-                     else                           // remote cluster
-                     {
-                         rpc_pmem_release_pages_client( page_cxy , page_ptr );
-                     }
-
- #if( DEBUG_VMM_DELETE_VSEG & 1 )
-                     if( DEBUG_VMM_DELETE_VSEG < cycle )
-                     printk("- release ppn %x\n", ppn );
- #endif
-                 }
-             }
+             hal_gpt_reset_pte( &vmm->gpt , vpn );
+
+             // get pointers on physical page descriptor
+             page_xp  = ppm_ppn2page( ppn );
+             page_cxy = GET_CXY( page_xp );
+             page_ptr = GET_PTR( page_xp );
+
+             // decrement page refcount
+             count_xp = XPTR( page_cxy , &page_ptr->refcount );
+             count    = hal_remote_atomic_add( count_xp , -1 );
+
+             // compute the ppn_release condition depending on vseg type
+             bool_t ppn_release;
+             if( (vseg_type == VSEG_TYPE_FILE)  ||
+                 (vseg_type == VSEG_TYPE_KCODE) ||
+                 (vseg_type == VSEG_TYPE_KDATA) ||
+                 (vseg_type == VSEG_TYPE_KDEV) )
+             {
+                 // no physical page release for FILE and KERNEL
+                 ppn_release = false;
+             }
+             else if( (vseg_type == VSEG_TYPE_CODE)  ||
+                      (vseg_type == VSEG_TYPE_STACK) )
+             {
+                 // always release physical page for private vsegs
+                 ppn_release = true;
+             }
+             else if( (vseg_type == VSEG_TYPE_ANON)  ||
+                      (vseg_type == VSEG_TYPE_REMOTE) )
+             {
+                 // release physical page if reference cluster
+                 ppn_release = is_ref;
+             }
+             else if( is_ref )   // vseg_type == DATA in reference cluster
+             {
+                 // get extended pointers on forks and lock field in page descriptor
+                 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
+                 xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+                 // take lock protecting "forks" counter
+                 remote_busylock_acquire( lock_xp );
+
+                 // get number of pending forks from page descriptor
+                 uint32_t forks = hal_remote_l32( forks_xp );
+
+                 // decrement pending forks counter if required
+                 if( forks ) hal_remote_atomic_add( forks_xp , -1 );
+
+                 // release lock protecting "forks" counter
+                 remote_busylock_release( lock_xp );
+
+                 // release physical page if forks == 0
+                 ppn_release = (forks == 0);
+             }
+             else                // vseg_type == DATA not in reference cluster
+             {
+                 // no physical page release if not in reference cluster
+                 ppn_release = false;
+             }
+
+             // release physical page to relevant kmem when required
+             if( ppn_release )
+             {
+                 if( page_cxy == local_cxy )
+                 {
+                     req.type = KMEM_PAGE;
+                     req.ptr  = page_ptr;
+                     kmem_free( &req );
+                 }
+                 else
+                 {
+                     rpc_pmem_release_pages_client( page_cxy , page_ptr );
+                 }
+             }
+
+ #if( DEBUG_VMM_REMOVE_VSEG & 1 )
+             if( DEBUG_VMM_REMOVE_VSEG < cycle )
+             {
+                 if( ppn_release ) printk(" / released to kmem\n" );
+                 else              printk("\n");
+             }
+ #endif
          }
      }

-     // remove vseg from VSL and release vseg descriptor (if not MMAP)
+     // remove vseg from VSL
      vmm_detach_vseg_from_vsl( vmm , vseg );

- #if DEBUG_VMM_DELETE_VSEG
+     // release vseg descriptor depending on vseg type
+     if( vseg_type == VSEG_TYPE_STACK )
+     {
+         // release slot to local stack allocator
+         vmm_stack_free( vmm , vseg );
+
+         // release vseg descriptor to local kmem
+         vseg_free( vseg );
+     }
+     else if( (vseg_type == VSEG_TYPE_ANON) ||
+              (vseg_type == VSEG_TYPE_FILE) ||
+              (vseg_type == VSEG_TYPE_REMOTE) )
+     {
+         // release vseg to local mmap allocator
+         vmm_mmap_free( vmm , vseg );
+     }
+     else
+     {
+         // release vseg descriptor to local kmem
+         vseg_free( vseg );
+     }
+
+ #if DEBUG_VMM_REMOVE_VSEG
      cycle = (uint32_t)hal_get_cycles();
-     if( DEBUG_VMM_DELETE_VSEG < cycle )
-     printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n",
-     __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle );
- #endif
-
- }  // end vmm_delete_vseg()
+     if( DEBUG_VMM_REMOVE_VSEG < cycle )
+     printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid,
+     process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
+ #endif
+
+ }  // end vmm_remove_vseg()
+
+ ///////////////////////////////////
+ void vmm_delete_vseg( pid_t    pid,
+                       intptr_t vaddr )
+ {
+     process_t * process;    // local pointer on local process
+     vseg_t    * vseg;       // local pointer on local vseg containing vaddr
+
+     // get local pointer on local process descriptor
+     process = cluster_get_local_process_from_pid( pid );
+
+     if( process == NULL )
+     {
+         printk("\n[WARNING] in %s : cannot get local process descriptor\n",
+         __FUNCTION__ );
+         return;
+     }
+
+     // get local pointer on local vseg containing vaddr
+     vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
+
+     if( vseg == NULL )
+     {
+         printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
+         __FUNCTION__ );
+         return;
+     }
+
+     // call relevant function
+     vmm_remove_vseg( process , vseg );
+
+ }  // end vmm_delete_vseg
…
                                intptr_t vaddr )
  {
-     xptr_t    iter_xp;
      xptr_t    vseg_xp;
      vseg_t  * vseg;
+     xptr_t    iter_xp;

      // get extended pointers on VSL lock and root
-     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
+     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
      xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
…
      XLIST_FOREACH( root_xp , iter_xp )
      {
+         // get pointers on vseg
          vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
          vseg    = GET_PTR( vseg_xp );

+         // return success when match
          if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
          {
…
      // return failure
      remote_rwlock_rd_release( lock_xp );
-
      return NULL;
…
      vseg_init_from_ref( vseg , vseg_xp );

+     // build extended pointer on VSL lock
+     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
+
+     // take the VSL lock in write mode
+     remote_rwlock_wr_acquire( lock_xp );
+
      // register local vseg in local VSL
      vmm_attach_vseg_to_vsl( vmm , vseg );
+
+     // release the VSL lock
+     remote_rwlock_wr_release( lock_xp );
  }
…
  uint32_t   cycle = (uint32_t)hal_get_cycles();
  thread_t * this  = CURRENT_THREAD;
- xptr_t     this_xp = XPTR( local_cxy , this );
  if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
  printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
…
  error_t    error;    // value returned by called functions

+ #if DEBUG_VMM_HANDLE_PAGE_FAULT
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ thread_t * this  = CURRENT_THREAD;
+ if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
+ printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
+ hal_vmm_display( process , true );
+ #endif
+
  // get local vseg (access to reference VSL can be required)
  error = vmm_get_vseg( process,
…
  if( error )
  {
-     printk("\n[ERROR] in %s : vpn %x in process %x not in a registered vseg\n",
-     __FUNCTION__ , vpn , process->pid );
+     printk("\n[ERROR] in %s : vpn %x in process %x not in registered vseg / cycle %d\n",
+     __FUNCTION__ , vpn , process->pid, (uint32_t)hal_get_cycles() );

      return EXCP_USER_ERROR;
  }

  #if DEBUG_VMM_HANDLE_PAGE_FAULT
- uint32_t cycle = (uint32_t)hal_get_cycles();
- thread_t * this = CURRENT_THREAD;
+ cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
- printk("\n[%s] threadr[%x,%x] enter for vpn %x / %s / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
+ printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
…
  error_t    error;

+ thread_t * this = CURRENT_THREAD;
+
  #if DEBUG_VMM_HANDLE_COW
  uint32_t   cycle = (uint32_t)hal_get_cycles();
- thread_t * this  = CURRENT_THREAD;
- xptr_t     this_xp = XPTR( local_cxy , this );
  if( DEBUG_VMM_HANDLE_COW < cycle )
  printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
+ hal_vmm_display( process , true );
  #endif
…
  if( error )
  {
-     printk("\n[PANIC] in %s : vpn %x in process %x not in a registered vseg\n",
-     __FUNCTION__, vpn, process->pid );
+     printk("\n[PANIC] in %s vpn %x in thread[%x,%x] not in a registered vseg\n",
+     __FUNCTION__, vpn, process->pid, this->trdid );

      return EXCP_KERNEL_PANIC;
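The new STACK allocator replaces the old bitmap search (bitmap_ffc) with a direct mapping from the user thread LTID to a fixed window of virtual pages, where the first page of each window is left unmapped as an overflow guard. The base and index formulas used by vmm_stack_alloc() and vmm_stack_free() are exact inverses; a self-contained sketch with illustrative constants (STACK_BASE and STACK_SIZE stand in for CONFIG_VMM_STACK_BASE and CONFIG_VMM_STACK_SIZE) demonstrates the round trip:

    #include <assert.h>
    #include <stdint.h>

    #define STACK_BASE 0x40000   /* first vpn of the STACK zone (illustrative) */
    #define STACK_SIZE 0x1000    /* pages per stack slot (illustrative)        */

    static uint32_t slot_to_vpn_base( uint32_t ltid )
    {
        return STACK_BASE + ltid * STACK_SIZE + 1;     /* skip the guard page */
    }

    static uint32_t vpn_base_to_slot( uint32_t vpn_base )
    {
        return (vpn_base - 1 - STACK_BASE) / STACK_SIZE;
    }

    int main( void )
    {
        for( uint32_t ltid = 0 ; ltid < 8 ; ltid++ )
            assert( vpn_base_to_slot( slot_to_vpn_base( ltid ) ) == ltid );
        return 0;
    }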
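The MMAP allocator keeps its recycling policy across this changeset: a released window is never returned to the linear allocator but parked in zombi_list[order] and reused for the next request of the same order, so first_free_vpn only grows. A toy model of that policy, using fixed-depth arrays in place of the kernel xlists (all names illustrative):

    #include <stdint.h>

    #define ORDERS 32

    typedef struct
    {
        uint32_t first_free_vpn;
        uint32_t zombi[ORDERS][8];    /* recycled window bases, per order */
        uint32_t zombi_nr[ORDERS];
    } mmap_model_t;

    static uint32_t mmap_alloc( mmap_model_t * m, uint32_t order )
    {
        if( m->zombi_nr[order] )                     /* reuse a zombi window */
            return m->zombi[order][ --m->zombi_nr[order] ];
        uint32_t base = m->first_free_vpn;           /* else grow linearly */
        m->first_free_vpn += (1u << order);
        return base;
    }

    static void mmap_free( mmap_model_t * m, uint32_t order, uint32_t base )
    {
        if( m->zombi_nr[order] < 8 )                 /* park for reuse */
            m->zombi[order][ m->zombi_nr[order]++ ] = base;
    }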
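The main functional change in vmm_remove_vseg() is the explicit ppn_release decision table, which resolves the old FIXME. Restated as a standalone predicate (the enum and function are a local restatement of the diff's logic, not kernel API; FILE_ avoids a clash with the stdio typedef):

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { CODE, DATA, STACK, ANON, FILE_, REMOTE,
                   KCODE, KDATA, KDEV } vtype_t;

    /* <is_ref> : the local process is the reference process.
     * <forks>  : pending forks counter read from the page descriptor. */
    static bool ppn_release( vtype_t type, bool is_ref, uint32_t forks )
    {
        if( type == FILE_ || type == KCODE || type == KDATA || type == KDEV )
            return false;        /* FILE and kernel pages are never released here */
        if( type == CODE || type == STACK )
            return true;         /* private pages are always released */
        if( type == ANON || type == REMOTE )
            return is_ref;       /* released by the reference cluster only */
        /* DATA : released by the reference cluster when no fork is pending */
        return is_ref && (forks == 0);
    }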
trunk/kernel/mm/vmm.h
r624 r625 48 48 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not 49 49 * mapped to detect stack overflow. 50 * The slot index can be computed form the slot base address, and reversely. 51 * All allocation / release operations are registered in the stack_bitmap, that completely 52 * define the STACK zone status. 50 * In this implementation, the slot index is defined by the user thead LTID. 51 * All allocated stacks are registered in a bitmap defining the STACK zone state: 52 * - The allocator checks that the requested slot has not been already allocated, and set the 53 * corresponding bit in the bitmap. 54 * - The de-allocator function reset the corresponding bit in the bitmap. 53 55 ********************************************************************************************/ 54 56 … … 57 59 busylock_t lock; /*! lock protecting STACK allocator */ 58 60 vpn_t vpn_base; /*! first page of STACK zone */ 59 bitmap_t bitmap; /*! bit bector of allocated stacks */61 bitmap_t bitmap; /*! bit vector of allocated stacks */ 60 62 } 61 63 stack_mgr_t; … … 84 86 vpn_t vpn_size; /*! number of pages in MMAP zone */ 85 87 vpn_t first_free_vpn; /*! first free page in MMAP zone */ 86 list_entry_tzombi_list[32]; /*! array of roots of released vsegs lists */88 xlist_entry_t zombi_list[32]; /*! array of roots of released vsegs lists */ 87 89 } 88 90 mmap_mgr_t; … … 109 111 typedef struct vmm_s 110 112 { 111 remote_rwlock_t vs egs_lock;/*! lock protecting the local VSL */113 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL */ 112 114 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference) */ 113 115 uint32_t vsegs_nr; /*! total number of local vsegs */ … … 132 134 133 135 /********************************************************************************************* 134 * This function initialises the virtual memory manager attached to an user process. 136 * This function mkkes a partial initialisation of the VMM attached to an user process. 137 * The GPT must have been previously created, with the hal_gpt_create() function. 138 * - It registers "args", "envs" vsegs in the VSL. 135 139 * - It initializes the STACK and MMAP allocators. 136 * - It registers the "kentry", "args", "envs" vsegs in the VSL. 137 * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function. 138 * - For TSAR it map all pages for the "kentry" vseg, that must be identity mapping. 139 ******************************************************a************************************** 140 * Implementation notes: 140 * Note: 141 141 * - The "code" and "data" vsegs are registered by the elf_load_process() function. 142 * - The "stack" vsegs are dynamically created by the thread_user_create() function.143 * - The "file", "anon", "remote" vsegs are dynamically created by the mmap() syscall.142 * - The "stack" vsegs are dynamically registered by the thread_user_create() function. 143 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall. 144 144 ********************************************************************************************* 145 145 * @ process : pointer on process descriptor 146 146 * @ return 0 if success / return -1 if failure. 
147 147 ********************************************************************************************/
148 error_t vmm_init( struct process_s * process );
149
150 /*********************************************************************************************
151 * This function displays on TXY0 the list or registered vsegs for a given <process>.
152 * It must be executed by a thread running in reference cluster.
153 * If the <mapping> argument is true, it displays for each vseg all mapped PTEs in GPT.
148 error_t vmm_user_init( struct process_s * process );
149
150 /*********************************************************************************************
151 * This function re-initialises the VMM attached to a user process to prepare a new
152 * call to the vmm_user_init() function after an exec() syscall.
153 * It removes from the VMM of the process identified by the <process> argument all
154 * non-kernel vsegs (i.e. all user vsegs), by calling the vmm_remove_vseg() function.
155 * - the vsegs are removed from the VSL.
156 * - the corresponding GPT entries are removed from the GPT.
157 * - the physical pages are released to the relevant kmem when they are not shared.
158 * The VSL and the GPT are not modified for the kernel vsegs.
154 159 *********************************************************************************************
155 160 * @ process : pointer on process descriptor.
156 * @ mapping : detailed mapping if true.
157 ********************************************************************************************/
158 void hal_vmm_display( struct process_s * process,
159 bool_t mapping );
161 ********************************************************************************************/
162 void vmm_user_reset( struct process_s * process );
160 163
161 164 /*********************************************************************************************
162 165 * This function is called by the process_make_fork() function. It partially copies
163 166 * the content of a remote parent process VMM to the local child process VMM:
164 * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
165 * VSL, and all valid GPT entries in parent GPT are copied to the child GPT.
166 * The WRITABLE flag is reset and the COW flag is set in child GPT.
167 * - all CODE vsegs registered in the parent VSL are registered in the child VSL, but the
168 * GPT entries are not copied in the child GPT, that will be dynamically updated from
167 * - All DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the
168 * child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the
169 * WRITABLE flag is reset and the COW flag is set.
170 * - All CODE vsegs registered in the parent VSL are registered in the child VSL, but the
171 * GPT entries are not copied in the child GPT, and will be dynamically updated from
169 172 * the .elf file when a page fault is reported.
170 * - all FILE vsegs registered in the parent VSL are registered in the child VSL, and all
173 * - All FILE vsegs registered in the parent VSL are registered in the child VSL, and all
171 174 * valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
172 * - no STACK vseg is copied from parent VMM to child VMM, because the child STACK vseg
175 * - No STACK vseg is copied from parent VMM to child VMM, because the child stack vseg
173 176 * must be copied later from the cluster containing the user thread requesting the fork().
177 * - The KERNEL vsegs required by the target architecture are re-created in the child
178 * VMM, from the local kernel process VMM, using the hal_vmm_kernel_update() function.
174 179 *********************************************************************************************
175 180 * @ child_process : local pointer on local child process descriptor.
… …
196 201
197 202 /*********************************************************************************************
198 * This global function modifies a GPT entry identified by the <process> and <vpn>
199 * arguments in all clusters containing a process copy.
203 * This function modifies a GPT entry identified by the <process> and <vpn> arguments
204 * in all clusters containing a process copy.
200 205 * It must be called by a thread running in the reference cluster.
201 206 * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
… …
240 245 /*********************************************************************************************
241 246 * This function allocates memory for a vseg descriptor, initialises it, and registers it
242 * in the VMM of the local process descriptor, that must be the reference process.
243 * For the 'stack", "file", "anon", & "remote" types, it does not use the <base> argument,
244 * but uses the STACK and MMAP virtual memory allocators.
247 * in the VSL of the local process descriptor, which must be the reference process.
248 * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments,
249 * but uses the specific MMAP virtual memory allocator.
250 * - For the STACK type, it does not use the <size> argument, and the <base> argument
251 * defines the user thread LTID used by the specific STACK virtual memory allocator.
245 252 * It checks collision with all pre-existing vsegs.
246 * To comply with the "on-demand" paging policy, this function does NOT modify the page table,
253 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT,
247 254 * and does not allocate physical memory for vseg data.
248 255 * It should be called by a local thread (could be an RPC thread if the client thread is not
249 * running in the re gerence cluster).
256 * running in the reference cluster).
250 257 *********************************************************************************************
251 258 * @ process : pointer on local process descriptor.
252 259 * @ type : vseg type.
253 * @ base : vseg base address ( not used for dynamically allocated vsegs).
260 * @ base : vseg base address (or user thread ltid for a user stack vseg).
254 261 * @ size : vseg size (bytes).
255 262 * @ file_offset : offset in file for CODE, DATA, FILE types.
… …
269 276
270 277 /*********************************************************************************************
271 * This function removes from the local VMM of a process descriptor identified by the <pid>
272 * argument a local vseg identified by its base address <vaddr> in user space.
273 * It can be used for any type of vseg, but must be called by a local thread.
274 * Use the RPC_VMM_DELETE_VSEG if the client thread is not local.
275 * It does nothing if the process is not registered in the local cluster.
276 * It does nothing if the vseg is not registered in the local process VSL.
277 * - It removes from the local GPT all registered PTEs.
If it is executed in the reference
278 * cluster, it releases the referenced physical pages, to the relevant kmem allocator,
279 * depending on vseg type and the pending forks counter.
280 * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP.
281 *********************************************************************************************
282 * @ process : process identifier.
283 * @ vaddr : vseg base address in user space.
278 * This function removes from the VMM of a process descriptor identified by the <process>
279 * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg.
280 * As it uses local pointers, it must be called by a local thread.
281 * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions.
282 * It causes a kernel panic if the process is not registered in the local cluster,
283 * or if the vseg is not registered in the process VSL.
284 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
285 * unmapped from local GPT. Other actions depend on the vseg type:
286 * - Regarding the vseg descriptor release:
287 * . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
288 * . for STACK the vseg is released to the local stack allocator.
289 * . for all other types, the vseg is released to the local kmem.
290 * - Regarding the physical pages release:
291 * . for KERNEL and FILE, the pages are not released to kmem.
292 * . for CODE and STACK, the pages are released to local kmem when they are not COW.
293 * . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
294 * the local cluster is the reference cluster.
295 * The lock protecting the VSL must be taken by the caller.
296 *********************************************************************************************
297 * @ process : local pointer on process.
298 * @ vseg : local pointer on vseg.
299 ********************************************************************************************/
300 void vmm_remove_vseg( struct process_s * process,
301 struct vseg_s * vseg );
302
303 /*********************************************************************************************
304 * This function calls the vmm_remove_vseg() function to remove, from the VMM of a local
305 * process descriptor identified by the <pid> argument, the vseg identified by the <vaddr>
306 * virtual address in user space.
307 * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor.
308 *********************************************************************************************
309 * @ pid : process identifier.
310 * @ vaddr : virtual address in user space.
284 311 ********************************************************************************************/
285 312 void vmm_delete_vseg( pid_t pid,
286 313 intptr_t vaddr );
287
288 /*********************************************************************************************
289 * This function insert a new <vseg> descriptor in the VSL identifed by the <vmm> argument.
290 * and updates the vmm field in the vseg descriptor.
291 * It takes the lock protecting VSL.
292 *********************************************************************************************
293 * @ vmm : local pointer on local VMM.
294 * @ vseg : local pointer on local vseg descriptor.
295 ********************************************************************************************/
296 void vmm_attach_vseg_to_vsl( vmm_t * vmm,
297 vseg_t * vseg );
298
299 /*********************************************************************************************
300 * This function removes a vseg identified by the <vseg> argument from the local VSL
301 * identified by the <vmm> argument and release the memory allocated to vseg descriptor,
302 * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE).
303 * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL,
304 * and vseg descriptor is released.
305 * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released,
306 * and the stack slot is returned to the local VMM_STACK allocator.
307 * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list
308 * of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released.
309 *********************************************************************************************
310 * @ vmm : local pointer on local VMM.
311 * @ vseg : local pointer on local vseg to be removed.
312 ********************************************************************************************/
313 void vmm_detach_vseg_from_vsl( vmm_t * vmm,
314 vseg_t * vseg );
315 314
316 315 /*********************************************************************************************
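The vmm_remove_vseg() comment above fully specifies a per-type release policy. The following sketch only illustrates the descriptor-release branch of that specification; it is not the committed code, and the helper vmm_stack_release() as well as the zombi-list index choice are assumptions.

// illustrative dispatch for the vseg descriptor release documented above
static void vseg_descriptor_release( vmm_t  * vmm,
                                     vseg_t * vseg )
{
    uint32_t type = vseg->type;

    if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_REMOTE) )
    {
        // MMAP vsegs keep their descriptor for future reuse:
        // register it in a zombi list rooted in the MMAP manager
        // (the index selection below is an assumption)
        uint32_t index = 0;
        xlist_add_first( XPTR( local_cxy , &vmm->mmap_mgr.zombi_list[index] ),
                         XPTR( local_cxy , &vseg->xlist ) );
    }
    else if( type == VSEG_TYPE_STACK )
    {
        // return the slot to the local STACK allocator (hypothetical
        // helper name), then release the descriptor to the local kmem
        vmm_stack_release( vmm , vseg );
        vseg_free( vseg );
    }
    else
    {
        // all other types: release the descriptor to the local kmem
        vseg_free( vseg );
    }
}

-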
trunk/kernel/mm/vseg.c
r623 r625
61 61 }
62 62
63 /////////////////////
63 ///////////////////////////
64 64 vseg_t * vseg_alloc( void )
trunk/kernel/mm/vseg.h
r623 r625
70 70 /*******************************************************************************************
71 71 * This structure defines a virtual segment descriptor.
72 * - The VSL contains only local vsegs, but is implemented as an xlist, because it can be
73 * accessed by thread running in a remote cluster.
74 * - The zombi list is used by the local MMAP allocator. It is implemented as a local list.
72 * The VSL contains only local vsegs, but is implemented as an xlist, because it can be
73 * accessed by a thread running in a remote cluster.
74 * The xlist field is also used to implement the zombi lists used by the MMAP allocator.
75 75 ******************************************************************************************/
76 76
… …
78 78 {
79 79 xlist_entry_t xlist; /*! all vsegs in same VSL */
80 list_entry_t zlist; /*! all vsegs in same zombi list */
81 80 struct vmm_s * vmm; /*! pointer on associated VM manager */
82 81 uint32_t type; /*! vseg type */
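Removing the dedicated zlist field works because a vseg is linked either in a VSL or in a zombi list, never in both at once, so the single xlist field can serve both roles. A hedged illustration of walking such a list through the shared link, using the xlist accessors found in this kernel (XLIST_FOREACH, XLIST_ELEMENT, GET_PTR):

// count the vsegs linked in a list rooted at <root_xp>: the root may
// be a process VSL root or a zombi list root, since both now thread
// the vsegs through the same vseg->xlist field
static uint32_t vseg_list_count( xptr_t root_xp )
{
    uint32_t count = 0;
    xptr_t   iter_xp;

    XLIST_FOREACH( root_xp , iter_xp )
    {
        xptr_t   vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg_t * vseg    = GET_PTR( vseg_xp );

        (void)vseg;   // a real traversal would inspect vseg->type, etc.
        count++;
    }
    return count;
}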