Changeset 407 for trunk/kernel/mm
- Timestamp: Nov 7, 2017, 3:08:12 PM (7 years ago)
- Location: trunk/kernel/mm
- Files: 12 edited
trunk/kernel/mm/kcm.c
Changes from r406:

- All kcm_dmsg() debug messages now use the "[DBG]" prefix instead of "[DMSG]". A representative hunk:

    - kcm_dmsg("\n[DMSG] %s : enters for %s / page %x / count = %d / active = %d\n",
    + kcm_dmsg("\n[DBG] %s : enters for %s / page %x / count = %d / active = %d\n",
                 __FUNCTION__ , kmem_type_str( kcm->type ) ,
                 (intptr_t)kcm_page , kcm_page->count , kcm_page->active );

  The same prefix substitution is applied to the message reporting an allocated block ("allocated one block %s / ptr = %p / page = %x / count = %d"), the initialisation message ("KCM %s initialised / block_size = %d / blocks_nr = %d"), and the two entry messages of the allocation path (new page and first active page cases).
trunk/kernel/mm/kmem.c
Changes from r406:

- All kmem_dmsg() debug messages now use the "[DBG]" prefix instead of "[DMSG]". The affected messages are the on-demand KCM creation trace ("enters / KCM type %s missing in cluster %x" and "exit / KCM type %s created in cluster %x"), the allocator entry message ("enters in cluster %x for type %s"), and its three exit messages (page allocation, variable-size allocation, and fixed-size allocation paths).
trunk/kernel/mm/mapper.c
Changes from r406:

- In the page lookup function, the mapper_dmsg() debug messages switch from the "[DMSG]" prefix to "[DBG]" and now identify the calling core:

    - mapper_dmsg("\n[DMSG] %s : enters for page %d / mapper %x\n",
    -             __FUNCTION__ , index , mapper );
    + mapper_dmsg("\n[DBG] %s : core[%x,%d] enters for page %d / mapper %x\n",
    +             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , index , mapper );

- For a missing page, the two flag-setting calls are merged into one:

    - page_set_flag( page , PG_INIT );
    - page_set_flag( page , PG_INLOAD );
    + page_set_flag( page , PG_INIT | PG_INLOAD );

- A new debug message reports the PPN once a missing page has been loaded from the file system:

    + mapper_dmsg("\n[DBG] %s : missing page loaded / ppn = %x\n",
    +             __FUNCTION__ , ppm_page2ppn(XPTR(local_cxy,page)) );

- When the page is being loaded by another thread, the waiting thread now deschedules with sched_yield() instead of sched_yield( NULL ).

- In the two data-move functions, all mapper_dmsg() messages get the "[DBG]" prefix, and the kernel-buffer variant (the one taking an extended pointer on the buffer) now also prints core[local_cxy,lid] in its entry, per-page, and exit messages:

    - mapper_dmsg("\n[DMSG] %s : to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n",
    -             __FUNCTION__ , to_buffer , buffer_cxy , buffer_ptr , size );
    + mapper_dmsg("\n[DBG] %s : core[%x,%d] / to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n",
    +             __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, to_buffer, buffer_cxy, buffer_ptr, size );
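Both move functions carve the [file_offset, file_offset + size) byte range into per-page fragments, computing for each touched page an index, an offset, and a byte count, as the "index / offset / count" debug messages above show. The following standalone sketch models that arithmetic; PAGE_SHIFT, PAGE_SIZE and the values in main() are illustrative stand-ins for the CONFIG_PPM_PAGE_* parameters, not the kernel code itself:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                     // assumption : 4 Kbytes pages
    #define PAGE_SIZE  (1 << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)

    int main( void )
    {
        uint32_t file_offset = 5000;          // first byte to move (example)
        uint32_t size        = 10000;         // number of bytes to move (example)

        uint32_t min_byte = file_offset;
        uint32_t max_byte = file_offset + size - 1;
        uint32_t first    = min_byte >> PAGE_SHIFT;   // index of first page
        uint32_t last     = max_byte >> PAGE_SHIFT;   // index of last page

        for( uint32_t index = first ; index <= last ; index++ )
        {
            // byte offset of the fragment inside this page
            uint32_t page_offset = (index == first) ? (min_byte & PAGE_MASK) : 0;

            // number of bytes to move for this page
            uint32_t page_count;
            if( index == last ) page_count = (max_byte & PAGE_MASK) + 1 - page_offset;
            else                page_count = PAGE_SIZE - page_offset;

            printf("page %u : offset = %u / count = %u\n", index, page_offset, page_count);
        }
        return 0;
    }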
trunk/kernel/mm/mapper.h
Changes from r315:

- In the comment and prototype of the user-buffer move function, the user buffer parameter is renamed from "buffer" to "u_buf":

      * @ to_buffer   : mapper -> buffer if true / buffer -> mapper if false.
      * @ file_offset : first byte to move in file.
    - * @ buffer      : user space pointer on user buffer.
    + * @ u_buf       : user space pointer on user buffer.
      * @ size        : number of bytes to move.
      * returns 0 if success / returns EINVAL if error.

      ...
          bool_t     to_buffer,
          uint32_t   file_offset,
    -     void     * buffer,
    +     void     * u_buf,
          uint32_t   size );
trunk/kernel/mm/page.c
Changes from r315:

- page_init() no longer initialises the removed fork_nr field:

      page->mapper   = NULL;
      page->index    = 0;
    - page->fork_nr  = 0;
      page->refcount = 0;

- When the requested page is locked, the calling thread now deschedules with sched_yield() instead of sched_yield( NULL ):

      // deschedule the calling thread
      thread_block( thread , THREAD_BLOCKED_PAGE );
    - sched_yield( NULL );
    + sched_yield();
trunk/kernel/mm/page.h
Changes from r315:

- In the page descriptor, the fork_nr field is replaced by an unused placeholder, keeping the structure layout unchanged:

      xlist_entry_t wait_root;  /*! root of list of waiting threads      (16) */
      uint32_t      refcount;   /*! reference counter                     (4) */
    - uint32_t      fork_nr;    /*! number of forked processes            (4) */
    + uint32_t      reserved;   /*! UNUSED                                (4) */
      spinlock_t    lock;       /*! only used to set the PG_LOCKED flag  (16) */
trunk/kernel/mm/ppm.c
Changes from r406:

- The two assertions guarding page release now report the PPN of the offending page:

    - assert( !page_is_flag( page , PG_FREE )     , __FUNCTION__ , "page already freed" );
    - assert( !page_is_flag( page , PG_RESERVED ) , __FUNCTION__ , "freeing reserved page" );
    + assert( !page_is_flag( page , PG_FREE ) , __FUNCTION__ ,
    +         "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );
    + assert( !page_is_flag( page , PG_RESERVED ) , __FUNCTION__ ,
    +         "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

- The order assertion in the page allocation function reports the illegal value:

    - assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" );
    + assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ ,
    +         "illegal order argument = %x\n" , order );

- The two ppm_dmsg() debug messages in that function switch from "[DMSG]" to "[DBG]", and the indentation of a printk() and the spacing of two for-loops over the free lists are normalised (no functional change).
trunk/kernel/mm/ppm.h
Changes from r315:

- Comment-only change: "Get extended pointer on page base from Global PPN." becomes "Get extended pointer on page base from global PPN."
trunk/kernel/mm/vmm.c
Changes from r406:

- vmm_init() : the local list of vsegs (vsegs_nr, vsegs_root, vsegs_lock) is now initialised at the top of the function, before the configuration assertions; the entry and exit vmm_dmsg() messages use the "[DBG]" prefix and print core[local_cxy,lid]. The "kentry", "args" and "envs" vsegs are registered with the extended vmm_create_vseg() signature (and the assertion messages now say "cannot create" instead of "cannot register"):

    - vseg_kentry = vmm_create_vseg( process , base , size , VSEG_TYPE_CODE );
    + vseg_kentry = vmm_create_vseg( process,
    +                                VSEG_TYPE_CODE,
    +                                base,
    +                                size,
    +                                0,             // file_offset unused
    +                                0,             // file_size unused
    +                                XPTR_NULL,     // mapper_xp unused
    +                                local_cxy );

  The block that statically registered the "heap" vseg (and set vmm->heap_vpn_base) is removed: the MMAP allocator now manages the whole zone starting at CONFIG_VMM_HEAP_BASE instead of CONFIG_VMM_MMAP_BASE:

    - vmm->mmap_mgr.vpn_base        = CONFIG_VMM_MMAP_BASE;
    - vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_MMAP_BASE;
    - vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_MMAP_BASE;
    + vmm->mmap_mgr.vpn_base        = CONFIG_VMM_HEAP_BASE;
    + vmm->mmap_mgr.vpn_size        = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
    + vmm->mmap_mgr.first_free_vpn  = CONFIG_VMM_HEAP_BASE;

- New vmm_display( process , mapping ) function : it prints the VSL of a process (type, base, size, and number of pages of each registered vseg), taking the vsegs_lock in read mode; when the <mapping> argument is true, it also scans the GPT with hal_gpt_get_pte() and prints vpn / attr / ppn for every mapped PTE of each vseg.

- In the VMM copy function (fork path), the destination GPT is now created with hal_gpt_create() before the vsegs loop, and for each vseg registered in the destination VSL the valid PTEs are copied from the source GPT to the destination GPT:

    + // copy SRC GPT to DST GPT / set COW for all writable vsegs, but the FILE type
    + bool_t cow = (src_vseg->type != VSEG_TYPE_FILE) && (src_vseg->flags & VSEG_WRITE);
    + error = hal_gpt_copy( &dst_vmm->gpt,
    +                       &src_vmm->gpt,
    +                       src_vseg->vpn_base,
    +                       src_vseg->vpn_size,
    +                       cow );

  The old code path (a duplicated hal_gpt_create(), the "HEAP TODO" copy of heap_vseg, and a single whole-GPT hal_gpt_copy() with cow = true) is removed, and the destination MMAP allocator is initialised from CONFIG_VMM_HEAP_BASE, as in vmm_init().

- vmm_create_vseg() takes the extended argument list ( process , type , base , size , file_offset , file_size , mapper_xp , cxy ). The VMM-specific allocators are now used for the "stack", "file", "anon" and "remote" vseg types; the heap_vseg shortcut for VSEG_TYPE_HEAP is removed; vseg_init() is called with the full argument list; the debug messages use the "[DBG]" prefix and print core[local_cxy,lid]; the "no vspace for stack vseg" error message becomes "no space for stack vseg". Cosmetic changes: an end-of-function comment for vmm_remove_vseg() and a widened comment box for the static vseg_from_vaddr() helper.

- In vmm_resize_vseg() (the case where the removed region cuts a vseg in two), the internal vmm_create_vseg() call now propagates the file_offset, file_size, mapper_xp and cxy fields of the original vseg.

- New static helper vmm_page_allocate( vseg , vpn ) : it computes the target cluster for a physical page and allocates the page there. For a distributed vseg (VSEG_DISTRIB flag), the cluster is derived from the vpn LSBs, now converted into mesh coordinates:

    + uint32_t x_size  = LOCAL_CLUSTER->x_size;
    + uint32_t y_size  = LOCAL_CLUSTER->y_size;
    + uint32_t y_width = LOCAL_CLUSTER->y_width;
    + uint32_t index   = vpn & ((x_size * y_size) - 1);
    + uint32_t x       = index / y_size;
    + uint32_t y       = index % y_size;
    + page_cxy         = (x<<y_width) + y;

  Otherwise the cluster specified in the vseg descriptor is used. The page itself is obtained from kmem_alloc() when the target cluster is local, or through rpc_pmem_get_pages_client() otherwise, and the helper returns an extended pointer on the page descriptor (XPTR_NULL on failure). The vseg cannot have the FILE type. vmm_get_one_ppn() now calls this helper for all non-FILE vseg types, instead of computing the target cluster and allocating the page inline.
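As an illustration of the new placement policy, here is a standalone model of the vpn-to-cluster computation; the 4x4 mesh and 4-bit Y field are arbitrary example parameters (the kernel reads x_size, y_size and y_width from LOCAL_CLUSTER). Consecutive pages of a distributed vseg rotate over all clusters of the mesh:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t vpn_t;
    typedef uint32_t cxy_t;

    // example mesh parameters (assumptions, not the kernel values)
    #define X_SIZE   4        // number of rows
    #define Y_SIZE   4        // number of columns
    #define Y_WIDTH  4        // number of bits for the Y field in a cxy

    // for a distributed vseg, the target cluster depends only on the vpn LSBs
    static cxy_t page_cxy_from_vpn( vpn_t vpn )
    {
        uint32_t index = vpn & ((X_SIZE * Y_SIZE) - 1);   // 0 .. 15
        uint32_t x     = index / Y_SIZE;                  // mesh row
        uint32_t y     = index % Y_SIZE;                  // mesh column
        return (x << Y_WIDTH) + y;                        // cluster identifier
    }

    int main( void )
    {
        for( vpn_t vpn = 0 ; vpn < 8 ; vpn++ )
            printf("vpn %u -> cluster %x\n", vpn , page_cxy_from_vpn( vpn ) );
        return 0;
    }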
- In vmm_get_one_ppn(), the rest of the function now manipulates the extended pointer page_xp returned by vmm_page_allocate() : the page base is computed with ppm_page2base( page_xp ), the "local or remote" tests use GET_CXY( page_xp ) instead of a separate page_cxy variable, and the returned PPN is ppm_page2ppn( page_xp ). All vmm_dmsg() messages (entry, "fully in BSS", "fully in mapper", "both mapper & BSS", exit) use the "[DBG]" prefix.

- vmm_get_pte() gets a new bool_t cow parameter between <vpn> and the <attr> / <ppn> output arguments, and now handles both event types. The vseg lookup is performed first in all cases:

    + // get vseg pointer from ref VSL
    + error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );

  For a copy-on-write event (cow == true), the function asserts that the PTE is mapped, allocates a new physical page with vmm_page_allocate(), copies the old page content into it, and registers the new PTE with the COW flag reset and the WRITABLE flag set:

    + // copy old page content to new page
    + xptr_t old_base_xp = ppm_ppn2base( old_ppn );
    + xptr_t new_base_xp = ppm_ppn2base( new_ppn );
    + memcpy( GET_PTR( new_base_xp ),
    +         GET_PTR( old_base_xp ),
    +         CONFIG_PPM_PAGE_SIZE );
    +
    + // update attributes: reset COW and set WRITABLE
    + new_attr = old_attr & ~GPT_COW;
    + new_attr = new_attr | GPT_WRITABLE;
    +
    + // register PTE in GPT
    + error = hal_gpt_set_pte( &vmm->gpt , vpn , new_ppn , new_attr );

  For a page-fault event (cow == false), the previous behaviour is kept: if the PTE is unmapped, a physical page is allocated and initialised by vmm_get_one_ppn(), the attributes are built from the vseg flags (GPT_MAPPED | GPT_SMALL, plus GPT_USER / GPT_WRITABLE / GPT_EXECUTABLE / GPT_CACHABLE), and the PTE is registered with hal_gpt_set_pte(); if the PTE is already mapped, the existing attr / ppn are simply returned.

- vmm_handle_page_fault() now passes false (page-fault) as the cow argument to rpc_vmm_get_pte_client() or vmm_get_pte(); when the reference cluster is remote, it also updates the local GPT with the returned attr / ppn through hal_gpt_set_pte().

- New vmm_copy_on_write( process , vpn ) function : it has the same structure as vmm_handle_page_fault(), but passes true (copy-on-write) to rpc_vmm_get_pte_client() or vmm_get_pte(), and likewise updates the local GPT when the reference cluster is remote.

- A last caller of vmm_get_pte() / rpc_vmm_get_pte_client() (the one testing whether the calling process is the reference process) is updated to pass false (page-fault).
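The copy-on-write branch above is self-contained enough to model in user space. The sketch below mirrors its three steps (allocate a new page, copy the old content, rewrite the attributes); the GPT_* bit values, the malloc()-based "page allocation" and the pointer-based "PTE" are illustrative stand-ins for the HAL GPT layer, not the kernel API:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE     4096
    #define GPT_MAPPED    0x0001   // illustrative bit values : the real ones
    #define GPT_WRITABLE  0x0002   // are defined by the HAL-specific GPT code
    #define GPT_COW       0x0004

    // model of the copy-on-write branch of vmm_get_pte() :
    // duplicate the physical page, then rewrite the PTE
    static void handle_cow( uint32_t * attr , uint8_t ** base )
    {
        // a copy-on-write PTE must be mapped
        if( (*attr & GPT_MAPPED) == 0 ) abort();

        // allocate a new physical page (stand-in for vmm_page_allocate)
        uint8_t * new_base = malloc( PAGE_SIZE );

        // copy old page content to new page
        memcpy( new_base , *base , PAGE_SIZE );

        // reset COW and set WRITABLE in the PTE attributes
        *attr = (*attr & ~GPT_COW) | GPT_WRITABLE;

        // register the new mapping (stand-in for hal_gpt_set_pte)
        *base = new_base;
    }

    int main( void )
    {
        uint32_t  attr = GPT_MAPPED | GPT_COW;
        uint8_t * base = calloc( 1 , PAGE_SIZE );

        handle_cow( &attr , &base );   // attr is now GPT_MAPPED | GPT_WRITABLE
        return 0;
    }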
trunk/kernel/mm/vmm.h
Changes from r406:

- The STACK allocator comment is reworded ("used by the VMM to dynamically handle a STACK vseg requested or released by an user process"; "a fixed size array of fixed size slots in the STACK zone"), the dangling reference to a CONFIG_THREAD parameter is replaced by CONFIG_VMM_STACK_BASE, and the sentence "In this implementation, the max number of slots is 32." is removed.

- The MMAP allocator comment is similarly reworded ("dynamically handle MMAP vsegs").

- In the VMM descriptor comment, service 2 becomes "It allocates virtual memory space for the STACKS and MMAP vsegs (FILE/ANON/REMOTE)". In the vmm_t structure itself, the vsegs_lock comment drops the mention of a radix tree, the pgfault_nr comment becomes "page fault counter (instrumentation)", and the heap_vseg field is removed.

- The mmap_attr_t structure (used to store the arguments of the mmap() system call) is removed.

- The vmm_init() comment is rewritten: the function initialises the STACK and MMAP allocators and registers the "kentry", "args" and "envs" vsegs in the VSL; the "code" and "data" vsegs are registered by elf_load_process(), the "stack" vsegs are dynamically created by thread_user_create(), and the "file", "anon" and "remote" vsegs are dynamically created by the mmap() syscalls; the generic page table is initialised by the HAL specific hal_gpt_init() function (for TSAR it maps all pages of the "kentry" vseg, which must be identity mapped).

- New prototype for vmm_display( process , mapping ), which displays the registered vsegs of a process, with the detailed PTE mapping of each vseg when <mapping> is true.

- The VMM copy function comment now states that it is called by sys_fork(), that all vsegs of the source VSL are copied into the destination VSL, and that all PTEs of the source GPT are copied into the destination GPT, with the WRITABLE flag reset and the COW flag set for all writable PTEs except those of FILE vsegs.

- The vmm_create_vseg() comment and prototype are updated for the new argument list:

    vseg_t * vmm_create_vseg( struct process_s * process,
                              vseg_type_t        type,
                              intptr_t           base,
                              uint32_t           size,
                              uint32_t           file_offset,
                              uint32_t           file_size,
                              xptr_t             mapper_xp,
                              cxy_t              cxy );

  The comment notes that the <base> argument is not used for the dynamically allocated "stack", "file", "anon" and "remote" types, that the function checks collision with all pre-existing vsegs, that it does not modify the page table nor allocate physical memory ("on-demand" paging), and that it should be called by a local thread (possibly an RPC thread when the client thread does not run in the reference cluster).

- The vmm_remove_vseg() comment now says the operation "can modify the number of vsegs", with a new FIXME: the function must be called by a thread running in the reference cluster, and the VMM must be updated in all process descriptor copies.

- The vmm_handle_page_fault() comment is reordered (reference-cluster case first), and a new vmm_copy_on_write( process , vpn ) prototype is added with a symmetric comment for the copy-on-write event.

- The vmm_get_pte() comment is rewritten for the two event types (page-fault : allocate and initialise the missing physical page; copy-on-write : allocate a new physical page and initialise it from the old one; in both cases RPC_PMEM_GET_PAGES is used when the target cluster is remote), and the prototype gains the bool_t cow argument between <vpn> and <attr>.
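The STACK allocator described at the top of this file (a fixed array of fixed-size slots whose state is fully captured by a bitmap) can be modelled in a few lines. This toy version is an assumption-level sketch: the real allocator uses the kernel bitmap primitives, a lock, and the CONFIG_VMM_* geometry, none of which are shown here:

    #include <stdint.h>
    #include <stdio.h>

    #define SLOTS_NR  32                // illustrative slot count

    typedef struct
    {
        uint32_t bitmap;                // bit i set <=> slot i allocated
    } stack_mgr_t;

    // returns a slot index, or -1 if the STACK zone is full
    static int stack_alloc( stack_mgr_t * mgr )
    {
        for( int i = 0 ; i < SLOTS_NR ; i++ )
        {
            if( (mgr->bitmap & (1u << i)) == 0 )
            {
                mgr->bitmap |= (1u << i);
                return i;
            }
        }
        return -1;
    }

    static void stack_free( stack_mgr_t * mgr , int i )
    {
        mgr->bitmap &= ~(1u << i);
    }

    int main( void )
    {
        stack_mgr_t mgr = { .bitmap = 0 };
        int a = stack_alloc( &mgr );    // slot 0
        int b = stack_alloc( &mgr );    // slot 1
        stack_free( &mgr , a );
        printf("a = %d / b = %d / bitmap = %x\n", a , b , mgr.bitmap );
        return 0;
    }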
trunk/kernel/mm/vseg.c
Changes from r406:

- vseg_type_str() : the HEAP, KCODE, KDATA and KDEV entries are removed, and two strings are shortened to four characters ("STACK" becomes "STAK", "REMOTE" becomes "REMO").

- The vseg_init() signature becomes ( vseg , type , base , size , vpn_base , vpn_size , file_offset , file_size , mapper_xp , cxy ), with size now an uint32_t; the file_offset, file_size and mapper_xp fields are initialised from the arguments (mapper_xp was previously hard-wired to XPTR_NULL):

    - vseg->mapper_xp = XPTR_NULL;
    + vseg->file_offset = file_offset;
    + vseg->file_size   = file_size;
    + vseg->mapper_xp   = mapper_xp;

- In the per-type flags assignment, the VSEG_TYPE_HEAP block is removed, ANON vsegs lose the VSEG_DISTRIB flag (they keep VSEG_USER | VSEG_WRITE | VSEG_CACHE), and a new VSEG_TYPE_KDEV block sets flags = VSEG_WRITE.

- vseg_init_from_ref() : the parameter is renamed from "ref" to "ref_xp", and the function now also copies the file_offset and file_size fields from the remote vseg:

    + vseg->file_offset = hal_remote_lw ( XPTR( cxy , &ptr->file_offset ) );
    + vseg->file_size   = hal_remote_lw ( XPTR( cxy , &ptr->file_size ) );
trunk/kernel/mm/vseg.h
Changes from r406:

- The anonymous vseg types enum becomes a named typedef vseg_type_t, described as "the vseg types for an user process". The HEAP type disappears, the user types are renumbered, the kernel types move to a separate range, and VSEG_TYPES_NR is removed:

    - VSEG_TYPE_CODE = 0 / DATA = 1 / HEAP = 2 / STACK = 3 / ANON = 4 / FILE = 5 /
    -   REMOTE = 6 / KCODE = 7 / KDATA = 8 / KDEV = 9 / VSEG_TYPES_NR = 10
    + VSEG_TYPE_CODE = 0 / DATA = 1 / STACK = 2 / ANON = 3 / FILE = 4 / REMOTE = 5 /
    +   KDATA = 10 / KCODE = 11 / KDEV = 12

- In the vseg_t descriptor, the mapper_xp and file_offset comments now read "(for types CODE/DATA/FILE)".

- The vseg_init() prototype is updated to match vseg.c : the type argument (now a vseg_type_t) moves to second position, size becomes an uint32_t, and the file_offset, file_size and mapper_xp arguments are added before cxy.