Changeset 433 for trunk/kernel/mm
Timestamp: Feb 14, 2018, 3:40:19 PM (7 years ago)
Location:  trunk/kernel/mm
Files:     8 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added, unprefixed lines are unmodified context, and '…' marks skipped unchanged lines.
trunk/kernel/mm/kcm.c
r407 → r433

                            kcm_page_t * kcm_page )
  {
-     kcm_dmsg("\n[DBG] %s : enters for %s / page %x / count = %d / active = %d\n",
-              __FUNCTION__ , kmem_type_str( kcm->type ) ,
-              (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
+
+ #if CONFIG_DEBUG_KCM_ALLOC
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_KCM_ALLOC < cycle )
+ printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
+ __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
+ (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
+ #endif

      assert( kcm_page->active , __FUNCTION__ , "kcm_page should be active" );
…
                + (index * kcm->block_size) );

-     kcm_dmsg("\n[DBG] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n",
-              __FUNCTION__ , kmem_type_str( kcm->type ) , ptr ,
-              (intptr_t)kcm_page , kcm_page->count );
+ #if CONFIG_DEBUG_KCM_ALLOC
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_KCM_ALLOC < cycle )
+ printk("\n[DBG] %s : thread %x exit / type %s / ptr %p / page %x / count %d\n",
+ __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
+ (intptr_t)kcm_page , kcm_page->count );
+ #endif

      return ptr;
…
      kcm->active_pages_nr ++;
      kcm_page->active = 1;
-
-     kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
-              __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
-              (intptr_t)kcm_page , kcm_page->count );
-
  }
  else                          // get first page from active list
…
      // get page pointer from active list
      kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
-
-     kcm_dmsg("\n[DBG] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
-              __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
-              (intptr_t)kcm_page , kcm_page->count );
  }
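The kcm.c hunks above are representative of the whole changeset: the old kcm_dmsg()/ppm_dmsg()/vmm_dmsg() trace macros are replaced by printk() calls guarded by a per-feature CONFIG_DEBUG_* threshold, compared against the current cycle count so tracing only starts after a configurable date. A minimal user-space sketch of that idiom follows; the threshold value, the get_cycles() stub and the use of printf() in place of printk() are illustrative assumptions, not kernel code.

    #include <stdio.h>
    #include <stdint.h>

    /* illustrative threshold: tracing starts once the cycle counter exceeds it */
    #define CONFIG_DEBUG_KCM_ALLOC  1000

    /* stand-in for hal_get_cycles(): any monotonically increasing cycle counter */
    static uint32_t get_cycles( void )
    {
        static uint32_t fake = 0;
        return fake += 600;
    }

    static void kcm_get_block_traced( void )
    {
    #if CONFIG_DEBUG_KCM_ALLOC
        uint32_t cycle = get_cycles();
        if( CONFIG_DEBUG_KCM_ALLOC < cycle )      /* silent before the threshold date */
            printf("[DBG] %s : enter / cycle %u\n", __func__, (unsigned)cycle );
    #endif
        /* ... the actual allocation work would go here ... */
    }

    int main( void )
    {
        kcm_get_block_traced();    /* cycle  600 : no trace        */
        kcm_get_block_traced();    /* cycle 1200 : trace printed   */
        return 0;
    }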
trunk/kernel/mm/kmem.c
r429 → r433

      if( type == KMEM_PAGE )            // PPM allocator
      {
+
+ #if CONFIG_DEBUG_KMEM_ALLOC
+ if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] in %s : thread %x enter for %d page(s)\n",
+ __FUNCTION__ , CURRENT_THREAD , 1<<size );
+ #endif
+
          // allocate the number of requested pages
          ptr = (void *)ppm_alloc_pages( size );
…
                 __FUNCTION__, local_cxy , kmem_type_str( type ) ,
                 (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
+
+ #if CONFIG_DEBUG_KMEM_ALLOC
+ if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x\n",
+ __FUNCTION__ , CURRENT_THREAD , 1<<size , ppm_page2ppn( XPTR( local_cxy , ptr ) ) );
+ #endif
+
      }
      else if( type == KMEM_GENERIC )    // KHM allocator
trunk/kernel/mm/page.c
r407 → r433

      page->index    = 0;
      page->refcount = 0;
-     page->fork_nr  = 0;
+     page->forks    = 0;

      spinlock_init( &page->lock );
trunk/kernel/mm/page.h
r408 → r433

   * This structure defines a physical page descriptor.
   * Size is 64 bytes for a 32 bits core...
+  * TODO : the list of waiting threads seems to be unused [AG]
+  * TODO : the spinlock use has to be clarified [AG]
   ************************************************************************************/
…
      xlist_entry_t  wait_root;      /*! root of list of waiting threads      (16) */
      uint32_t       refcount;       /*! reference counter                     (4) */
-     uint32_t       fork_nr;        /*! number of pending forks               (4) */
-     spinlock_t     lock;           /*! only used to set the PG_LOCKED flag  (16) */
+     uint32_t       forks;          /*! number of pending forks               (4) */
+     spinlock_t     lock;           /*! To Be Defined [AG]                   (16) */
  }
  page_t;
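The page.h change above renames fork_nr to forks: the field counts how many forked child processes may still need to copy-on-write from this physical page. A small self-contained sketch of the counting discipline used later in this changeset (the page_t below is a simplified stand-in, and the check-then-decrement is deliberately not race-free, which is what the FIXME comments about locking the physical page in vmm.c refer to):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* simplified stand-in for the kernel physical page descriptor */
    typedef struct page_s
    {
        atomic_uint forks;    /* number of pending forks still sharing this frame */
    } page_t;

    /* a fork that shares the page takes one reference (vmm_set_cow / vmm_fork_copy) */
    static void page_fork_ref( page_t * page )
    {
        atomic_fetch_add( &page->forks , 1 );
    }

    /* on unmap: the frame may only be released when no fork is pending */
    static bool page_release_allowed( page_t * page )
    {
        if( atomic_load( &page->forks ) == 0 ) return true;   /* nobody left: free it  */
        atomic_fetch_sub( &page->forks , 1 );                 /* otherwise just un-ref */
        return false;
    }

    int main( void )
    {
        page_t pg;
        atomic_init( &pg.forks , 0 );
        page_fork_ref( &pg );                                    /* one pending fork        */
        printf("release ? %d\n", page_release_allowed( &pg ));   /* 0 : keep the frame      */
        printf("release ? %d\n", page_release_allowed( &pg ));   /* 1 : frame can be freed  */
        return 0;
    }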
trunk/kernel/mm/ppm.c
r407 → r433

      list_add_first( &ppm->free_pages_root[current_order] , &current->list );
      ppm->free_pages_nr[current_order] ++;
- }
+
+ }  // end ppm_free_pages_nolock()

…
      page_t   * remaining_block;
      uint32_t   current_size;
+
+ #if CONFIG_DEBUG_PPM_ALLOC_PAGES
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+ printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
+ #endif
+
+ #if(CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)
+ if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+ ppm_print();
+ #endif

      ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
…
      page_t   * block = NULL;
-
-     ppm_dmsg("\n[DBG] %s : enters / order = %d\n",
-              __FUNCTION__ , order );

      // take lock protecting free lists
…
          spinlock_unlock( &ppm->free_lock );

+ #if CONFIG_DEBUG_PPM_ALLOC_PAGES
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+ printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
+ #endif
+
          return NULL;
      }
…
      spinlock_unlock( &ppm->free_lock );

-     ppm_dmsg("\n[DBG] %s : base = %x / order = %d\n",
-              __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
+ #if CONFIG_DEBUG_PPM_ALLOC_PAGES
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+ printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
+ #endif

      return block;
- }
+
+ }  // end ppm_alloc_pages()

…
      ppm_t * ppm = &LOCAL_CLUSTER->ppm;

+ #if CONFIG_DEBUG_PPM_FREE_PAGES
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
+ printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
+ #endif
+
+ #if(CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)
+ if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
+ ppm_print();
+ #endif
+
      // get lock protecting free_pages[] array
      spinlock_lock( &ppm->free_lock );
…
      // release lock protecting free_pages[] array
      spinlock_unlock( &ppm->free_lock );
+
+ #if CONFIG_DEBUG_PPM_FREE_PAGES
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
+ printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
+ #endif
+
  }

- ////////////////////////////
- void ppm_print( ppm_t * ppm,
-                 char  * string )
+ ////////////////
+ void ppm_print()
  {
      uint32_t       order;
…
      page_t       * page;

+     ppm_t * ppm = &LOCAL_CLUSTER->ppm;
+
      // get lock protecting free lists
      spinlock_lock( &ppm->free_lock );

-     printk("\n*** PPM in cluster %x : %d pages / &pages_tbl = %x / vaddr_base = %x ***\n",
-            local_cxy , ppm->pages_nr , (intptr_t)ppm->pages_tbl , (intptr_t)ppm->vaddr_base );
+     printk("\n*** PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr );

      for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
      {
-         printk("- order = %d / free_pages = %d [",
+         printk("- order = %d / free_pages = %d\t: ",
                 order , ppm->free_pages_nr[order] );
…
          {
              page = LIST_ELEMENT( iter , page_t , list );
-             printk("%d," , page - ppm->pages_tbl );
+             printk("%x," , page - ppm->pages_tbl );
          }

-         printk(" ]\n", NULL);
+         printk("\n");
      }
trunk/kernel/mm/ppm.h
r409 → r433

   * from the "kernel_heap" section.
   * This low-level allocator implements the buddy algorithm: an allocated block is
-  * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
+  * an integer number n of 4 small pages, and n (called order) is a power of 2.
   ****************************************************************************************/
…
  /*****************************************************************************************
-  * This function prints the PPM allocator status.
-  *****************************************************************************************
-  * @ ppm      : pointer on PPM allocator.
-  * @ string   : define context of display.
+  * This function prints the PPM allocator status in the calling thread cluster.
   ****************************************************************************************/
- void ppm_print( ppm_t * ppm,
-                 char  * string );
+ void ppm_print();

  /*****************************************************************************************
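As the updated ppm.h comment says, the PPM buddy allocator only hands out blocks of 2^order contiguous small pages, so a request for n pages has to be rounded up to the next power of two. A short sketch of that order computation (the function name is illustrative, not part of the kernel API):

    #include <stdio.h>
    #include <stdint.h>

    /* smallest order such that (1 << order) >= npages */
    static uint32_t order_from_pages( uint32_t npages )
    {
        uint32_t order = 0;
        while( (1u << order) < npages ) order++;
        return order;
    }

    int main( void )
    {
        uint32_t n;
        for( n = 1 ; n <= 5 ; n++ )
            printf("%u page(s) -> order %u -> %u page(s) actually allocated\n",
                   n , order_from_pages( n ) , 1u << order_from_pages( n ) );
        return 0;
    }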
trunk/kernel/mm/vmm.c
r429 → r433

      intptr_t  size;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
+ #if CONFIG_DEBUG_VMM_INIT
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_INIT )
+ printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
+ #endif

      // get pointer on VMM
…
      hal_fence();

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x / entry_point = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
-              process->pid , process->vmm.entry_point );
+ #if CONFIG_DEBUG_VMM_INIT
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_INIT )
+ printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
+ #endif

      return 0;
…
      vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
-     vseg    = (vseg_t *)GET_PTR( vseg_xp );
+     vseg    = GET_PTR( vseg_xp );

      printk(" - %s : base = %X / size = %X / npages = %d\n",
…
  }  // vmm_display()

- /////////////////////i////////////////////
- void vmm_update_pte( process_t * process,
-                      vpn_t       vpn,
-                      uint32_t    attr,
-                      ppn_t       ppn )
+ /////////////////////i//////////////////////////
+ void vmm_global_update_pte( process_t * process,
+                             vpn_t       vpn,
+                             uint32_t    attr,
+                             ppn_t       ppn )
  {
…
      cxy_t           owner_cxy;
      lpid_t          owner_lpid;
+
+ #if CONFIG_DEBUG_VMM_UPDATE_PTE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+ printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
+ #endif
+
+     // check cluster is reference
+     assert( (GET_CXY( process->ref_xp ) == local_cxy) , __FUNCTION__,
+             "not called in reference cluster\n");

      // get extended pointer on root of process copies xlist in owner cluster
…
      // get cluster and local pointer on remote process
      remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
-     remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
+     remote_process_ptr = GET_PTR( remote_process_xp );
      remote_process_cxy = GET_CXY( remote_process_xp );
+
+ #if (CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
+ if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+ printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
+ #endif

      // get extended pointer on remote gpt
      remote_gpt_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.gpt );

-     hal_gpt_update_pte( remote_gpt_xp,
-                         vpn,
-                         attr,
-                         ppn );
+     // update remote GPT
+     hal_gpt_update_pte( remote_gpt_xp, vpn, attr, ppn );
  }
- }  // end vmm_update_pte()
+
+ #if CONFIG_DEBUG_VMM_UPDATE_PTE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+ printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );
+ #endif
+
+ }  // end vmm_global_update_pte()
…
      cxy_t     owner_cxy;
      lpid_t    owner_lpid;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enters for process %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
+ #if CONFIG_DEBUG_VMM_SET_COW
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+ printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
+ #endif

      // check cluster is reference
…
      // get cluster and local pointer on remote process
      remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
-     remote_process_ptr = (process_t *)GET_PTR( remote_process_xp );
+     remote_process_ptr = GET_PTR( remote_process_xp );
      remote_process_cxy = GET_CXY( remote_process_xp );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] handling process %x in cluster %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid , remote_process_cxy );
+ #if (CONFIG_DEBUG_VMM_SET_COW &0x1)
+ if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+ printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );
+ #endif

      // get extended pointer on remote gpt
…
      // get pointer on vseg
      vseg_xp = XLIST_ELEMENT( vseg_iter_xp , vseg_t , xlist );
-     vseg    = (vseg_t *)GET_PTR( vseg_xp );
+     vseg    = GET_PTR( vseg_xp );

      assert( (GET_CXY( vseg_xp ) == local_cxy) , __FUNCTION__,
…
      vpn_t    vpn_size = vseg->vpn_size;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] handling vseg %s / vpn_base = %x / vpn_size = %x\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vseg_type_str(type), vpn_base, vpn_size );
-
-     // set COW flag on the remote GPT depending on vseg type
+ #if (CONFIG_DEBUG_VMM_SET_COW & 0x1)
+ if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+ printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
+ __FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );
+ #endif
+     // only DATA, ANON and REMOTE vsegs
      if( (type == VSEG_TYPE_DATA)  ||
          (type == VSEG_TYPE_ANON)  ||
          (type == VSEG_TYPE_REMOTE) )
      {
-         hal_gpt_flip_cow( true,             // set_cow
-                           remote_gpt_xp,
-                           vpn_base,
-                           vpn_size );
-     }
- }   // en loop on vsegs
+         vpn_t      vpn;
+         uint32_t   attr;
+         ppn_t      ppn;
+         xptr_t     page_xp;
+         cxy_t      page_cxy;
+         page_t   * page_ptr;
+         xptr_t     forks_xp;
+
+         // update flags in remote GPT
+         hal_gpt_set_cow( remote_gpt_xp,
+                          vpn_base,
+                          vpn_size );
+
+         // atomically increment pending forks counter in physical pages,
+         // for all vseg pages that are mapped in reference cluster
+         if( remote_process_cxy == local_cxy )
+         {
+             // the reference GPT is the local GPT
+             gpt_t * gpt = GET_PTR( remote_gpt_xp );
+
+             // scan all pages in vseg
+             for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
+             {
+                 // get page attributes and PPN from reference GPT
+                 hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
+
+                 // atomically update pending forks counter if page is mapped
+                 if( attr & GPT_MAPPED )
+                 {
+                     page_xp  = ppm_ppn2page( ppn );
+                     page_cxy = GET_CXY( page_xp );
+                     page_ptr = GET_PTR( page_xp );
+                     forks_xp = XPTR( page_cxy , &page_ptr->forks );
+                     hal_remote_atomic_add( forks_xp , 1 );
+                 }
+             }   // end loop on vpn
+         }   // end if local
+     }   // end if vseg type
+ }   // end loop on vsegs
  }   // end loop on process copies

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for process %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , process->pid );
+ #if CONFIG_DEBUG_VMM_SET_COW
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+ printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
+ #endif

  }  // end vmm_set-cow()
…
      ppn_t       ppn;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
+ #if CONFIG_DEBUG_VMM_FORK_COPY
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+ printk("\n[DBG] %s : thread %x enter / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD, cycle );
+ #endif

      // get parent process cluster and local pointer
      parent_cxy     = GET_CXY( parent_process_xp );
-     parent_process = (process_t *)GET_PTR( parent_process_xp );
+     parent_process = GET_PTR( parent_process_xp );

      // get local pointers on parent and child VMM
…
      // get local and extended pointers on current parent vseg
      parent_vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
-     parent_vseg    = (vseg_t *)GET_PTR( parent_vseg_xp );
+     parent_vseg    = GET_PTR( parent_vseg_xp );

      // get vseg type
      type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] found parent vseg %s / vpn_base = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
-              hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
+ #if CONFIG_DEBUG_VMM_FORK_COPY
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+ printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),
+ hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
+ #endif

      // all parent vsegs - but STACK - must be copied in child VSL
…
      vseg_attach( child_vmm , child_vseg );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child VSL : vseg %s / vpn_base = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vseg_type_str(type),
-              hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) );
+ #if CONFIG_DEBUG_VMM_FORK_COPY
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+ printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
+ hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
+ #endif

      // copy DATA, MMAP, REMOTE, FILE parent GPT entries to child GPT
…
      }

-     // increment page descriptor fork_nr for the referenced page if mapped
+     // increment pending forks counter in page if mapped
      if( mapped )
      {
          page_xp  = ppm_ppn2page( ppn );
          page_cxy = GET_CXY( page_xp );
-         page_ptr = (page_t *)GET_PTR( page_xp );
-         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , 1 );
-
-         vmm_dmsg("\n[DBG] %s : core[%x,%d] copied to child GPT : vpn %x\n",
-                  __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
+         page_ptr = GET_PTR( page_xp );
+         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
+
+ #if CONFIG_DEBUG_VMM_FORK_COPY
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+ printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , vpn , cycle );
+ #endif

      }
…
      hal_fence();

+ #if CONFIG_DEBUG_VMM_FORK_COPY
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+ printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif
+
      return 0;
…
      vseg_t * vseg;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enter\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
-
-     // get pointer on VMM
+ #if CONFIG_DEBUG_VMM_DESTROY
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+ printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
+ #endif
+
+     // get pointer on local VMM
      vmm_t * vmm = &process->vmm;
…
      // get pointer on first vseg in VSL
      vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
-     vseg    = (vseg_t *)GET_PTR( vseg_xp );
-
-     // unmap and release all pages
+     vseg    = GET_PTR( vseg_xp );
+
+     // unmap rand release physical pages if required)
      vmm_unmap_vseg( process , vseg );
…
      }

-     // release lock
+     // release lock protecting VSL
      remote_rwlock_wr_unlock( lock_xp );
…
      hal_gpt_destroy( &vmm->gpt );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] exit\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
+ #if CONFIG_DEBUG_VMM_DESTROY
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+ printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , cycle );
+ #endif

  }  // end vmm_destroy()
…
      vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
-     vseg    = (vseg_t *)GET_PTR( vseg_xp );
+     vseg    = GET_PTR( vseg_xp );

      if( ((vpn_base + vpn_size) > vseg->vpn_base) &&
…
      error_t    error;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enters / process %x / base %x / size %x / %s / cxy = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
-              process->pid , base , size , vseg_type_str(type) , cxy );
+ #if CONFIG_DEBUG_VMM_CREATE_VSEG
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
+ printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );
+ #endif

      // get pointer on VMM
…
      remote_rwlock_wr_unlock( lock_xp );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] exit / process %x / base %x / size %x / type %s\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
-              process->pid , base , size , vseg_type_str(type) );
+ #if CONFIG_DEBUG_VMM_CREATE_VSEG
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
+ printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );
+ #endif

      return vseg;
…
      cxy_t       page_cxy;   // page descriptor cluster
      page_t    * page_ptr;   // page descriptor pointer
-
-     vmm_dmsg("\n[DBG] %s : core[%x, %d] enter / process %x / vseg %s / base %x / cycle %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, process->pid ,
-              vseg_type_str( vseg->type ), vseg->vpn_base, (uint32_t)hal_get_cycles() );
-
-     // get pointer on process GPT
+     xptr_t      forks_xp;   // extended pointer on pending forks counter
+     uint32_t    count;      // actual number of pendinf forks
+
+ #if CONFIG_DEBUG_VMM_UNMAP_VSEG
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+ printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
+ #endif
+
+     // get pointer on local GPT
      gpt_t     * gpt = &process->vmm.gpt;
…
              "an user vseg must use small pages" );

-         // unmap GPT entry
+         // unmap GPT entry in all GPT copies
          hal_gpt_reset_pte( gpt , vpn );

-         // release memory if not identity mapped
-         if( (vseg->flags & VSEG_IDENT) == 0 )
+         // handle pending forks counter if
+         // 1) not identity mapped
+         // 2) running in reference cluster
+         if( ((vseg->flags & VSEG_IDENT)  == 0) &&
+             (GET_CXY( process->ref_xp ) == local_cxy) )
          {
-             // get extended pointer on page descriptor
+             // get extended pointer on physical page descriptor
              page_xp  = ppm_ppn2page( ppn );
              page_cxy = GET_CXY( page_xp );
-             page_ptr = (page_t *)GET_PTR( page_xp );
-
-             // release physical page to relevant cluster
-             if( page_cxy == local_cxy )   // local cluster
+             page_ptr = GET_PTR( page_xp );
+
+             // FIXME lock the physical page
+
+             // get extended pointer on pending forks counter
+             forks_xp = XPTR( page_cxy , &page_ptr->forks );
+
+             // get pending forks counter
+             count = hal_remote_lw( forks_xp );
+
+             if( count )   // decrement pending forks counter
              {
-                 req.type = KMEM_PAGE;
-                 req.ptr  = page_ptr;
-                 kmem_free( &req );
+                 hal_remote_atomic_add( forks_xp , -1 );
+             }
+             else          // release physical page to relevant cluster
+             {
+                 if( page_cxy == local_cxy )   // local cluster
+                 {
+                     req.type = KMEM_PAGE;
+                     req.ptr  = page_ptr;
+                     kmem_free( &req );
+                 }
+                 else                          // remote cluster
+                 {
+                     rpc_pmem_release_pages_client( page_cxy , page_ptr );
+                 }
              }
-             else                          // remote cluster
-             {
-                 rpc_pmem_release_pages_client( page_cxy , page_ptr );
-             }
+
+             // FIXME unlock the physical page
          }
      }
  }
+
+ #if CONFIG_DEBUG_VMM_UNMAP_VSEG
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+ printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );
+ #endif
+
  }  // end vmm_unmap_vseg()
…
      vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
-     vseg    = (vseg_t *)GET_PTR( vseg_xp );
+     vseg    = GET_PTR( vseg_xp );
      if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
…
      // get cluster and local pointer on reference process
      cxy_t       ref_cxy = GET_CXY( ref_xp );
-     process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
+     process_t * ref_ptr = GET_PTR( ref_xp );

      if( local_cxy == ref_cxy ) return -1;   // local cluster is the reference
…
                           vpn_t    vpn )
  {
+
+ #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
+ if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
+ __FUNCTION__ , CURRENT_THREAD, vpn );
+ #endif
+
      // compute target cluster
      page_t     * page_ptr;
…
      }

+ #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
+ if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
+ __FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );
+ #endif
+
      if( page_ptr == NULL ) return XPTR_NULL;
      else                   return XPTR( page_cxy , page_ptr );
…
      index = vpn - vseg->vpn_base;

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n",
-              __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index );
+ #if CONFIG_DEBUG_VMM_GET_ONE_PPN
+ if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
+ __FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );
+ #endif

      // FILE type : get the physical page from the file mapper
…
      // get mapper cluster and local pointer
      cxy_t      mapper_cxy = GET_CXY( mapper_xp );
-     mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
+     mapper_t * mapper_ptr = GET_PTR( mapper_xp );

      // get page descriptor from mapper
…
      else
      {
-         // allocate physical page
+         // allocate one physical page
          page_xp = vmm_page_allocate( vseg , vpn );
…
          // initialise missing page from .elf file mapper for DATA and CODE types
-         // => the mapper_xp field is an extended pointer on the .elf file mapper
+         // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper)
          if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
          {
…
              // get mapper cluster and local pointer
              cxy_t      mapper_cxy = GET_CXY( mapper_xp );
-             mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
+             mapper_t * mapper_ptr = GET_PTR( mapper_xp );

              // compute missing page offset in vseg
…
              uint32_t elf_offset = vseg->file_offset + offset;

-             vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n",
-                      __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset );
+ #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
+ if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
+ __FUNCTION__, CURRENT_THREAD, vpn, elf_offset );
+ #endif

              // compute extended pointer on page base
…
              if( file_size < offset )                 // missing page fully in BSS
              {
-                 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in BSS\n",
-                          __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
+
+ #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
+ if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
+ __FUNCTION__, CURRENT_THREAD, vpn );
+ #endif

                  if( GET_CXY( page_xp ) == local_cxy )
…
              {

-                 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / fully in mapper\n",
-                          __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
+ #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
+ if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
+ __FUNCTION__, CURRENT_THREAD, vpn );
+ #endif

                  if( mapper_cxy == local_cxy )
…
              {

-                 vmm_dmsg("\n[DBG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n"
-                          "      %d bytes from mapper / %d bytes from BSS\n",
-                          __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn,
+ #if (CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
+ if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
+ "      %d bytes from mapper / %d bytes from BSS\n",
+ __FUNCTION__, CURRENT_THREAD, vpn,
                  file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size );
+ #endif
                  // initialize mapper part
                  if( mapper_cxy == local_cxy )
…
      *ppn = ppm_page2ppn( page_xp );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn );
+ #if CONFIG_DEBUG_VMM_GET_ONE_PPN
+ if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+ printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
+ __FUNCTION__ , CURRENT_THREAD , vpn , *ppn );
+ #endif

      return 0;
…
                       ppn_t    * ppn )
  {
-     vseg_t * vseg;       // pointer on vseg containing VPN
+     vseg_t * vseg;       // vseg containing VPN
      ppn_t    old_ppn;    // current PTE_PPN
      uint32_t old_attr;   // current PTE_ATTR
…
              "not called in the reference cluster\n" );

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] enter for vpn = %x in process %x / cow = %d\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid , cow );
+ #if CONFIG_DEBUG_VMM_GET_PTE
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_GET_PTE > cycle )
+ printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
+ __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
+ #endif

      // get VMM pointer
      vmm_t * vmm = &process->vmm;

-     // get vseg pointer from ref VSL
+     // get vseg pointer from reference VSL
      error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
…
      }

-     vmm_dmsg("\n[DBG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n",
-              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
-              vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size );
+ #if CONFIG_DEBUG_VMM_GET_PTE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+ printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
+ __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
+ #endif

      // access GPT to get current PTE attributes and PPN
…
      // clusters containing a copy, and return the new_ppn and new_attr

-     if( cow )               ////////////// copy_on_write request ///////////
+     if( cow )               /////////////////////////// copy_on_write request //////////////////////
      {
          assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
                  "PTE must be mapped for a copy-on-write exception\n" );

-         excp_dmsg("\n[DBG] %s : core[%x,%d] handling COW for vpn %x\n",
-                   __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
-
-         // get extended pointer, cluster and local pointer on page descriptor
+ #if CONFIG_DEBUG_VMM_GET_PTE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+ printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
+ __FUNCTION__, CURRENT_THREAD, vpn, process->pid );
+ #endif
+
+         // get extended pointer, cluster and local pointer on physical page descriptor
          xptr_t   page_xp  = ppm_ppn2page( old_ppn );
          cxy_t    page_cxy = GET_CXY( page_xp );
-         page_t * page_ptr = (page_t *)GET_PTR( page_xp );
+         page_t * page_ptr = GET_PTR( page_xp );

          // get number of pending forks in page descriptor
-         uint32_t count = hal_remote_lw( XPTR( page_cxy , &page_ptr->fork_nr ) );
-
-         if( count )        // pending fork => allocate a new page, copy it, reset COW
+         uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ) );
+
+         if( forks )        // pending fork => allocate a new page, copy old to new
          {
              // allocate a new physical page
…

          // update GPT[vpn] for all GPT copies
-         // to maintain coherence of copies
-         vmm_update_pte( process,
-                         vpn,
-                         new_attr,
-                         new_ppn );
-
-         // decrement fork_nr in page descriptor
-         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->fork_nr ) , -1 );
-     }
-     else                    /////////////// page_fault request ///////////
+         vmm_global_update_pte( process, vpn, new_attr, new_ppn );
+
+         // decrement pending forks counter in page descriptor
+         hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
+     }
+     else                    ////////////////////////////////// page_fault request ////////////////////////
      {
          if( (old_attr & GPT_MAPPED) == 0 )     // true page_fault => map it
          {

-             excp_dmsg("\n[DBG] %s : core[%x,%d] handling page fault for vpn %x\n",
-                       __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
+ #if CONFIG_DEBUG_VMM_GET_PTE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+ printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
+ __FUNCTION__, CURRENT_THREAD, vpn, process->pid );
+ #endif

              // allocate new_ppn, depending on vseg type
…
      }

-     excp_dmsg("\n[DBG] %s : core[%x,%d] update GPT for vpn %x / ppn = %x / attr = %x\n",
-               __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , new_ppn , new_attr );
-
-     // retur success
+ #if CONFIG_DEBUG_VMM_GET_PTE
+ cycle = (uint32_t)hal_get_cycles();
+ if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+ printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr = %x / cycle %d\n",
+ __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
+ #endif
+
+     // return success
      *ppn  = new_ppn;
      *attr = new_attr;
…
      // get reference process cluster and local pointer
      cxy_t       ref_cxy = GET_CXY( process->ref_xp );
-     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
+     process_t * ref_ptr = GET_PTR( process->ref_xp );

      // get missing PTE attributes and PPN from reference cluster
…
                                 vpn_t       vpn )
  {
-     uint32_t attr;          // missing page attributes
-     ppn_t    ppn;           // missing page PPN
+     uint32_t attr;          // page attributes
+     ppn_t    ppn;           // page PPN
      error_t  error;
+
      // get reference process cluster and local pointer
-     process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
+     process_t * ref_ptr = GET_PTR( process->ref_xp );
…
          cxy_t       ref_cxy = GET_CXY( process->ref_xp );
-         process_t * ref_ptr = (process_t *)GET_PTR( process->ref_xp );
+         process_t * ref_ptr = GET_PTR( process->ref_xp );
          rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , false , &attr , &ppn , &error );
      }
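The vmm.c rework above makes the copy-on-write path depend on the pending forks counter: when a write fault hits a COW page that still has pending forks, vmm_get_pte() allocates a private copy and decrements the counter; when the counter is already zero, the existing page can simply be made writable again. A compact user-space sketch of that decision (page_t, the allocator and the copy are simplified stand-ins for the kernel's ppm/kmem and hal_remote primitives; error handling omitted):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    typedef struct page_s
    {
        uint32_t forks;              /* pending forks still sharing this frame */
        uint8_t  data[PAGE_SIZE];
    } page_t;

    /* returns the frame the faulting writer must use after a COW exception */
    static page_t * cow_resolve( page_t * old )
    {
        if( old->forks )                                  /* still shared with a forked child */
        {
            page_t * copy = calloc( 1 , sizeof(page_t) ); /* kernel: vmm_page_allocate()      */
            memcpy( copy->data , old->data , PAGE_SIZE ); /* private copy for the writer      */
            old->forks--;                                 /* one pending fork resolved        */
            return copy;
        }
        return old;                                       /* last user: keep the frame, just
                                                             clear COW and set WRITABLE       */
    }

    int main( void )
    {
        page_t * shared = calloc( 1 , sizeof(page_t) );
        shared->forks   = 1;                              /* one fork still pending           */
        page_t * mine   = cow_resolve( shared );
        printf("private copy ? %s / remaining forks = %u\n",
               (mine != shared) ? "yes" : "no" , shared->forks );
        return 0;
    }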
trunk/kernel/mm/vmm.h
r429 → r433

   * a remote_rwlock, because it can be accessed by a thread running in a remote cluster.
   * An exemple is the vmm_fork_copy() function.
-  * 2. In most custers, the VSL and GPT are only partial copies of the reference VSL and GPT
+  * 2. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT
   *    structures, stored in the reference cluster.
   ********************************************************************************************/
…
  /*********************************************************************************************
-  * This function is called by the process_fork_create() function. It partially copies
+  * This function is called by the process_make_fork() function. It partially copies
   * the content of a remote parent process VMM to the local child process VMM:
   * - all DATA, MMAP, REMOTE vsegs registered in the parent VSL are registered in the child
…
  /*********************************************************************************************
-  * This function is called by the process_make_fork() function to handle the fork syscall.
+  * This function is called by the process_make_fork() function executing the fork syscall.
   * It set the COW flag, and reset the WRITABLE flag of all GPT entries of the DATA, MMAP,
   * and REMOTE vsegs of a process identified by the <process> argument.
   * It must be called by a thread running in the reference cluster, that contains the complete
-  * list of vsegs. Use the rpc_vmm_set_cow_client() when the calling thread client is remote.
+  * VSL and GPT (use the rpc_vmm_set_cow_client() when the calling thread client is remote).
   * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
   * using the list of copies stored in the owner process, and using remote_write accesses to
-  * update the remote GPTs. It cannot fail, as only mapped entries in GPT copies are updated.
+  * update the remote GPTs. It atomically increment the pending_fork counter, in all involved
+  * physical page descriptors. It cannot fail, as only mapped entries in GPTs are updated.
   *********************************************************************************************
   * @ process   : local pointer on local reference process descriptor.
…
  /*********************************************************************************************
-  * This function is called by the vmm_get_pte() function in case of COW exception.
-  * It modifies both the PPN an the attributes for a GPT entry identified by the <process>
-  * and <vpn> arguments.
+  * This global function modifies a GPT entry identified by the <process> and <vpn>
+  * arguments in all clusters containing a process copy.
+  * It must be called by a thread running in the reference cluster.
   * It updates all copies of the process in all clusters, to maintain coherence in GPT copies,
   * using the list of copies stored in the owner process, and using remote_write accesses to
…
   * @ ppn       : PTE / physical page index.
   ********************************************************************************************/
- void vmm_update_pte( struct process_s * process,
-                      vpn_t              vpn,
-                      uint32_t           attr,
-                      ppn_t              ppn );
-
- /*********************************************************************************************
-  * This function scan the list of vsegs registered in the VSL of the process
-  * identified by the <process> argument, and for each vseg:
-  * - it unmap from the GPT and releases all mapped pages in vseg.
-  * - it removes the vseg from the process VSL.
-  * - It releases the memory allocated to the vseg descriptor.
+ void vmm_global_update_pte( struct process_s * process,
+                             vpn_t              vpn,
+                             uint32_t           attr,
+                             ppn_t              ppn );
+
+ /*********************************************************************************************
+  * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the
+  * <process> and <vseg> arguments. It can be used for any type of vseg.
+  * If this function is executed in the reference cluster, it handles for each referenced
+  * physical pages the pending forks counter :
+  * - if counter is non-zero, it decrements it.
+  * - if counter is zero, it releases the physical page to local kmem allocator.
+  *********************************************************************************************
+  * @ process  : pointer on process descriptor.
+  * @ vseg     : pointer on the vseg to be unmapped.
+  ********************************************************************************************/
+ void vmm_unmap_vseg( struct process_s * process,
+                      vseg_t           * vseg );
+
+ /*********************************************************************************************
+  * This function deletes, in the local cluster, all vsegs registered in the VSL
+  * of the process identified by the <process> argument. For each vseg:
+  * - it unmaps all vseg PTEs from the GPT (release the physical pages when required).
+  * - it removes the vseg from the local VSL.
+  * - it releases the memory allocated to the local vseg descriptors.
   * Finally, it releases the memory allocated to the GPT itself.
   *********************************************************************************************
…
  /*********************************************************************************************
-  * This function unmaps all mapped PTEs of a given vseg, from the generic page table
-  * associated to a given process descriptor, and releases the physical memory allocated
-  * to all mapped GPT entries. It can be used for any type of vseg.
-  *********************************************************************************************
-  * @ process  : pointer on process descriptor.
-  * @ vseg     : pointer on the vseg to be unmapped.
-  ********************************************************************************************/
- void vmm_unmap_vseg( struct process_s * process,
-                      vseg_t           * vseg );
-
- /*********************************************************************************************
   * This function removes a given region (defined by a base address and a size) from
   * the VMM of a given process descriptor. This can modify the number of vsegs:
…
  /*********************************************************************************************
   * This function is called by the generic exception handler when a page-fault event
-  * has been detected in a given cluster.
+  * has been detected for a given process in a given cluster.
   * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
   * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
…
  /*********************************************************************************************
   * This function is called by the generic exception handler when a copy-on-write event
-  * has been detected in a given cluster.
-  * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
-  * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
-  *   to the reference cluster to get the missing PTE attributes and PPN,
-  *   and update the local page table.
+  * has been detected for a given process in a given cluster.
+  * It takes the lock protecting the physical page, and test the pending forks counter.
+  * If no pending fork:
+  * - it reset the COW flag and set the WRITE flag in the reference GPT entry, and in all
+  *   the GPT copies
+  *
+  * If there is a pending fork:
+  * - It get the involved vseg pointer.
+  * - It allocates a new physical page from the cluster defined by the vseg type.
+  * - It copies the old physical page content to the new physical page.
+  * - It decrements the pending_fork counter in old physical page descriptor.
+  *
   *********************************************************************************************
   * @ process   : pointer on process descriptor.
…
  /*********************************************************************************************
-  * This function is called when a new PTE (GPT entry) is required because a "page-fault",
-  * or "copy-on_write" event has been detected for a given <vpn> in a given <process>.
-  * The <cow> argument defines the type of event to be handled.
+  * This function handle both the "page-fault" and "copy-on_write" events for a given <vpn>
+  * in a given <process>. The <cow> argument defines the type of event to be handled.
   * This function must be called by a thread running in reference cluster, and the vseg
-  * containing the searched VPN should be registered in the reference VMM.
-  * - for an actual page-fault, it allocates the missing physical page from the target cluster
-  *   defined by the vseg type, initialize it, and update the reference page table.
+  * containing the searched VPN must be registered in the reference VMM.
+  * - for an page-fault, it allocates the missing physical page from the target cluster
+  *   defined by the vseg type, initializes it, and updates the reference GPT, but not
+  *   the copies GPT, that will be updated on demand.
   * - for a copy-on-write, it allocates a new physical page from the target cluster,
-  *   initialise it from the old physical page, and update the reference page table.
-  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page if the
-  *   target cluster is not the reference cluster.
+  *   initialise it from the old physical page, and updates the reference GPT and all
+  *   the GPT copies, for coherence.
+  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when
+  *   the target cluster is not the reference cluster.
   * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
   *********************************************************************************************
…
   * (Physical Page Number) associated to a missing page defined by the <vpn> argument.
   * - For the FILE type, it returns directly the physical page from the file mapper.
-  * - For the CODE and DATA types, it allocates a new phsical page from the cluster defined
+  * - For the CODE and DATA types, it allocates a new physical page from the cluster defined
   *   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
   *   and initialize this page from the .elf file mapper.
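The vmm.h comments above keep coming back to the same dispatch rule: page-fault and copy-on-write events are resolved locally when the faulting thread runs in the reference cluster, and forwarded to the reference cluster through an RPC otherwise. A schematic sketch of that dispatch follows; the helper names stand in for vmm_get_pte() and rpc_vmm_get_pte_client() and are not the real kernel signatures.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t cxy_t;   /* cluster identifier */

    /* stand-ins for the kernel primitives used by vmm_handle_page_fault() */
    static int local_get_pte( uint32_t vpn , uint32_t * attr , uint32_t * ppn )
    {
        *attr = 0x5;            /* pretend: mapped + readable */
        *ppn  = 0x100 + vpn;
        return 0;
    }

    static int rpc_get_pte( cxy_t ref_cxy , uint32_t vpn , uint32_t * attr , uint32_t * ppn )
    {
        (void)ref_cxy;          /* a real RPC would marshal the request to ref_cxy */
        return local_get_pte( vpn , attr , ppn );
    }

    static int handle_page_fault( cxy_t local_cxy , cxy_t ref_cxy , uint32_t vpn )
    {
        uint32_t attr, ppn;
        int      error;

        if( local_cxy == ref_cxy )   /* reference cluster : resolve the miss locally  */
            error = local_get_pte( vpn , &attr , &ppn );
        else                         /* other cluster     : ask the reference via RPC */
            error = rpc_get_pte( ref_cxy , vpn , &attr , &ppn );

        if( error ) return error;

        /* ...then the (attr, ppn) pair would be written into the local GPT copy...   */
        printf("vpn %x mapped : attr %x / ppn %x\n",
               (unsigned)vpn , (unsigned)attr , (unsigned)ppn );
        return 0;
    }

    int main( void )
    {
        return handle_page_fault( 1 , 0 , 0x300 );
    }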