Changeset 640

Timestamp: Oct 1, 2019, 1:19:00 PM
Location:  trunk
Files:     25 edited
trunk/hal/generic/hal_gpt.h
r635 → r640

   * hal_gpt.h - Generic Page Table API definition.
   *
-  * Authors   Alain Greiner (2016)
+  * Authors   Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
[...]
   typedef struct gpt_s
   {
-      void     * ptr;                /*! local pointer on GPT root                        */
-      ppn_t      ppn;                /*! PPN of GPT root                                  */
+      void     * ptr;                /*! local pointer on GPT root                        */
+      uint32_t   pte1_wait_events;   /*! total number of pte1 wait events on this gpt     */
+      uint32_t   pte1_wait_iters;    /*! total number of iterations in all pte1 wait      */
+      uint32_t   pte2_wait_events;   /*! total number of pte2 wait events on this gpt     */
+      uint32_t   pte2_wait_iters;    /*! total number of iterations in all pte2 wait      */
   }
   gpt_t;
[...]
   * This function releases all memory dynamically allocated for a generic page table.
   * For a multi-levels radix tree implementation, it includes all nodes in the tree.
+  * All GPT entries are supposed to be previously unmapped.
   ****************************************************************************************
   * @ gpt : pointer on generic page table descriptor.
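The four counters added above make the busy-wait cost of hal_gpt_lock_pte() observable per GPT. As a minimal sketch (not part of this changeset), a monitor can derive an average number of spin iterations per wait event from them; the zero-event guard mirrors the one used by hal_vmm_display() further down:

    /* hypothetical helper, assuming only the gpt_t fields declared above */
    static inline uint32_t gpt_pte1_wait_ratio( gpt_t * gpt )
    {
        uint32_t events = gpt->pte1_wait_events;
        uint32_t iters  = gpt->pte1_wait_iters;
        return (events == 0) ? 0 : (iters / events);   /* avoid divide-by-zero */
    }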
trunk/hal/tsar_mips32/core/hal_context.c
r635 → r640

       assert( (context != NULL ), "CPU context not allocated" );

+      // compute the PPN for the GPT PT1
+      ppn_t gpt_pt1_ppn = ppm_base2ppn( XPTR( local_cxy , thread->process->vmm.gpt.ptr ) );
+
       // initialisation depends on thread type
       if( thread->type == THREAD_USER )
[...]
           context->c0_sr   = SR_USR_MODE;
           context->c0_th   = (uint32_t)thread;
-          context->c2_ptpr = (uint32_t)( (thread->process->vmm.gpt.ppn) >> 1 );
+          context->c2_ptpr = (uint32_t)( gpt_pt1_ppn >> 1 );
           context->c2_mode = 0xF;
       }
[...]
           context->c0_sr   = SR_SYS_MODE;
           context->c0_th   = (uint32_t)thread;
-          context->c2_ptpr = (uint32_t)( (thread->process->vmm.gpt.ppn) >> 1 );
+          context->c2_ptpr = (uint32_t)( gpt_pt1_ppn >> 1 );
           context->c2_mode = 0x3;
       }
[...]
       process_t * process;          // local pointer on child process
-      uint32_t    child_pt_ppn;     // PPN of child process PT1
+      void      * child_gpt_ptr;    // local pointer on child GPT PT1
+      uint32_t    child_gpt_ppn;    // PPN of child GPT PT1
       vseg_t    * child_us_vseg;    // local pointer on child user stack vseg
[...]
       child_process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );

-      // get ppn of remote child process page table
-      child_pt_ppn = hal_remote_l32( XPTR(child_cxy , &child_process->vmm.gpt.ppn) );
+      // get base and ppn of remote child process GPT PT1
+      child_gpt_ptr = hal_remote_l32( XPTR(child_cxy , &child_process->vmm.gpt.ptr) );
+      child_gpt_ppn = ppm_base2ppn( XPTR( child_cxy , child_gpt_ptr ) );

       // get local pointer on local parent uzone (in parent kernel stack)
[...]
       context.sp_29   = (uint32_t)child_ksp;
       context.c0_th   = (uint32_t)child_ptr;
-      context.c2_ptpr = (uint32_t)child_pt_ppn >> 1;
+      context.c2_ptpr = (uint32_t)child_gpt_ppn >> 1;

       // From this point, both parent and child execute the following code,
[...]
       uint32_t child_sp   = parent_uzone[UZ_SP] + child_us_base - parent_us_base;
       uint32_t child_th   = (uint32_t)child_ptr;
-      uint32_t child_ptpr = (uint32_t)child_pt_ppn >> 1;
+      uint32_t child_ptpr = (uint32_t)child_gpt_ppn >> 1;

       #if DEBUG_HAL_CONTEXT
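A reading note on the ">> 1" above: on the TSAR MMU the PTPR register identifies the PT1 base in 8 Kbytes units (the PT1 alignment), while ppm_base2ppn() returns a 4 Kbytes page number, hence the extra shift. A hypothetical helper summarizing the conversion now that the cached gpt.ppn field is gone (kernel primitives used above are assumed):

    /* hypothetical helper: derive the c2_ptpr value from the GPT base pointer */
    static inline uint32_t tsar_ptpr_from_gpt( cxy_t cxy , gpt_t * gpt )
    {
        ppn_t pt1_ppn = ppm_base2ppn( XPTR( cxy , gpt->ptr ) );  /* 4 KB page number */
        return (uint32_t)( pt1_ppn >> 1 );                       /* PTPR counts 8 KB units */
    }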
trunk/hal/tsar_mips32/core/hal_gpt.c
r637 → r640

   #include <hal_gpt.h>
   #include <hal_special.h>
+  #include <hal_irqmask.h>
   #include <printk.h>
   #include <bits.h>
[...]
   ///////////////////////////////////////////////////////////////////////////////////////

-  #define GPT_LOCK_WATCHDOG  1000000
+  #define GPT_LOCK_WATCHDOG  100000
[...]
   }

-      gpt->ptr = base;
-      gpt->ppn = ppm_base2ppn( XPTR( local_cxy , base ) );
+      // initialize the GPT descriptor
+      gpt->ptr              = base;
+      gpt->pte1_wait_events = 0;
+      gpt->pte1_wait_iters  = 0;
+      gpt->pte2_wait_events = 0;
+      gpt->pte2_wait_iters  = 0;

   #if DEBUG_HAL_GPT_CREATE
[...]
   if( DEBUG_HAL_GPT_CREATE < cycle )
   printk("\n[%s] thread[%x,%x] exit / pt1_base %x / pt1_ppn %x / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, gpt->ptr, gpt->ppn, cycle );
+  __FUNCTION__, this->process->pid, this->trdid,
+  base, ppm_base2ppn( XPTR( local_cxy , base ) ), cycle );
   #endif
[...]
       kmem_req_t req;

+      thread_t * this = CURRENT_THREAD;
+
   #if DEBUG_HAL_GPT_DESTROY
   uint32_t cycle = (uint32_t)hal_get_cycles();
-  thread_t * this = CURRENT_THREAD;
   if( DEBUG_HAL_GPT_DESTROY < cycle )
   printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
[...]
           if( (pte1 & TSAR_PTE_SMALL) == 0 )     // BIG page
           {
-              printk("\n[WARNING] in %s : mapped big page / ix1 %x\n",
-              __FUNCTION__, ix1 );
+              printk("\n[WARNING] %s : valid PTE1 / thread[%x,%x] / ix1 %x\n",
+              __FUNCTION__, this->process->pid, this->trdid, ix1 );
           }
           else                                   // PT2 exist
[...]
               if( (attr & TSAR_PTE_MAPPED) != 0 )    // PTE2 mapped
               {
-                  printk("\n[WARNING] in %s : mapped small page / ix1 %x / ix2 %x\n",
-                  __FUNCTION__, ix1, ix2 );
+                  printk("\n[WARNING] %s : valid PTE2 / thread[%x,%x] / ix1 %x / ix2 %x\n",
+                  __FUNCTION__, this->process->pid, this->trdid, ix1, ix2 );
               }
           }
[...]
       uint32_t   pte2_attr;    // PT2[ix2].attr current value
       uint32_t   pte2_ppn;     // PT2[ix2].ppn current value
-      bool_t     atomic;
-
-  #if GPT_LOCK_WATCHDOG
-      uint32_t   count = 0;
-  #endif
+      bool_t     success;      // used for both PTE1 and PTE2 mapping
+      uint32_t   count;        // watchdog
+      uint32_t   sr_save;      // for critical section

       // get cluster and local pointer on GPT
[...]
       thread_t * this  = CURRENT_THREAD;
       uint32_t   cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
   printk("\n[%s] thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
[...]
       pte1 = hal_remote_l32( pte1_xp );

-      // If PTE1 is unmapped and unlocked, try to atomically lock this PT1 entry.
-      // This PTE1 locking prevent multiple concurrent PT2 allocations
-      // - only the thread that successfully locked the PTE1 allocates a new PT2
-      //   and updates the PTE1
-      // - all other threads simply wait until the missing PTE1 is mapped.
-
-      if( pte1 == 0 )
+      // If PTE1 is unmapped, the calling thread tries to map this PTE1.
+      // To prevent multiple concurrent PT2 allocations, only the thread that
+      // successfully locked the PTE1 allocates a new PT2 and updates the PTE1.
+      // All other threads simply wait until the missing PTE1 is mapped.
+
+      if( (pte1 & TSAR_PTE_MAPPED) == 0 )
       {
-          // try to atomically lock the PTE1 to prevent concurrent PT2 allocations
-          atomic = hal_remote_atomic_cas( pte1_xp,
-                                          pte1,
-                                          pte1 | TSAR_PTE_LOCKED );
-          if( atomic )
-          {
-              // allocate one 4 Kbytes physical page for PT2
+          if( (pte1 & TSAR_PTE_LOCKED) == 0 )
+          {
+              // try to atomically lock the PTE1
+              success = hal_remote_atomic_cas( pte1_xp,
+                                               pte1,
+                                               TSAR_PTE_LOCKED );
+          }
+          else
+          {
+              success = false;
+          }
+
+          if( success )    // winner thread allocates one 4 Kbytes page for PT2
+          {
+              // enter critical section
+              hal_disable_irq( &sr_save );
+
               req.type  = KMEM_PPM;
               req.order = 0;
[...]
               pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;

-              // set the PTE1 value in PT1
-              // this unlocks the PTE1
+              // set the PTE1 value in PT1 / this unlocks the PTE1
               hal_remote_s32( pte1_xp , pte1 );
               hal_fence();

+              // exit critical section
+              hal_restore_irq( sr_save );
+
-  #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
-  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
-  printk("\n[%s] thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
+  #if DEBUG_HAL_GPT_LOCK_PTE
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+  printk("\n[%s] PTE1 unmapped : winner thread[%x,%x] allocates a PT2 for vpn %x in cluster %x\n",
   __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
   #endif

-          } // end if atomic
-      } // end if (pte1 == 0)
-
-      // wait until PTE1 is mapped by another thread
-      while( (pte1 & TSAR_PTE_MAPPED) == 0 )
-      {
-          pte1 = hal_remote_l32( pte1_xp );
-
-  #if GPT_LOCK_WATCHDOG
-          if( count > GPT_LOCK_WATCHDOG )
-          {
-              thread_t * thread = CURRENT_THREAD;
-              printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE1 / vpn %x / cxy %x / %d iterations\n",
-              __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
-              hal_core_sleep();
-          }
-          count++;
-  #endif
-
-      }
-
-      // check pte1 because only small page can be locked
-      assert( (pte1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
-
-  #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
-  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+          }
+          else    // other threads wait until PTE1 mapped by the winner
+          {
+
+  #if DEBUG_HAL_GPT_LOCK_PTE
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+  printk("\n[%s] PTE1 unmapped : loser thread[%x,%x] wait PTE1 for vpn %x in cluster %x\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
+  #endif
+
+              count = 0;
+              do
+              {
+                  // get current pte1 value
+                  pte1 = hal_remote_l32( pte1_xp );
+
+                  // check iterations number
+                  if( count > GPT_LOCK_WATCHDOG )
+                  {
+                      thread_t * this  = CURRENT_THREAD;
+                      uint32_t   cycle = (uint32_t)hal_get_cycles();
+                      printk("\n[PANIC] in %s for PTE1 after %d iterations\n"
+                      "  thread[%x,%x] / vpn %x / cluster %x / pte1 %x / cycle %d\n",
+                      __FUNCTION__, count, this->process->pid, this->trdid,
+                      vpn, gpt_cxy, pte1, cycle );
+
+                      xptr_t process_xp = cluster_get_process_from_pid_in_cxy( gpt_cxy,
+                                                                               this->process->pid );
+                      hal_vmm_display( process_xp , true );
+
+                      hal_core_sleep();
+                  }
+
+                  // increment watchdog
+                  count++;
+              }
+              while( (pte1 & TSAR_PTE_MAPPED) == 0 );
+
+  #if CONFIG_INSTRUMENTATION_GPT
+  hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte1_wait_events ) , 1 );
+  hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte1_wait_iters  ) , count );
+  #endif
+
+  #if DEBUG_HAL_GPT_LOCK_PTE
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+  printk("\n[%s] PTE1 unmapped : loser thread[%x,%x] get PTE1 for vpn %x in cluster %x\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
+  #endif
+          }
+      } // end if pte1 unmapped
+
+      // This code is executed by all calling threads
+
+      // check PTE1 : only small and mapped pages can be locked
+      assert( (pte1 & (TSAR_PTE_SMALL | TSAR_PTE_MAPPED)) , "cannot lock a big or unmapped page\n");
+
+  #if DEBUG_HAL_GPT_LOCK_PTE
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
   printk("\n[%s] thread[%x,%x] get pte1 %x for vpn %x in cluster %x\n",
   __FUNCTION__, this->process->pid, this->trdid, pte1, vpn, gpt_cxy );
[...]
       pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );

-      // wait until PTE2 atomically set using a remote CAS
+      // initialize external loop watchdog
+      count = 0;
+
+      // in this busy waiting loop, each thread tries to atomically
+      // lock the PTE2, after checking that the PTE2 is not locked
+
       do
       {
-
-  #if GPT_LOCK_WATCHDOG
-          count = 0;
-  #endif
-
-          // wait until PTE lock released by the current owner
-          do
-          {
-              pte2_attr = hal_remote_l32( pte2_xp );
-
-  #if GPT_LOCK_WATCHDOG
-              if( count > GPT_LOCK_WATCHDOG )
-              {
-                  thread_t * thread = CURRENT_THREAD;
-                  printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE2 / vpn %x / cxy %x / %d iterations\n",
-                  __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
-                  hal_core_sleep();
-              }
-              count++;
-  #endif
-
-          }
-          while( (pte2_attr & TSAR_PTE_LOCKED) != 0 );
-
-          // try to atomically set the TSAR_PTE_LOCKED attribute
-          atomic = hal_remote_atomic_cas( pte2_xp,
-                                          pte2_attr,
-                                          (pte2_attr | TSAR_PTE_LOCKED) );
+          // get current value of pte2_attr
+          pte2_attr = hal_remote_l32( pte2_xp );
+
+          // check loop watchdog
+          if( count > GPT_LOCK_WATCHDOG )
+          {
+              thread_t * this  = CURRENT_THREAD;
+              uint32_t   cycle = (uint32_t)hal_get_cycles();
+              printk("\n[PANIC] in %s for PTE2 after %d iterations\n"
+              "  thread[%x,%x] / vpn %x / cluster %x / pte2_attr %x / cycle %d\n",
+              __FUNCTION__, count, this->process->pid, this->trdid,
+              vpn, gpt_cxy, pte2_attr, cycle );
+
+              xptr_t process_xp = cluster_get_process_from_pid_in_cxy( gpt_cxy,
+                                                                       this->process->pid );
+              hal_vmm_display( process_xp , true );
+
+              hal_core_sleep();
+          }
+
+          // increment loop watchdog
+          count++;
+
+          if( (pte2_attr & TSAR_PTE_LOCKED) == 0 )
+          {
+              // try to atomically set the TSAR_PTE_LOCKED attribute
+              success = hal_remote_atomic_cas( pte2_xp,
+                                               pte2_attr,
+                                               (pte2_attr | TSAR_PTE_LOCKED) );
+          }
+          else
+          {
+              success = false;
+          }
       }
-      while( atomic == 0 );
+      while( success == false );
+
+  #if CONFIG_INSTRUMENTATION_GPT
+  hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte2_wait_events ) , 1 );
+  hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte2_wait_iters  ) , count );
+  #endif

       // get PTE2.ppn
[...]
   #if DEBUG_HAL_GPT_LOCK_PTE
   cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
-  printk("\n[%s] thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+  printk("\n[%s] thread[%x,%x] success / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, pte2_attr, pte2_ppn, cycle );
   #endif
[...]
       gpt_t * gpt_ptr = GET_PTR( gpt_xp );

+      // compute indexes in PT1 and PT2
+      uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
+      uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+      // get local pointer on PT1
+      pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+
+      // build extended pointer on PTE1 == PT1[ix1]
+      pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );
+
+      // get current pte1 value
+      pte1 = hal_remote_l32( pte1_xp );
+
+      assert( ((pte1 & TSAR_PTE_MAPPED) != 0),
+      "PTE1 for vpn %x in cluster %x is unmapped / pte1 = %x\n", vpn, gpt_cxy, pte1 );
+
+      assert( ((pte1 & TSAR_PTE_SMALL ) != 0),
+      "PTE1 for vpn %x in cluster %x is not small / pte1 = %x\n", vpn, gpt_cxy, pte1 );
+
+      // get pointer on PT2 base
+      pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
+      pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+      // build extended pointers on PT2[ix2].attr
+      pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
+
+      // get PT2[ix2].attr
+      pte2_attr = hal_remote_l32( pte2_xp );
+
+      assert( ((pte2_attr & TSAR_PTE_LOCKED) != 0),
+      "PTE2 for vpn %x in cluster %x is unlocked / pte2_attr = %x\n", vpn, gpt_cxy, pte2_attr );
+
+      // reset TSAR_PTE_LOCKED attribute
+      hal_remote_s32( pte2_xp , pte2_attr & ~TSAR_PTE_LOCKED );
+
   #if DEBUG_HAL_GPT_LOCK_PTE
   thread_t * this  = CURRENT_THREAD;
   uint32_t   cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
-  printk("\n[%s] thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
-  #endif
-
-      // compute indexes in P1 and PT2
-      uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
-      uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
-
-      // get local pointer on PT1
-      pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
-
-      // build extended pointer on PTE1 == PT1[ix1]
-      pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );
-
-      // get current pte1 value
-      pte1 = hal_remote_l32( pte1_xp );
-
-      // check PTE1 attributes
-      assert( ((pte1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");
-      assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");
-
-      // get pointer on PT2 base
-      pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
-      pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-      // build extended pointers on PT2[ix2].attr
-      pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
-
-      // get PT2[ix2].attr
-      pte2_attr = hal_remote_l32( pte2_xp );
-
-      // check PTE2 attributes
-      assert( ((pte2_attr & TSAR_PTE_MAPPED) != 0), "unmapped PTE2\n");
-      assert( ((pte2_attr & TSAR_PTE_LOCKED) != 0), "unlocked PTE2\n");
-
-      // reset TSAR_PTE_LOCKED attribute
-      hal_remote_s32( pte2_xp , pte2_attr & ~TSAR_PTE_LOCKED );
-
-  #if DEBUG_HAL_GPT_LOCK_PTE
-  cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  if( (vpn == 0xc5fff) && (gpt_cxy == 0x1) )
   printk("\n[%s] thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
[...]
       // PTE1 must be mapped because PTE2 must be locked
-      assert( (pte1 & TSAR_PTE_MAPPED), "PTE1 must be mapped\n" );
+      assert( (pte1 & TSAR_PTE_MAPPED),
+      "PTE1 for vpn %x in cluster %x must be mapped / pte1 = %x\n", vpn, gpt_cxy, pte1 );

       // get PT2 base
[...]
       // PTE2 must be locked
-      assert( (pte2_attr & TSAR_PTE_LOCKED), "PTE2 must be locked\n" );
+      assert( (pte2_attr & TSAR_PTE_LOCKED),
+      "PTE2 for vpn %x in cluster %x must be locked / pte2_attr = %x\n", vpn, gpt_cxy, pte2_attr );

       // set PTE2 in PT2 (in this order)
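The new PTE2 locking loop is a classic test-and-test-and-set with a watchdog: read the word, attempt the CAS only when the lock bit looks clear, and panic after too many iterations. A self-contained sketch of the same pattern, with simplified names and a GCC builtin standing in for hal_remote_atomic_cas():

    #include <stdint.h>
    #include <stdbool.h>

    #define LOCK_WATCHDOG  100000
    #define LOCKED_BIT     0x1

    /* spin until LOCKED_BIT is acquired; returns the value observed before locking */
    static uint32_t lock_word( volatile uint32_t * word )
    {
        uint32_t old;
        uint32_t count   = 0;
        bool     success = false;
        do
        {
            old = *word;                               /* plain read first (test) */
            if( ++count > LOCK_WATCHDOG ) { /* report the suspected deadlock and stop */ }
            if( (old & LOCKED_BIT) == 0 )              /* CAS only when it looks free */
                success = __sync_bool_compare_and_swap( word, old, old | LOCKED_BIT );
        }
        while( success == false );
        return old;
    }

Reading the word before attempting the CAS keeps the cache line in shared state while the lock is held, so waiters do not hammer the interconnect with failing atomic operations.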
trunk/hal/tsar_mips32/core/hal_vmm.c
r637 → r640

   }

+  #if CONFIG_INSTRUMENTATION_GPT
+  uint32_t pte1_events = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte1_wait_events ) );
+  uint32_t pte1_iters  = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte1_wait_iters  ) );
+  uint32_t pte1_ratio  = (pte1_events == 0) ? 0 : (pte1_iters / pte1_events);
+  nolock_printk("\nGPT_WAIT_PTE1 : %d events / %d iterations => %d iter/event\n",
+                pte1_events, pte1_iters, pte1_ratio );
+
+  uint32_t pte2_events = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte2_wait_events ) );
+  uint32_t pte2_iters  = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte2_wait_iters  ) );
+  uint32_t pte2_ratio  = (pte2_events == 0) ? 0 : (pte2_iters / pte2_events);
+  nolock_printk("GPT_WAIT_PTE2 : %d events / %d iterations => %d iter/event\n",
+                pte2_events, pte2_iters, pte2_ratio );
+  #endif
+
   // release locks
   remote_busylock_release( txt_lock_xp );
[...]
   } // hal_vmm_display()
trunk/kernel/kern/kernel_init.c
r637 r640 163 163 "PROCESS_FDARRAY", // 27 164 164 "PROCESS_DIR", // 28 165 " unused_29",// 29165 "VMM_VSL", // 29 166 166 167 167 "PROCESS_THTBL", // 30 … … 170 170 "VFS_SIZE", // 32 171 171 "VFS_FILE", // 33 172 "VMM_VSL", // 34 173 "VFS_MAIN", // 35 174 "FATFS_FAT", // 36 172 "VFS_MAIN", // 34 173 "FATFS_FAT", // 35 175 174 }; 176 175 … … 1418 1417 #endif 1419 1418 1420 #if (DEBUG_KERNEL_INIT & 1)1421 if( (core_lid == 0) & (local_cxy == 0) )1422 sched_display( 0 );1423 #endif1424 1425 1419 if( (core_lid == 0) && (local_cxy == 0) ) 1426 1420 { … … 1445 1439 " - khm manager : %d bytes\n" 1446 1440 " - vmm manager : %d bytes\n" 1447 " - gpt root : %d bytes\n"1448 1441 " - vfs inode : %d bytes\n" 1449 1442 " - vfs dentry : %d bytes\n" … … 1473 1466 sizeof( khm_t ), 1474 1467 sizeof( vmm_t ), 1475 sizeof( gpt_t ),1476 1468 sizeof( vfs_inode_t ), 1477 1469 sizeof( vfs_dentry_t ), -
trunk/kernel/kern/rpc.c
r637 → r640

   &rpc_vfs_inode_load_all_pages_server,    // 19

-  &rpc_vmm_get_vseg_server,                // 20
-  &rpc_vmm_global_update_pte_server,       // 21
+  &rpc_undefined,                          // 20
+  &rpc_undefined,                          // 21
   &rpc_undefined,                          // 22
   &rpc_undefined,                          // 23
   &rpc_mapper_sync_server,                 // 24
-  &rpc_undefined,                          // 25
-  &rpc_vmm_delete_vseg_server,             // 26
+  &rpc_vmm_resize_vseg_server,             // 25
+  &rpc_vmm_remove_vseg_server,             // 26
   &rpc_vmm_create_vseg_server,             // 27
   &rpc_vmm_set_cow_server,                 // 28
[...]
   "VFS_INODE_LOAD_ALL_PAGES",              // 19

-  "GET_VSEG",                              // 20
-  "GLOBAL_UPDATE_PTE",                     // 21
+  "VMM_GLOBAL_RESIZE_VSEG",                // 20
+  "VMM_GLOBAL_UPDATE_PTE",                 // 21
   "undefined_22",                          // 22
   "undefined_23",                          // 23
   "MAPPER_SYNC",                           // 24
   "undefined_25",                          // 25
-  "VMM_DELETE_VSEG",                       // 26
+  "VMM_REMOVE_VSEG",                       // 26
   "VMM_CREATE_VSEG",                       // 27
   "VMM_SET_COW",                           // 28
[...]
   /////////////////////////////////////////////////////////////////////////////////////////
-  // [8] Marshaling functions attached to RPC_VRS_FS_UPDATE_DENTRY
+  // [8] Marshaling functions attached to RPC_VFS_FS_UPDATE_DENTRY
   /////////////////////////////////////////////////////////////////////////////////////////
[...]
   /////////////////////////////////////////////////////////////////////////////////////////
-  // [20] Marshaling functions attached to RPC_VMM_GET_VSEG
-  /////////////////////////////////////////////////////////////////////////////////////////
-
+  // [20] RPC_VMM_GET_VSEG deprecated [AG] sept 2019
+  /////////////////////////////////////////////////////////////////////////////////////////
+
+  /*
   //////////////////////////////////////////////////
   void rpc_vmm_get_vseg_client( cxy_t cxy,
[...]
   #endif
   }
-
-  /////////////////////////////////////////////////////////////////////////////////////////
-  // [21] Marshaling functions attached to RPC_VMM_GLOBAL_UPDATE_PTE
-  /////////////////////////////////////////////////////////////////////////////////////////
-
-  ///////////////////////////////////////////////////////
-  void rpc_vmm_global_update_pte_client( cxy_t cxy,
-                                         process_t * process,    // in
-                                         vpn_t       vpn,        // in
-                                         uint32_t    attr,       // in
-                                         ppn_t       ppn )       // in
-  {
-  #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
-  thread_t * this = CURRENT_THREAD;
-  uint32_t cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
-  printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-
-      uint32_t responses = 1;
-
-      // initialise RPC descriptor header
-      rpc_desc_t rpc;
-      rpc.index    = RPC_VMM_GLOBAL_UPDATE_PTE;
-      rpc.blocking = true;
-      rpc.rsp      = &responses;
-
-      // set input arguments in RPC descriptor
-      rpc.args[0] = (uint64_t)(intptr_t)process;
-      rpc.args[1] = (uint64_t)vpn;
-      rpc.args[2] = (uint64_t)attr;
-      rpc.args[3] = (uint64_t)ppn;
-
-      // register RPC request in remote RPC fifo
-      rpc_send( cxy , &rpc );
-
-  #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
-  cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
-  printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-  }
-
-  //////////////////////////////////////////////////
-  void rpc_vmm_global_update_pte_server( xptr_t xp )
-  {
-  #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
-  thread_t * this = CURRENT_THREAD;
-  uint32_t cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
-  printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-
-      process_t * process;
-      vpn_t       vpn;
-      uint32_t    attr;
-      ppn_t       ppn;
-
-      // get client cluster identifier and pointer on RPC descriptor
-      cxy_t        client_cxy = GET_CXY( xp );
-      rpc_desc_t * desc       = GET_PTR( xp );
-
-      // get input argument "process" & "vpn" from client RPC descriptor
-      process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
-      vpn     = (vpn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
-      attr    = (uint32_t)             hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
-      ppn     = (ppn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
-
-      // call local kernel function
-      vmm_global_update_pte( process , vpn , attr , ppn );
-
-  #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
-  cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
-  printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-  }
-
-  /////////////////////////////////////////////////////////////////////////////////////////
-  // [22] Marshaling functions attached to RPC_KCM_ALLOC
+  */
+
+  /////////////////////////////////////////////////////////////////////////////////////////
+  // [21] undefined
+  /////////////////////////////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////////////////////////////
+  // [22] RPC_KCM_ALLOC deprecated [AG] sept 2019
   /////////////////////////////////////////////////////////////////////////////////////////
[...]
   /////////////////////////////////////////////////////////////////////////////////////////
-  // [23] Marshaling functions attached to RPC_KCM_FREE
+  // [23] RPC_KCM_FREE deprecated [AG] sept 2019
   /////////////////////////////////////////////////////////////////////////////////////////
[...]
   /////////////////////////////////////////////////////////////////////////////////////////
-  // [25] Marshaling functions attached to RPC_MAPPER_HANDLE_MISS
-  /////////////////////////////////////////////////////////////////////////////////////////
-
-  /*
+  // [25] Marshaling functions attached to RPC_VMM_RESIZE_VSEG
+  /////////////////////////////////////////////////////////////////////////////////////////
+
   //////////////////////////////////////////////////////////
-  void rpc_mapper_handle_miss_client( cxy_t cxy,
-                                      struct mapper_s * mapper,
-                                      uint32_t page_id,
-                                      xptr_t * page_xp,
-                                      error_t * error )
-  {
-  #if DEBUG_RPC_MAPPER_HANDLE_MISS
-  thread_t * this = CURRENT_THREAD;
-  uint32_t cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
+  void rpc_vmm_resize_vseg_client( cxy_t cxy,
+                                   struct process_s * process,
+                                   struct vseg_s * vseg,
+                                   intptr_t new_base,
+                                   intptr_t new_size )
+  {
+  #if DEBUG_RPC_VMM_RESIZE_VSEG
+  thread_t * this = CURRENT_THREAD;
+  uint32_t cycle = (uint32_t)hal_get_cycles();
+  if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
   printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
[...]
       // initialise RPC descriptor header
       rpc_desc_t rpc;
-      rpc.index    = RPC_MAPPER_HANDLE_MISS;
+      rpc.index    = RPC_VMM_RESIZE_VSEG;
       rpc.blocking = true;
       rpc.rsp      = &responses;

       // set input arguments in RPC descriptor
-      rpc.args[0] = (uint64_t)(intptr_t)mapper;
-      rpc.args[1] = (uint64_t)page_id;
+      rpc.args[0] = (uint64_t)(intptr_t)process;
+      rpc.args[1] = (uint64_t)(intptr_t)vseg;
+      rpc.args[2] = (uint64_t)new_base;
+      rpc.args[3] = (uint64_t)new_size;

       // register RPC request in remote RPC fifo
       rpc_send( cxy , &rpc );

-      // get output values from RPC descriptor
-      *page_xp = (xptr_t)rpc.args[2];
-      *error   = (error_t)rpc.args[3];
-
-  #if DEBUG_RPC_MAPPER_HANDLE_MISS
-  cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
-  printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-  }
-
-  ///////////////////////////////////////////////
-  void rpc_mapper_handle_miss_server( xptr_t xp )
-  {
-  #if DEBUG_RPC_MAPPER_HANDLE_MISS
-  thread_t * this = CURRENT_THREAD;
-  uint32_t cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
-  printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-
-      mapper_t * mapper;
-      uint32_t   page_id;
-      xptr_t     page_xp;
-      error_t    error;
+  #if DEBUG_RPC_VMM_RESIZE_VSEG
+  cycle = (uint32_t)hal_get_cycles();
+  if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
+  printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
+  #endif
+  }
+
+  ////////////////////////////////////////////
+  void rpc_vmm_resize_vseg_server( xptr_t xp )
+  {
+  #if DEBUG_RPC_VMM_RESIZE_VSEG
+  thread_t * this = CURRENT_THREAD;
+  uint32_t cycle = (uint32_t)hal_get_cycles();
+  if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
+  printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
+  #endif
+
+      process_t * process;
+      vseg_t    * vseg;
+      intptr_t    new_base;
+      intptr_t    new_size;

       // get client cluster identifier and pointer on RPC descriptor
[...]
       // get arguments from client RPC descriptor
-      mapper  = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
-      page_id =                       hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
-
-      // call local kernel function
-      error = mapper_handle_miss( mapper,
-                                  page_id,
-                                  &page_xp );
-
-      // set output argument to client RPC descriptor
-      hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)page_xp );
-      hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
-
-  #if DEBUG_RPC_MAPPER_HANDLE_MISS
-  cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
-  printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
-  #endif
-  }
-  */
-
-  /////////////////////////////////////////////////////////////////////////////////////////
-  // [26] Marshaling functions attached to RPC_VMM_DELETE_VSEG
-  /////////////////////////////////////////////////////////////////////////////////////////
-
-  //////////////////////////////////////////////////
-  void rpc_vmm_delete_vseg_client( cxy_t cxy,
-                                   pid_t pid,
-                                   intptr_t vaddr )
-  {
-  #if DEBUG_RPC_VMM_DELETE_VSEG
+      process  = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
+      vseg     = (vseg_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
+      new_base = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
+      new_size = (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
+
+      // call relevant kernel function
+      vmm_resize_vseg( process,
+                       vseg,
+                       new_base,
+                       new_size );
+
+  #if DEBUG_RPC_VMM_RESIZE_VSEG
+  cycle = (uint32_t)hal_get_cycles();
+  if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
+  printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
+  #endif
+  }
+
+
+  /////////////////////////////////////////////////////////////////////////////////////////
+  // [26] Marshaling functions attached to RPC_VMM_REMOVE_VSEG
+  /////////////////////////////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////
+  void rpc_vmm_remove_vseg_client( cxy_t cxy,
+                                   process_t * process,
+                                   vseg_t * vseg )
+  {
+  #if DEBUG_RPC_VMM_REMOVE_VSEG
   thread_t * this = CURRENT_THREAD;
   uint32_t cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_VMM_DELETE_VSEG )
+  if( cycle > DEBUG_RPC_VMM_REMOVE_VSEG )
   printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
[...]
       // initialise RPC descriptor header
-      rpc.index    = RPC_VMM_DELETE_VSEG;
+      rpc.index    = RPC_VMM_REMOVE_VSEG;
       rpc.blocking = true;
       rpc.rsp      = &responses;

       // set input arguments in RPC descriptor
-      rpc.args[0] = (uint64_t)pid;
-      rpc.args[1] = (uint64_t)vaddr;
+      rpc.args[0] = (uint64_t)(intptr_t)process;
+      rpc.args[1] = (uint64_t)(intptr_t)vseg;

       // register RPC request in remote RPC fifo
       rpc_send( cxy , &rpc );

-  #if DEBUG_RPC_VMM_DELETE_VSEG
-  cycle = (uint32_t)hal_get_cycles();
-  if( cycle > DEBUG_RPC_VMM_DELETE_VSEG )
+  #if DEBUG_RPC_VMM_REMOVE_VSEG
+  cycle = (uint32_t)hal_get_cycles();
+  if( cycle > DEBUG_RPC_VMM_REMOVE_VSEG )
   printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
[...]
   ////////////////////////////////////////////
-  void rpc_vmm_delete_vseg_server( xptr_t xp )
-  {
-  #if DEBUG_RPC_VMM_DELETE_VSEG
-  uint32_t cycle = (uint32_t)hal_get_cycles();
-  thread_t * this = CURRENT_THREAD;
-  if( DEBUG_RPC_VMM_DELETE_VSEG < cycle )
-  printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
-  #endif
+  void rpc_vmm_remove_vseg_server( xptr_t xp )
+  {
+  #if DEBUG_RPC_VMM_REMOVE_VSEG
+  uint32_t cycle = (uint32_t)hal_get_cycles();
+  thread_t * this = CURRENT_THREAD;
+  if( DEBUG_RPC_VMM_REMOVE_VSEG < cycle )
+  printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
+  #endif
+
+      process_t * process;
+      vseg_t    * vseg;

       // get client cluster identifier and pointer on RPC descriptor
[...]
       // get arguments from RPC descriptor
-      pid_t    pid   = (pid_t)   hal_remote_l64( XPTR(client_cxy , &desc->args[0]) );
-      intptr_t vaddr = (intptr_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) );
+      process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
+      vseg    = (vseg_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );

       // call relevant kernel function
-      vmm_delete_vseg( pid , vaddr );
-
-  #if DEBUG_RPC_VMM_DELETE_VSEG
-  cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_RPC_VMM_DELETE_VSEG < cycle )
+      vmm_remove_vseg( process,
+                       vseg );
+
+  #if DEBUG_RPC_VMM_REMOVE_VSEG
+  cycle = (uint32_t)hal_get_cycles();
+  if( DEBUG_RPC_VMM_REMOVE_VSEG < cycle )
   printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
   __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
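All marshaling pairs above follow one contract: the client packs every argument into a 64-bit slot of rpc.args[], and the server reads the slots back with the same indexes and casts. A minimal sketch of that contract (simplified descriptor type, not the actual rpc_desc_t):

    #include <stdint.h>

    typedef struct { uint64_t args[10]; } rpc_desc_sketch_t;   /* simplified descriptor */

    /* client side: pack two pointers into 64-bit slots */
    static void pack_remove_vseg( rpc_desc_sketch_t * rpc , void * process , void * vseg )
    {
        rpc->args[0] = (uint64_t)(intptr_t)process;
        rpc->args[1] = (uint64_t)(intptr_t)vseg;
    }

    /* server side: unpack with matching indexes and casts */
    static void unpack_remove_vseg( rpc_desc_sketch_t * rpc , void ** process , void ** vseg )
    {
        *process = (void *)(intptr_t)rpc->args[0];
        *vseg    = (void *)(intptr_t)rpc->args[1];
    }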
trunk/kernel/kern/rpc.h
r635 → r640

   typedef enum
   {
-      RPC_UNDEFINED_0               = 0,   // RPC_PMEM_GET_PAGES     deprecated [AG]
-      RPC_UNDEFINED_1               = 1,   // RPC_PMEM_RELEASE_PAGES deprecated [AG]
-      RPC_UNDEFINED_2               = 2,   // RPC_PMEM_DISPLAY       deprecated [AG]
+      RPC_UNDEFINED_0               = 0,   //
+      RPC_UNDEFINED_1               = 1,   //
+      RPC_UNDEFINED_2               = 2,   //
       RPC_PROCESS_MAKE_FORK         = 3,
       RPC_USER_DIR_CREATE           = 4,
[...]
       RPC_VFS_INODE_LOAD_ALL_PAGES  = 19,

-      RPC_VMM_GET_VSEG              = 20,
-      RPC_VMM_GLOBAL_UPDATE_PTE     = 21,
-      RPC_UNDEFINED_22              = 22,  // RPC_KCM_ALLOC deprecated [AG]
-      RPC_UNDEFINED_23              = 23,  // RPC_KCM_FREE  deprecated [AG]
+      RPC_UNDEFINED_20              = 20,  //
+      RPC_UNDEFINED_21              = 21,  //
+      RPC_UNDEFINED_22              = 22,  //
+      RPC_UNDEFINED_23              = 23,  //
       RPC_MAPPER_SYNC               = 24,
-      RPC_UNDEFUNED_25              = 25,  // RPC_MAPPER_HANDLE_MISS deprecated [AG]
-      RPC_VMM_DELETE_VSEG           = 26,
+      RPC_VMM_RESIZE_VSEG           = 25,
+      RPC_VMM_REMOVE_VSEG           = 26,
       RPC_VMM_CREATE_VSEG           = 27,
       RPC_VMM_SET_COW               = 28,
-      RPC_UNDEFINED_29              = 29,  // RPC_VMM_DISPLAY deprecated [AG]
+      RPC_UNDEFINED_29              = 29,  //

       RPC_MAX_INDEX                 = 30,
[...]
   /***********************************************************************************
-  * [0] The RPC_PMEM_GET_PAGES allocates one or several pages in a remote cluster,
-  * and returns the local pointer on the page descriptor.
-  * deprecated [AG] may 2019
-  ***********************************************************************************
-  * @ cxy     : server cluster identifier
-  * @ order   : [in]  ln2( number of requested pages )
-  * @ page    : [out] local pointer on page descriptor / NULL if failure
-  **********************************************************************************/
-
-  /*
-  void rpc_pmem_get_pages_client( cxy_t cxy,
-                                  uint32_t order,
-                                  struct page_s ** page );
-
-  void rpc_pmem_get_pages_server( xptr_t xp );
-  */
-
-  /***********************************************************************************
-  * [1] The RPC_PMEM_RELEASE_PAGES release one or several pages to a remote cluster.
-  * deprecated [AG] may 2019
-  ***********************************************************************************
-  * @ cxy     : server cluster identifier
-  * @ page    : [in] local pointer on page descriptor to release.
-  **********************************************************************************/
-
-  /*
-  void rpc_pmem_release_pages_client( cxy_t cxy,
-                                      struct page_s * page );
-
-  void rpc_pmem_release_pages_server( xptr_t xp );
-  */
-
-  /***********************************************************************************
-  * [2] The RPC_PPM_DISPLAY allows any client thread to require any remote cluster
-  * identified by the <cxy> argument to display the physical memory allocator state.
-  * deprecated [AG] may 2019
-  **********************************************************************************/
-
-  /*
-  void rpc_ppm_display_client( cxy_t cxy );
-
-  void rpc_ppm_display_server( xptr_t xp );
-  */
+  * [0] undefined
+  **********************************************************************************/
+
+  /***********************************************************************************
+  * [1] undefined
+  **********************************************************************************/
+
+  /***********************************************************************************
+  * [2] undefined
+  **********************************************************************************/

   /***********************************************************************************
[...]
   /***********************************************************************************
-  * [20] The RPC_VMM_GET_VSEG returns an extended pointer
-  * on the vseg containing a given virtual address in a given process.
-  * The server cluster is supposed to be the reference cluster.
-  * It returns a non zero error value if no vseg has been founded.
-  ***********************************************************************************
-  * @ cxy     : server cluster identifier.
-  * @ process : [in]  pointer on process descriptor in server cluster.
-  * @ vaddr   : [in]  virtual address to be searched.
-  * @ vseg_xp : [out] buffer for extended pointer on vseg in client cluster.
-  * @ error   : [out] local pointer on buffer for error code (in client cluster).
-  **********************************************************************************/
-  void rpc_vmm_get_vseg_client( cxy_t cxy,
-                                struct process_s * process,
-                                intptr_t vaddr,
-                                xptr_t * vseg_xp,
-                                error_t * error );
-
-  void rpc_vmm_get_vseg_server( xptr_t xp );
-
-  /***********************************************************************************
-  * [21] The RPC_VMM_GLOBAL_UPDATE_PTE can be used by a thread that is not running
-  * in reference cluster, to ask the reference cluster to update a specific entry,
-  * identified by the <vpn> argument in all GPT copies of a process identified by
-  * the <process> argument, using the values defined by <attr> and <ppn> arguments.
-  * The server cluster is supposed to be the reference cluster.
-  * It does not return any error code as the called function vmm_global_update_pte()
-  * cannot fail.
-  ***********************************************************************************
-  * @ cxy     : server cluster identifier.
-  * @ process : [in] pointer on process descriptor in server cluster.
-  * @ vpn     : [in] virtual address to be searched.
-  * @ attr    : [in] PTE attributes.
-  * @ ppn     : [it] PTE PPN.
-  **********************************************************************************/
-  void rpc_vmm_global_update_pte_client( cxy_t cxy,
-                                         struct process_s * process,
-                                         vpn_t vpn,
-                                         uint32_t attr,
-                                         ppn_t ppn );
-
-  void rpc_vmm_global_update_pte_server( xptr_t xp );
-
-  /***********************************************************************************
-  * [22] The RPC_KCM_ALLOC allocates memory from a given KCM in a remote cluster,
-  * and returns an extended pointer on the allocated object.
-    It returns XPTR_NULL if physical memory cannot be allocated.
-  ***********************************************************************************
-  * @ cxy       : server cluster identifier.
-  * @ kmem_type : [in]  KCM object type (as defined in kmem.h).
-  * @ buf_xp    : [out] buffer for extended pointer on allocated buffer.
-  **********************************************************************************/
-
-  /*
-  void rpc_kcm_alloc_client( cxy_t cxy,
-                             uint32_t kmem_type,
-                             xptr_t * buf_xp );
-
-  void rpc_kcm_alloc_server( xptr_t xp );
-  */
-
-  /***********************************************************************************
-  * [23] The RPC_KCM_FREE releases memory allocated for a KCM object of a given type,
-  * in a remote cluster.
-  ***********************************************************************************
-  * @ cxy       : server cluster identifier.
-  * @ buf       : [in] local pointer on allocated buffer.
-  * @ kmem_type : [in] KCM object type (as defined in kmem.h).
-  **********************************************************************************/
-
-  /*
-  void rpc_kcm_free_client( cxy_t cxy,
-                            void * buf,
-                            uint32_t kmem_type );
-
-  void rpc_kcm_free_server( xptr_t xp );
-  */
+  * [20] undefined
+  **********************************************************************************/
+
+  /***********************************************************************************
+  * [21] undefined
+  **********************************************************************************/
+
+  /***********************************************************************************
+  * [22] undefined
+  **********************************************************************************/
+
+  /***********************************************************************************
+  * [23] undefined
+  **********************************************************************************/

   /***********************************************************************************
[...]
   /***********************************************************************************
-  * [25] The RPC_MAPPER_HANDLE_MISS allows a client thread to request a remote
-  * mapper to load a missing page from the IOC device.
-  * On the server side, this RPC call the mapper_handle_miss() function and return
-  * an extended pointer on the allocated page descriptor and an error status.
+  * [25] The RPC_VMM_RESIZE_VSEG allows a client thread to request a remote vseg
+  * resize. Both the VSL and the GPT are updated in the remote cluster.
   ***********************************************************************************
   * @ cxy       : server cluster identifier.
-  * @ mapper    : [in]  local pointer on mapper.
-  * @ page_id   : [in]  missing page index in mapper
-  * @ buffer    : [in]  user space pointer / kernel extended pointer
-  * @ page_xp   : [out] pointer on buffer for extended pointer on page descriptor.
-  * @ error     : [out] error status (0 if success).
-  **********************************************************************************/
-  /*
-  void rpc_mapper_handle_miss_client( cxy_t cxy,
-                                      struct mapper_s * mapper,
-                                      uint32_t page_id,
-                                      xptr_t * page_xp,
-                                      error_t * error );
-
-  void rpc_mapper_handle_miss_server( xptr_t xp );
-  */
+  * @ process   : [in] local pointer on remote process.
+  * @ vseg      : [in] local pointer on remote vseg.
+  * @ new_base  : [in] new vseg base address.
+  * @ new_size  : [in] new vseg size.
+  **********************************************************************************/
+  void rpc_vmm_resize_vseg_client( cxy_t cxy,
+                                   struct process_s * process,
+                                   struct vseg_s * vseg,
+                                   intptr_t new_base,
+                                   intptr_t new_size );
+
+  void rpc_vmm_resize_vseg_server( xptr_t xp );

   /***********************************************************************************
-  * [26] The RPC_VMM_DELETE_VSEG allows any client thread to request a remote
-  * cluster to delete from a given VMM, identified by the <pid> argument
-  * a given vseg, identified by the <vaddr> argument.
+  * [26] The RPC_VMM_REMOVE_VSEG allows a client thread to request a remote vseg
+  * delete. Both the VSL and the GPT are updated in the remote cluster.
   ***********************************************************************************
   * @ cxy       : server cluster identifier.
-  * @ pid       : [in] target process identifier.
-  * @ vaddr     : [in] vseg base address.
-  **********************************************************************************/
-  void rpc_vmm_delete_vseg_client( cxy_t cxy,
-                                   pid_t pid,
-                                   intptr_t vaddr );
-
-  void rpc_vmm_delete_vseg_server( xptr_t xp );
+  * @ process   : [in] local pointer on remote process.
+  * @ vseg      : [in] local pointer on remote vseg.
+  **********************************************************************************/
+  void rpc_vmm_remove_vseg_client( cxy_t cxy,
+                                   struct process_s * process,
+                                   struct vseg_s * vseg );
+
+  void rpc_vmm_remove_vseg_server( xptr_t xp );
[...]
   /***********************************************************************************
-  * [29] The RPC_VMM_DISPLAY allows any client thread to display the VMM state
-  * of a remote reference process identified by the <cxy> and <process> arguments.
-  * The type of display is defined by the <detailed> boolean argument.
-  ***********************************************************************************
-  * @ cxy      : server cluster identifier.
-  * @ process  : [in] local pointer on reference process descriptor.
-  * @ detailed : [in] detailed display if true.
-  **********************************************************************************/
-
-  /*
-  void rpc_hal_vmm_display_client( cxy_t cxy,
-                                   struct process_s * process,
-                                   bool_t detailed );
-
-  void rpc_hal_vmm_display_server( xptr_t xp );
-  */
+  * [29] undefined
+  **********************************************************************************/

   #endif
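A typical call site for the two new entry points dispatches on locality: call the kernel function directly when the target cluster is local, and go through the RPC otherwise, exactly as user_dir.c does further down. A hedged sketch, with a hypothetical helper name and the kernel headers assumed:

    /* process and vseg are local pointers valid in cluster cxy */
    static void remove_vseg_anywhere( cxy_t cxy , process_t * process , vseg_t * vseg )
    {
        if( cxy == local_cxy ) vmm_remove_vseg( process , vseg );                  /* local call  */
        else                   rpc_vmm_remove_vseg_client( cxy , process , vseg ); /* remote call */
    }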
trunk/kernel/kern/scheduler.c
r635 → r640

   #if (DEBUG_SCHED_YIELD & 0x1)
   if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
-  sched_display( lid );
+  sched_remote_display( local_cxy , lid );
   #endif
[...]
   } // end sched_yield()

-  ///////////////////////////////
-  void sched_display( lid_t lid )
-  {
-      list_entry_t * iter;
-      thread_t     * thread;
-
-      core_t      * core  = &LOCAL_CLUSTER->core_tbl[lid];
-      scheduler_t * sched = &core->scheduler;
-
-      // get pointers on TXT0 chdev
-      xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
-      cxy_t     txt0_cxy = GET_CXY( txt0_xp );
-      chdev_t * txt0_ptr = GET_PTR( txt0_xp );
-
-      // get extended pointer on remote TXT0 lock
-      xptr_t lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
-
-      // get TXT0 lock
-      remote_busylock_acquire( lock_xp );
-
-      nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
-      local_cxy , lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
-      (uint32_t)hal_get_cycles() );
-
-      // display kernel threads
-      LIST_FOREACH( &sched->k_root , iter )
-      {
-          thread = LIST_ELEMENT( iter , thread_t , sched_list );
-          if (thread->type == THREAD_DEV)
-          {
-              nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
-              thread_type_str( thread->type ), thread->process->pid, thread->trdid,
-              thread, thread->blocked, thread->flags, thread->chdev->name );
-          }
-          else
-          {
-              nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
-              thread_type_str( thread->type ), thread->process->pid, thread->trdid,
-              thread, thread->blocked, thread->flags );
-          }
-      }
-
-      // display user threads
-      LIST_FOREACH( &sched->u_root , iter )
-      {
-          thread = LIST_ELEMENT( iter , thread_t , sched_list );
-          nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
-          thread_type_str( thread->type ), thread->process->pid, thread->trdid,
-          thread, thread->blocked, thread->flags );
-      }
-
-      // release TXT0 lock
-      remote_busylock_release( lock_xp );
-
-  } // end sched_display()

   /////////////////////////////////////
[...]
   nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
   cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
+  nolock_printk("  type | pid | trdid | desc | block | flags | func\n");

   // display kernel threads
[...]
   hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );

-  nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
+  nolock_printk(" - %s | %X | %X | %X | %X | %X | %s\n",
   thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
   }
   else
   {
-  nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
+  nolock_printk(" - %s | %X | %X | %X | %X | %X |\n",
   thread_type_str( type ), pid, trdid, thread, blocked, flags );
   }
[...]
   process_t * process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
   pid_t       pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
-
-  nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
-  thread_type_str( type ), pid, trdid, thread, blocked, flags );
+  void      * func    = hal_remote_lpt ( XPTR( cxy , &thread->entry_func ) );
+
+  nolock_printk(" - %s | %X | %X | %X | %X | %X | %x\n",
+  thread_type_str( type ), pid, trdid, thread, blocked, flags, (uint32_t)func );

   // get next user thread list_entry
trunk/kernel/kern/scheduler.h
r637 → r640

   /*********************************************************************************************
-  * This debug function displays on TXT0 the internal state of a local scheduler,
-  * identified by the core local index <lid>. It must be called by a local thread.
-  *********************************************************************************************
-  * @ lid : local index of target core.
-  ********************************************************************************************/
-  void sched_display( lid_t lid );
-
-  /*********************************************************************************************
   * This debug function displays on TXT0 the internal state of a scheduler,
   * identified by the target cluster identifier <cxy> and the core local index <lid>.
trunk/kernel/kern/thread.c
r637 → r640

   core_t * core = thread->core;

-  #if DEBUG_THREAD_DESTROY
-  uint32_t cycle = (uint32_t)hal_get_cycles();
+  #if DEBUG_THREAD_DESTROY || CONFIG_INSTRUMENTATION_PGFAULTS
+  uint32_t cycle;
   thread_t * this = CURRENT_THREAD;
+  #endif
+
+  #if (DEBUG_THREAD_DESTROY & 1)
+  cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_THREAD_DESTROY < cycle )
   printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
[...]
   #if CONFIG_INSTRUMENTATION_PGFAULTS
-      process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
-      process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
-      process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
-      process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
-      process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
-      process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
+      process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
+      process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
+      process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
+      process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
+      process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
+      process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
+  #endif
+
+  #if (CONFIG_INSTRUMENTATION_PGFAULTS & 1)
+      uint32_t false_nr    = thread->info.false_pgfault_nr;
+      uint32_t local_nr    = thread->info.local_pgfault_nr;
+      uint32_t global_nr   = thread->info.global_pgfault_nr;
+      uint32_t false_cost  = thread->info.false_pgfault_cost;
+      uint32_t local_cost  = thread->info.local_pgfault_cost;
+      uint32_t global_cost = thread->info.global_pgfault_cost;
+      printk("***** thread[%x,%x] page-faults\n"
+      " - false  %d ( %d cycles )\n"
+      " - local  %d ( %d cycles )\n"
+      " - global %d ( %d cycles )\n",
+      this->process->pid, this->trdid,
+      false_nr , false_cost / false_nr,
+      local_nr , local_cost / local_nr,
+      global_nr, global_cost / global_nr );
   #endif
[...]
   cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_THREAD_IDLE < cycle )
-  sched_display( CURRENT_THREAD->core->lid );
+  sched_remote_display( local_cxy , CURRENT_THREAD->core->lid );
   #endif
   // search a runable thread
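One caveat with the new per-thread print above: each cumulated cost is divided by the corresponding event count, which is zero for a thread that never took that kind of fault. A guarded average such as this hypothetical helper avoids the division by zero:

    /* hypothetical helper: average cost per event, safe when the count is zero */
    static inline uint32_t avg_cost( uint32_t total_cost , uint32_t nr_events )
    {
        return (nr_events == 0) ? 0 : (total_cost / nr_events);
    }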
trunk/kernel/kernel_config.h
r637 → r640

   #define DEBUG_HAL_GPT_CREATE              0
   #define DEBUG_HAL_GPT_DESTROY             0
-  #define DEBUG_HAL_GPT_LOCK_PTE            0
+  #define DEBUG_HAL_GPT_LOCK_PTE            2
   #define DEBUG_HAL_GPT_SET_COW             0
   #define DEBUG_HAL_GPT_SET_PTE             0
[...]
   #define DEBUG_VMM_GET_ONE_PPN             0
   #define DEBUG_VMM_GET_PTE                 0
+  #define DEBUG_VMM_GLOBAL_DELETE_VSEG      0
+  #define DEBUG_VMM_GLOBAL_RESIZE_VSEG      0
   #define DEBUG_VMM_HANDLE_PAGE_FAULT       0
   #define DEBUG_VMM_HANDLE_COW              0
[...]
   #define LOCK_PROCESS_FDARRAY   27   // remote (Q) protect array of open files in owner process
   #define LOCK_PROCESS_DIR       28   // remote (Q) protect xlist of open directories in process
+  #define LOCK_VMM_VSL           29   // remote (Q) protect VSL (local list of vsegs)

   #define LOCK_PROCESS_THTBL     30   // local (RW) protect local array of threads in a process
[...]
   #define LOCK_VFS_SIZE          32   // remote (RW) protect inode state and associated mapper
   #define LOCK_VFS_FILE          33   // remote (RW) protect file descriptor state
-  #define LOCK_VMM_VSL           34   // remote (RW) protect VSL (local list of vsegs)
-  #define LOCK_VFS_MAIN          35   // remote (RW) protect vfs traversal (in root inode)
-  #define LOCK_FATFS_FAT         36   // remote (RW) protect exclusive access to the FATFS FAT
+  #define LOCK_VFS_MAIN          34   // remote (RW) protect vfs traversal (in root inode)
+  #define LOCK_FATFS_FAT         35   // remote (RW) protect exclusive access to the FATFS FAT
[...]
   #define CONFIG_INSTRUMENTATION_PGFAULTS    0
   #define CONFIG_INSTRUMENTATION_FOOTPRINT   0
+  #define CONFIG_INSTRUMENTATION_GPT         1
trunk/kernel/libk/remote_barrier.h
r623 → r640

   * If the (x_size, y_size, nthreads) arguments are defined in the barrier attributes,
   * the barrier is implemented as a hierarchical quad-tree covering all clusters in the
-  * (x_size * ysize) mesh, including cluster (0,0), with nthreads per cluster, and called
-  * DQT : Distributed Quad Tree. This DQT implementation supposes a regular architecture,
-  uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity ));
+  * (x_size * ysize) mesh, including cluster (0,0), with nthreads per cluster.
+  * This DQT (Distributed Quad Tree) implementation assumes a regular architecture,
   * and a strong constraint on the threads placement: exactly "nthreads" threads per
   * cluster in the (x_size * y_size) mesh.
[...]
   * It is implemented in the reference process cluster, and contains
   * - the barrier identifier,
-  * - the implementation type (simple or QDT),
+  * - the implementation type (simple or dqt),
   * - an xlist implementing the set of barriers dynamically created by a given process,
   * - a pointer on the implementation specific descriptor (simple_barrier / sqt_barrier).
trunk/kernel/libk/user_dir.c
r635 r640 294 294 295 295 // delete the vseg 296 if( ref_cxy == local_cxy) 297 vmm_delete_vseg( ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT ); 298 else 299 rpc_vmm_delete_vseg_client( ref_cxy, ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT ); 300 296 if( ref_cxy == local_cxy) vmm_remove_vseg( ref_ptr, vseg ); 297 else rpc_vmm_remove_vseg_client( ref_cxy, ref_ptr, vseg ); 298 301 299 // release the user_dir descriptor 302 300 req.type = KMEM_KCM; … … 459 457 rpc.rsp = &responses; 460 458 rpc.blocking = false; 461 rpc.index = RPC_VMM_DELETE_VSEG;459 rpc.index = RPC_VMM_REMOVE_VSEG; 462 460 rpc.thread = this; 463 461 rpc.lid = this->core->lid; -
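The change above is an instance of a pattern used throughout this changeset: an operation on a process copy is executed by a direct call when the target cluster is local, and through the matching RPC client stub otherwise. A schematic sketch of that dispatch, using the vmm_remove_vseg() / rpc_vmm_remove_vseg_client() pair (the wrapper name is hypothetical; process_ptr and vseg_ptr are local pointers valid in target_cxy):

    // Schematic dispatch: run the operation in place when the target VSL
    // is local, otherwise delegate it to an RPC thread in the remote cluster.
    void service_remove_vseg( cxy_t       target_cxy,
                              process_t * process_ptr,
                              vseg_t    * vseg_ptr )
    {
        if( target_cxy == local_cxy )      // target VSL is in this cluster
        {
            vmm_remove_vseg( process_ptr , vseg_ptr );
        }
        else                               // target VSL is in a remote cluster
        {
            rpc_vmm_remove_vseg_client( target_cxy , process_ptr , vseg_ptr );
        }
    }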
trunk/kernel/mm/vmm.c
r635 r640 32 32 #include <printk.h> 33 33 #include <memcpy.h> 34 #include <remote_rwlock.h>35 34 #include <remote_queuelock.h> 36 35 #include <list.h> … … 313 312 314 313 // initialize the lock protecting the VSL 315 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );314 remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 316 315 317 316 … … 425 424 426 425 // take the VSL lock 427 remote_rwlock_wr_acquire( lock_xp );426 remote_queuelock_acquire( lock_xp ); 428 427 429 428 // scan the VSL to delete all non kernel vsegs … … 474 473 475 474 // release the VSL lock 476 remote_rwlock_wr_release( lock_xp );475 remote_queuelock_release( lock_xp ); 477 476 478 477 // FIXME the process copies must also be handled... … … 491 490 492 491 } // end vmm_user_reset() 492 493 ///////////////////////////////////////////////// 494 void vmm_global_delete_vseg( process_t * process, 495 intptr_t base ) 496 { 497 pid_t pid; 498 cxy_t owner_cxy; 499 lpid_t owner_lpid; 500 501 xlist_entry_t * process_root_ptr; 502 xptr_t process_root_xp; 503 xptr_t process_iter_xp; 504 505 xptr_t remote_process_xp; 506 cxy_t remote_process_cxy; 507 process_t * remote_process_ptr; 508 509 xptr_t vsl_root_xp; 510 xptr_t vsl_lock_xp; 511 xptr_t vsl_iter_xp; 512 513 #if DEBUG_VMM_GLOBAL_DELETE_VSEG 514 uint32_t cycle = (uint32_t)hal_get_cycles(); 515 thread_t * this = CURRENT_THREAD; 516 #endif 517 518 #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) 519 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 520 printk("\n[%s] thread[%x,%x] : process %x / base %x / cycle %d\n", 521 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle ); 522 #endif 523 524 // get owner process cluster and local index 525 pid = process->pid; 526 owner_cxy = CXY_FROM_PID( pid ); 527 owner_lpid = LPID_FROM_PID( pid ); 528 529 // get extended pointer on root of process copies xlist in owner cluster 530 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 531 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 532 533 // loop on process copies 534 XLIST_FOREACH( process_root_xp , process_iter_xp ) 535 { 536 // get cluster and local pointer on remote process 537 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 538 remote_process_ptr = GET_PTR( remote_process_xp ); 539 remote_process_cxy = GET_CXY( remote_process_xp ); 540 541 // build extended pointers on remote VSL root and lock 542 vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root ); 543 vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock ); 544 545 // get lock on remote VSL 546 remote_queuelock_acquire( vsl_lock_xp ); 547 548 // loop on vsegs in remote process VSL 549 XLIST_FOREACH( vsl_root_xp , vsl_iter_xp ) 550 { 551 // get pointers on current vseg 552 xptr_t vseg_xp = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist ); 553 vseg_t * vseg_ptr = GET_PTR( vseg_xp ); 554 555 // get current vseg base address 556 intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy, 557 &vseg_ptr->min ) ); 558 559 if( vseg_base == base ) // found searched vseg 560 { 561 if( remote_process_cxy == local_cxy ) 562 { 563 vmm_remove_vseg( process, 564 vseg_ptr ); 565 } 566 else 567 { 568 rpc_vmm_remove_vseg_client( remote_process_cxy, 569 remote_process_ptr, 570 vseg_ptr ); 571 } 572 573 #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) 574 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 575 printk("\n[%s] thread[%x,%x] deleted vseg %x for process %x in cluster %x\n", 576 __FUNCTION__, 
this->process->pid, this->trdid, base, process->pid, remote_process_cxy ); 577 #endif 578 579 } 580 } // end of loop on vsegs 581 582 #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) 583 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 584 hal_vmm_display( remote_process_xp , false ); 585 #endif 586 587 // release lock on remote VSL 588 remote_queuelock_release( vsl_lock_xp ); 589 590 } // end of loop on process copies 591 592 #if DEBUG_VMM_GLOBAL_DELETE_VSEG 593 cycle = (uint32_t)hal_get_cycles(); 594 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 595 printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n", 596 __FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle ); 597 #endif 598 599 } // end vmm_global_delete_vseg() 600 601 //////////////////////////////////////////////// 602 void vmm_global_resize_vseg( process_t * process, 603 intptr_t base, 604 intptr_t new_base, 605 intptr_t new_size ) 606 { 607 pid_t pid; 608 cxy_t owner_cxy; 609 lpid_t owner_lpid; 610 611 xlist_entry_t * process_root_ptr; 612 xptr_t process_root_xp; 613 xptr_t process_iter_xp; 614 615 xptr_t remote_process_xp; 616 cxy_t remote_process_cxy; 617 process_t * remote_process_ptr; 618 619 xptr_t vsl_root_xp; 620 xptr_t vsl_lock_xp; 621 xptr_t vsl_iter_xp; 622 623 #if DEBUG_VMM_GLOBAL_RESIZE_VSEG 624 uint32_t cycle = (uint32_t)hal_get_cycles(); 625 thread_t * this = CURRENT_THREAD; 626 #endif 627 628 #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) 629 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 630 printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n", 631 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle ); 632 #endif 633 634 // get owner process cluster and local index 635 pid = process->pid; 636 owner_cxy = CXY_FROM_PID( pid ); 637 owner_lpid = LPID_FROM_PID( pid ); 638 639 // get extended pointer on root of process copies xlist in owner cluster 640 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 641 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 642 643 // loop on process copies 644 XLIST_FOREACH( process_root_xp , process_iter_xp ) 645 { 646 // get cluster and local pointer on remote process 647 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 648 remote_process_ptr = GET_PTR( remote_process_xp ); 649 remote_process_cxy = GET_CXY( remote_process_xp ); 650 651 // build extended pointers on remote VSL root and lock 652 vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root ); 653 vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock ); 654 655 // get lock on remote VSL 656 remote_queuelock_acquire( vsl_lock_xp ); 657 658 // loop on vsegs in remote process VSL 659 XLIST_FOREACH( vsl_root_xp , vsl_iter_xp ) 660 { 661 // get pointers on current vseg 662 xptr_t vseg_xp = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist ); 663 vseg_t * vseg_ptr = GET_PTR( vseg_xp ); 664 665 // get current vseg base address 666 intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy, 667 &vseg_ptr->min ) ); 668 669 if( vseg_base == base ) // found searched vseg 670 { 671 if( remote_process_cxy == local_cxy ) 672 { 673 vmm_resize_vseg( remote_process_ptr, 674 vseg_ptr, 675 new_base, 676 new_size ); 677 } 678 else 679 { 680 rpc_vmm_resize_vseg_client( remote_process_cxy, 681 remote_process_ptr, 682 vseg_ptr, 683 new_base, 684 new_size ); 685 } 686 687 #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) 688 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 689 
printk("\n[%s] thread[%x,%x] resized vseg %x for process %x in cluster %x\n", 690 __FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy ); 691 #endif 692 693 } 694 } // end of loop on vsegs 695 696 #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) 697 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 698 hal_vmm_display( remote_process_xp , false ); 699 #endif 700 701 // release lock on remote VSL 702 remote_queuelock_release( vsl_lock_xp ); 703 } // end of loop on process copies 704 705 #if DEBUG_VMM_GLOBAL_RESIZE_VSEG 706 cycle = (uint32_t)hal_get_cycles(); 707 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 708 printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n", 709 __FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle ); 710 #endif 711 712 } // end vmm_global_resize_vseg() 493 713 494 714 //////////////////////////////////////////////// … … 498 718 ppn_t ppn ) 499 719 { 720 pid_t pid; 721 cxy_t owner_cxy; 722 lpid_t owner_lpid; 723 500 724 xlist_entry_t * process_root_ptr; 501 725 xptr_t process_root_xp; … … 507 731 xptr_t remote_gpt_xp; 508 732 509 pid_t pid; 510 cxy_t owner_cxy; 511 lpid_t owner_lpid; 512 513 #if DEBUG_VMM_UPDATE_PTE 733 #if DEBUG_VMM_GLOBAL_UPDATE_PTE 514 734 uint32_t cycle = (uint32_t)hal_get_cycles(); 515 735 thread_t * this = CURRENT_THREAD; 516 if( DEBUG_VMM_UPDATE_PTE < cycle ) 736 #endif 737 738 739 #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) 740 if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) 517 741 printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / cycle %d\n", 518 742 __FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle ); 519 743 #endif 520 744 521 // get extended pointer on root of process copies xlist in owner cluster745 // get owner process cluster and local index 522 746 pid = process->pid; 523 747 owner_cxy = CXY_FROM_PID( pid ); 524 748 owner_lpid = LPID_FROM_PID( pid ); 749 750 // get extended pointer on root of process copies xlist in owner cluster 525 751 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 526 752 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 527 753 528 // check local cluster is owner cluster 529 assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n"); 530 531 // loop on destination process copies 754 // loop on process copies 532 755 XLIST_FOREACH( process_root_xp , process_iter_xp ) 533 756 { … … 537 760 remote_process_cxy = GET_CXY( remote_process_xp ); 538 761 539 #if (DEBUG_VMM_UPDATE_PTE & 1)540 if( DEBUG_VMM_UPDATE_PTE < cycle )762 #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) 763 if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) 541 764 printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n", 542 765 __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); … … 550 773 } 551 774 552 #if DEBUG_VMM_UPDATE_PTE775 #if DEBUG_VMM_GLOBAL_UPDATE_PTE 553 776 cycle = (uint32_t)hal_get_cycles(); 554 if( DEBUG_VMM_UPDATE_PTE < cycle )777 if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) 555 778 printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n", 556 779 __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); 557 780 #endif 558 781 559 #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)782 560 783 hal_vmm_display( process , true ); 561 784 #endif … … 772 995 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock ); 773 996 774 // take the lock protecting the parent VSL 775 remote_rwlock_rd_acquire( parent_lock_xp );997 // take the lock protecting the parent VSL 998 remote_queuelock_acquire( parent_lock_xp ); 776 999 777 1000 // loop on parent VSL xlist … … 809 1032 vseg_init_from_ref( child_vseg , parent_vseg_xp ); 810 1033 811 // build extended pointer on VSL lock812 xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );1034 // build extended pointer on child VSL lock 1035 xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock ); 813 1036 814 // take the VSL lock815 remote_rwlock_wr_acquire(lock_xp );1037 // take the child VSL lock 1038 remote_queuelock_acquire( child_lock_xp ); 816 1039 817 1040 // register child vseg in child VSL 818 1041 vmm_attach_vseg_to_vsl( child_vmm , child_vseg ); 819 1042 820 // release the VSL lock821 remote_rwlock_wr_release(lock_xp );1043 // release the child VSL lock 1044 remote_queuelock_release( child_lock_xp ); 822 1045 823 1046 #if DEBUG_VMM_FORK_COPY … … 866 1089 867 1090 // release the parent VSL lock 868 remote_rwlock_rd_release( parent_lock_xp );1091 remote_queuelock_release( parent_lock_xp ); 869 1092 870 1093 // initialize the child VMM STACK allocator … … 939 1162 940 1163 // take the VSL lock 941 remote_rwlock_wr_acquire( vsl_lock_xp );1164 remote_queuelock_acquire( vsl_lock_xp ); 942 1165 943 1166 // scan the VSL to delete all registered vsegs … … 968 1191 969 1192 // release the VSL lock 970 remote_rwlock_wr_release( vsl_lock_xp );1193 remote_queuelock_release( vsl_lock_xp ); 971 1194 972 1195 // remove all registered MMAP vsegs … … 1042 1265 1043 1266 } // end vmm_check_conflict() 1044 1045 1046 1267 1047 1268 //////////////////////////////////////////////// … … 1060 1281 error_t error; 1061 1283 #if DEBUG_VMM_CREATE_VSEG 1284 thread_t * this = CURRENT_THREAD; 1285 uint32_t cycle; 1286 #endif 1287 1062 1288 #if (DEBUG_VMM_CREATE_VSEG & 1) 1063 thread_t * this = CURRENT_THREAD; 1064 uint32_t cycle = (uint32_t)hal_get_cycles(); 1289 cycle = (uint32_t)hal_get_cycles(); 1065 1290 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1066 1291 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n", … … 1180 1405 1181 1406 // take the VSL lock 1182 remote_rwlock_wr_acquire( lock_xp );1407 remote_queuelock_acquire( lock_xp ); 1183 1408 1184 1409 // attach vseg to VSL … … 1186 1411 1187 1412 // release the VSL lock 1188 remote_rwlock_wr_release( lock_xp );1413 remote_queuelock_release( lock_xp ); 1189 1414 1190 1415 #if DEBUG_VMM_CREATE_VSEG 1191 1416 cycle = (uint32_t)hal_get_cycles(); 1192 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1417 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1193 1419 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n", 1194 1420 __FUNCTION__, this->process->pid, this->trdid, … … 1200 1426 1201 1427 } // vmm_create_vseg() 1428 1429 //////////////////////////////////////////////////////////////////////////////////////////// 1430 // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions. 1431 // Depending on the vseg <type>, it decrements the physical page refcount, and 1432 // conditionally releases to the relevant kmem the physical page identified by <ppn>. 1433 //////////////////////////////////////////////////////////////////////////////////////////// 1434 // @ process : local pointer on process. 1435 // @ vseg : local pointer on vseg. 1436 // @ ppn : released physical page index. 
1436 //////////////////////////////////////////////////////////////////////////////////////////// 1437 static void vmm_ppn_release( process_t * process, 1438 vseg_t * vseg, 1439 ppn_t ppn ) 1440 { 1441 bool_t do_release; 1442 1443 // get vseg type 1444 vseg_type_t type = vseg->type; 1445 1446 // compute is_ref 1447 bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy); 1448 1449 // get pointers on physical page descriptor 1450 xptr_t page_xp = ppm_ppn2page( ppn ); 1451 cxy_t page_cxy = GET_CXY( page_xp ); 1452 page_t * page_ptr = GET_PTR( page_xp ); 1453 1454 // decrement page refcount 1455 xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount ); 1456 hal_remote_atomic_add( count_xp , -1 ); 1457 1458 // compute the do_release condition depending on vseg type 1459 if( (type == VSEG_TYPE_FILE) || 1460 (type == VSEG_TYPE_KCODE) || 1461 (type == VSEG_TYPE_KDATA) || 1462 (type == VSEG_TYPE_KDEV) ) 1463 { 1464 // no physical page release for FILE and KERNEL 1465 do_release = false; 1466 } 1467 else if( (type == VSEG_TYPE_CODE) || 1468 (type == VSEG_TYPE_STACK) ) 1469 { 1470 // always release physical page for private vsegs 1471 do_release = true; 1472 } 1473 else if( (type == VSEG_TYPE_ANON) || 1474 (type == VSEG_TYPE_REMOTE) ) 1475 { 1476 // release physical page if reference cluster 1477 do_release = is_ref; 1478 } 1479 else if( is_ref ) // vseg_type == DATA in reference cluster 1480 { 1481 // get extended pointers on forks and lock field in page descriptor 1482 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1483 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1484 1485 // take lock protecting "forks" counter 1486 remote_busylock_acquire( lock_xp ); 1487 1488 // get number of pending forks from page descriptor 1489 uint32_t forks = hal_remote_l32( forks_xp ); 1490 1491 // decrement pending forks counter if required 1492 if( forks ) hal_remote_atomic_add( forks_xp , -1 ); 1493 1494 // release lock protecting "forks" counter 1495 remote_busylock_release( lock_xp ); 1496 1497 // release physical page if forks == 0 1498 do_release = (forks == 0); 1499 } 1500 else // vseg_type == DATA not in reference cluster 1501 { 1502 // no physical page release if not in reference cluster 1503 do_release = false; 1504 } 1505 1506 // release physical page to relevant kmem when required 1507 if( do_release ) 1508 { 1509 ppm_remote_free_pages( page_cxy , page_ptr ); 1510 1511 #if DEBUG_VMM_PPN_RELEASE 1512 thread_t * this = CURRENT_THREAD; 1513 if( DEBUG_VMM_PPN_RELEASE < (uint32_t)hal_get_cycles() ) 1514 printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n", 1515 __FUNCTION__, this->process->pid, this->trdid, ppn ); 1516 #endif 1517 1518 } 1519 } // end vmm_ppn_release() 1202 1520 1203 1521 ////////////////////////////////////////// … … 1205 1523 vseg_t * vseg ) 1206 1524 { 1207 vmm_t * vmm; // local pointer on process VMM1208 xptr_t gpt_xp; // extended pointer on GPT1209 bool_t is_ref; // local process is reference process1210 1525 uint32_t vseg_type; // vseg type 1211 1526 vpn_t vpn; // VPN of current PTE … … 1214 1529 ppn_t ppn; // current PTE ppn value 1215 1530 uint32_t attr; // current PTE attributes 1216 xptr_t page_xp; // extended pointer on page descriptor1217 cxy_t page_cxy; // page descriptor cluster1218 page_t * page_ptr; // page descriptor pointer1219 xptr_t count_xp; // extended pointer on page refcount1220 1531 1221 1532 // check arguments … … 1223 1534 assert( (vseg != NULL), "vseg argument is NULL" ); 1224 1535 1225 // compute is_ref1226 is_ref = (GET_CXY( process->ref_xp ) == local_cxy);1227 1228 
1536 // get pointers on local process VMM 1229 vmm = &process->vmm;1537 vmm_t * vmm = &process->vmm; 1230 1538 1231 1539 // build extended pointer on GPT 1232 gpt_xp = XPTR( local_cxy , &vmm->gpt );1540 xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt ); 1233 1541 1234 1542 // get relevant vseg infos … … 1240 1548 uint32_t cycle = (uint32_t)hal_get_cycles(); 1241 1549 thread_t * this = CURRENT_THREAD; 1550 #endif 1551 1552 #if (DEBUG_VMM_REMOVE_VSEG & 1 ) 1242 1553 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1243 1554 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", … … 1246 1557 #endif 1247 1558 1248 // loop on PTEs in GPT 1559 // loop on PTEs in GPT to unmap all mapped PTE 1249 1560 for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) 1250 1561 { … … 1257 1568 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1258 1569 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1259 printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) ); 1570 printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", 1571 __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); 1260 1572 #endif 1261 1573 // unmap GPT entry in local GPT 1262 1574 hal_gpt_reset_pte( gpt_xp , vpn ); 1263 1575 1264 // get pointers on physical page descriptor 1265 page_xp = ppm_ppn2page( ppn ); 1266 page_cxy = GET_CXY( page_xp ); 1267 page_ptr = GET_PTR( page_xp ); 1268 1269 // decrement page refcount 1270 count_xp = XPTR( page_cxy , &page_ptr->refcount ); 1271 hal_remote_atomic_add( count_xp , -1 ); 1272 1273 // compute the ppn_release condition depending on vseg type 1274 bool_t ppn_release; 1275 if( (vseg_type == VSEG_TYPE_FILE) || 1276 (vseg_type == VSEG_TYPE_KCODE) || 1277 (vseg_type == VSEG_TYPE_KDATA) || 1278 (vseg_type == VSEG_TYPE_KDEV) ) 1279 { 1280 // no physical page release for FILE and KERNEL 1281 ppn_release = false; 1282 } 1283 else if( (vseg_type == VSEG_TYPE_CODE) || 1284 (vseg_type == VSEG_TYPE_STACK) ) 1285 { 1286 // always release physical page for private vsegs 1287 ppn_release = true; 1288 } 1289 else if( (vseg_type == VSEG_TYPE_ANON) || 1290 (vseg_type == VSEG_TYPE_REMOTE) ) 1291 { 1292 // release physical page if reference cluster 1293 ppn_release = is_ref; 1294 } 1295 else if( is_ref ) // vseg_type == DATA in reference cluster 1296 { 1297 // get extended pointers on forks and lock field in page descriptor 1298 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1299 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1300 1301 // take lock protecting "forks" counter 1302 remote_busylock_acquire( lock_xp ); 1303 1304 // get number of pending forks from page descriptor 1305 uint32_t forks = hal_remote_l32( forks_xp ); 1306 1307 // decrement pending forks counter if required 1308 if( forks ) hal_remote_atomic_add( forks_xp , -1 ); 1309 1310 // release lock protecting "forks" counter 1311 remote_busylock_release( lock_xp ); 1312 1313 // release physical page if forks == 0 1314 ppn_release = (forks == 0); 1315 } 1316 else // vseg_type == DATA not in reference cluster 1317 { 1318 // no physical page release if not in reference cluster 1319 ppn_release = false; 1320 } 1321 1322 // release physical page to relevant kmem when required 1323 if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr ); 1324 1325 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1326 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1327 { 1328 if( ppn_release ) printk(" / released to kmem\n" ); 1329 else printk("\n"); 1330 } 1331 #endif 1576 // release physical page when required 1577 vmm_ppn_release( process , vseg , ppn ); 1332 1578 
} 1333 1579 } … … 1368 1614 } // end vmm_remove_vseg() 1369 1615 1370 1371 /////////////////////////////////// 1372 void vmm_delete_vseg( pid_t pid, 1373 intptr_t vaddr ) 1616 ///////////////////////////////////////////// 1617 void vmm_resize_vseg( process_t * process, 1618 vseg_t * vseg, 1619 intptr_t new_base, 1620 intptr_t new_size ) 1374 1621 { 1375 process_t * process; // local pointer on local process 1376 vseg_t * vseg; // local pointer on local vseg containing vaddr 1377 1378 // get local pointer on local process descriptor 1379 process = cluster_get_local_process_from_pid( pid ); 1380 1381 if( process == NULL ) 1382 { 1383 printk("\n[WARNING] in %s : cannot get local process descriptor\n", 1384 __FUNCTION__ ); 1385 return; 1386 } 1387 1388 // get local pointer on local vseg containing vaddr 1389 vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr ); 1390 1391 if( vseg == NULL ) 1392 { 1393 printk("\n[WARNING] in %s : cannot get vseg descriptor\n", 1394 __FUNCTION__ ); 1395 return; 1396 } 1397 1398 // call relevant function 1399 vmm_remove_vseg( process , vseg ); 1400 1401 } // end vmm_delete_vseg 1402 1403 1404 ///////////////////////////////////////////// 1405 vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, 1406 intptr_t vaddr ) 1407 { 1408 xptr_t vseg_xp; 1409 vseg_t * vseg; 1410 xptr_t iter_xp; 1411 1412 // get extended pointers on VSL lock and root 1413 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); 1414 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 1415 1416 // get lock protecting the VSL 1417 remote_rwlock_rd_acquire( lock_xp ); 1418 1419 // scan the list of vsegs in VSL 1420 XLIST_FOREACH( root_xp , iter_xp ) 1421 { 1422 // get pointers on vseg 1423 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 1424 vseg = GET_PTR( vseg_xp ); 1425 1426 // return success when match 1427 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) 1428 { 1429 // return success 1430 remote_rwlock_rd_release( lock_xp ); 1431 return vseg; 1432 } 1433 } 1434 1435 // return failure 1436 remote_rwlock_rd_release( lock_xp ); 1437 return NULL; 1438 1439 } // end vmm_vseg_from_vaddr() 1440 1441 ///////////////////////////////////////////// 1442 error_t vmm_resize_vseg( process_t * process, 1443 intptr_t base, 1444 intptr_t size ) 1445 { 1446 error_t error; 1447 vseg_t * new; 1448 vpn_t vpn_min; 1449 vpn_t vpn_max; 1622 vpn_t vpn; 1623 ppn_t ppn; 1624 uint32_t attr; 1625 1626 // check arguments 1627 assert( (process != NULL), "process argument is NULL" ); 1628 assert( (vseg != NULL), "vseg argument is NULL" ); 1450 1629 1451 1630 #if DEBUG_VMM_RESIZE_VSEG 1452 1631 uint32_t cycle = (uint32_t)hal_get_cycles(); 1453 1632 thread_t * this = CURRENT_THREAD; 1633 #endif 1634 1635 #if (DEBUG_VMM_RESIZE_VSEG & 1) 1454 1636 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1455 printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n", 1456 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); 1457 #endif 1458 1459 // get pointer on process VMM 1460 vmm_t * vmm = &process->vmm; 1461 1462 intptr_t addr_min = base; 1463 intptr_t addr_max = base + size; 1464 1465 // get pointer on vseg 1466 vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); 1467 1468 if( vseg == NULL) 1469 { 1470 printk("\n[ERROR] in %s : vseg(%x,%d) not found\n", 1471 __FUNCTION__, base , size ); 1472 return -1; 1473 } 1474 1475 // resize depends on unmapped region base and size 1476 if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg 1477 { 1478 printk("\n[ERROR] in %s : 
unmapped region[%x->%x[ not included in vseg[%x->%x[\n", 1479 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1480 1481 error = -1; 1482 1483 } 1484 else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted 1485 { 1637 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", 1638 __FUNCTION__, this->process->pid, this->trdid, 1639 process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); 1640 #endif 1641 1642 // get existing vseg vpn_min and vpn_max 1643 vpn_t old_vpn_min = vseg->vpn_base; 1644 vpn_t old_vpn_max = old_vpn_min + vseg->vpn_size - 1; 1645 1646 // compute new vseg vpn_min & vpn_max 1647 intptr_t min = new_base; 1648 intptr_t max = new_base + new_size; 1649 vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_SHIFT; 1650 vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1651 1652 // build extended pointer on GPT 1653 xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); 1654 1655 // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min) 1656 for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ ) 1657 { 1658 // get ppn and attr 1659 hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); 1660 1661 if( attr & GPT_MAPPED ) // PTE is mapped 1662 { 1485 1663 1486 1664 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1487 1665 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1488 printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n", 1489 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1490 #endif 1491 vmm_delete_vseg( process->pid , vseg->min ); 1492 1493 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1666 printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", 1667 __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) ); 1668 #endif 1669 // unmap GPT entry 1670 hal_gpt_reset_pte( gpt_xp , vpn ); 1671 1672 // release physical page when required 1673 vmm_ppn_release( process , vseg , ppn ); 1674 } 1675 } 1676 1677 // loop on PTEs in GPT to unmap PTE if (new_vpn_max < vpn <= old_vpn_max) 1678 for( vpn = new_vpn_max + 1 ; vpn <= old_vpn_max ; vpn++ ) 1679 { 1680 // get ppn and attr 1681 hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); 1682 1683 if( attr & GPT_MAPPED ) // PTE is mapped 1684 { 1685 1686 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1494 1687 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1495 printk("\n[%s] thread[%x,%x] deleted vseg\n", 1496 __FUNCTION__, this->process->pid, this->trdid ); 1497 #endif 1498 error = 0; 1499 } 1500 else if( vseg->min == addr_min ) // vseg must be resized 1501 { 1502 1503 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1688 printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", 1689 __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) ); 1690 #endif 1691 // unmap GPT entry in local GPT 1692 hal_gpt_reset_pte( gpt_xp , vpn ); 1693 1694 // release physical page when required 1695 vmm_ppn_release( process , vseg , ppn ); 1696 } 1697 } 1698 1699 // resize vseg in VSL 1700 vseg->min = min; 1701 vseg->max = max; 1702 vseg->vpn_base = new_vpn_min; 1703 vseg->vpn_size = new_vpn_max - new_vpn_min + 1; 1704 1705 #if DEBUG_VMM_RESIZE_VSEG 1706 cycle = (uint32_t)hal_get_cycles(); 1504 1707 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1505 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1506 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1507 #endif 1508 // update vseg min address 1509 vseg->min = addr_max; 1510 1511 // update vpn_base and vpn_size 1512 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 1513 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1514 vseg->vpn_base = 
vpn_min; 1515 vseg->vpn_size = vpn_max - vpn_min + 1; 1516 1517 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1518 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1519 printk("\n[%s] thread[%x,%x] changed vseg_min\n", 1520 __FUNCTION__, this->process->pid, this->trdid ); 1521 #endif 1522 error = 0; 1523 } 1524 else if( vseg->max == addr_max ) // vseg must be resized 1525 { 1526 1527 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1528 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1529 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1530 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1531 #endif 1532 // update vseg max address 1533 vseg->max = addr_min; 1534 1535 // update vpn_base and vpn_size 1536 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 1537 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1538 vseg->vpn_base = vpn_min; 1539 vseg->vpn_size = vpn_max - vpn_min + 1; 1540 1541 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1542 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1543 printk("\n[%s] thread[%x,%x] changed vseg_max\n", 1544 __FUNCTION__, this->process->pid, this->trdid ); 1545 #endif 1546 error = 0; 1547 1548 } 1549 else // vseg cut in three regions 1550 { 1551 1552 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1553 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1554 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1555 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1556 #endif 1557 // resize existing vseg 1558 vseg->max = addr_min; 1559 1560 // update vpn_base and vpn_size 1561 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 1562 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1563 vseg->vpn_base = vpn_min; 1564 vseg->vpn_size = vpn_max - vpn_min + 1; 1565 1566 // create new vseg 1567 new = vmm_create_vseg( process, 1568 vseg->type, 1569 addr_min, 1570 (vseg->max - addr_max), 1571 vseg->file_offset, 1572 vseg->file_size, 1573 vseg->mapper_xp, 1574 vseg->cxy ); 1575 1576 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1577 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1578 printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n", 1579 __FUNCTION__, this->process->pid, this->trdid ); 1580 #endif 1581 1582 if( new == NULL ) error = -1; 1583 else error = 0; 1584 } 1585 1586 #if DEBUG_VMM_RESIZE_VSEG 1587 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1588 printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n", 1589 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); 1590 #endif 1591 1592 return error; 1593 1594 } // vmm_resize_vseg() 1708 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n", 1709 __FUNCTION__, this->process->pid, this->trdid, 1710 process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); 1711 #endif 1712 1713 } // end vmm_resize_vseg 1714 1715 ///////////////////////////////////////////////////////////////////////////////////////////// 1716 // This static function is called twice by the vmm_get_vseg() function. 1717 // It scans the - possibly remote - VSL defined by the <vmm_xp> argument to find the vseg 1718 // containing a given virtual address <vaddr>. It uses remote accesses to access the remote 1719 // VSL if required. The lock protecting the VSL must be taken by the caller. 1720 ///////////////////////////////////////////////////////////////////////////////////////////// 1721 // @ vmm_xp : extended pointer on the process VMM. 1722 // @ vaddr : virtual address. 1723 // @ return local pointer on remote vseg if success / return NULL if not found. 
1724 ///////////////////////////////////////////////////////////////////////////////////////////// 1725 static vseg_t * vmm_vseg_from_vaddr( xptr_t vmm_xp, 1726 intptr_t vaddr ) 1727 { 1728 xptr_t iter_xp; 1729 xptr_t vseg_xp; 1730 vseg_t * vseg; 1731 intptr_t min; 1732 intptr_t max; 1733 1734 // get cluster and local pointer on target VMM 1735 vmm_t * vmm_ptr = GET_PTR( vmm_xp ); 1736 cxy_t vmm_cxy = GET_CXY( vmm_xp ); 1737 1738 // build extended pointer on VSL root 1739 xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root ); 1740 1741 // scan the list of vsegs in VSL 1742 XLIST_FOREACH( root_xp , iter_xp ) 1743 { 1744 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 1745 vseg = GET_PTR( vseg_xp ); 1746 1747 min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) ); 1748 max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) ); 1749 1750 // return success when match 1751 if( (vaddr >= min) && (vaddr < max) ) return vseg; 1752 } 1753 1754 // return failure 1755 return NULL; 1756 1757 } // end vmm_vseg_from_vaddr() 1595 1758 1596 1759 /////////////////////////////////////////// … … 1599 1762 vseg_t ** found_vseg ) 1600 1763 { 1601 xptr_t vseg_xp; 1602 vseg_t * vseg; 1603 vmm_t * vmm; 1604 error_t error; 1605 1606 // get pointer on local VMM 1607 vmm = &process->vmm; 1764 xptr_t loc_lock_xp; // extended pointer on local VSL lock 1765 xptr_t ref_lock_xp; // extended pointer on reference VSL lock 1766 vseg_t * loc_vseg; // local pointer on local vseg 1767 vseg_t * ref_vseg; // local pointer on reference vseg 1768 1769 // build extended pointer on local VSL lock 1770 loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock ); 1771 1772 // get local VSL lock 1773 remote_queuelock_acquire( loc_lock_xp ); 1608 1774 1609 1775 // try to get vseg from local VMM 1610 vseg = vmm_vseg_from_vaddr( vmm, vaddr );1611 1612 if ( vseg == NULL ) // vseg not found in local cluster => try to get it from ref1613 1776 loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr ); 1777 1778 if (loc_vseg == NULL) // vseg not found => access reference VSL 1779 { 1614 1780 // get extended pointer on reference process 1615 1781 xptr_t ref_xp = process->ref_xp; 1616 1782 1617 // get cluster and local pointer on reference process 1783 // get cluster and local pointer on reference process 1618 1784 cxy_t ref_cxy = GET_CXY( ref_xp ); 1619 1785 process_t * ref_ptr = GET_PTR( ref_xp ); 1620 1786 1621 if( local_cxy == ref_cxy ) return -1; // local cluster is the reference 1622 1623 // get extended pointer on reference vseg 1624 rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); 1625 1626 if( error ) return -1; // vseg not found => illegal user vaddr 1627 1628 // allocate a vseg in local cluster 1629 vseg = vseg_alloc(); 1630 1631 if( vseg == NULL ) return -1; // cannot allocate a local vseg 1632 1633 // initialise local vseg from reference 1634 vseg_init_from_ref( vseg , vseg_xp ); 1635 1636 // build extended pointer on VSL lock 1637 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); 1638 1639 // take the VSL lock in write mode 1640 remote_rwlock_wr_acquire( lock_xp ); 1641 1642 // register local vseg in local VSL 1643 vmm_attach_vseg_to_vsl( vmm , vseg ); 1644 1645 // release the VSL lock 1646 remote_rwlock_wr_release( lock_xp ); 1647 } 1648 1649 // success 1650 *found_vseg = vseg; 1651 return 0; 1652 1787 // build extended pointer on reference VSL lock 1788 ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock ); 1789 1790 // get reference VSL lock 1791 remote_queuelock_acquire( ref_lock_xp ); 
 1792 1793 // try to get vseg from reference VMM 1794 ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr ); 1795 1796 if( ref_vseg == NULL ) // vseg not found => return error 1797 { 1798 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 1799 __FUNCTION__, vaddr, process->pid ); 1800 1801 // release reference VSL & local VSL locks 1802 remote_queuelock_release( ref_lock_xp ); 1803 remote_queuelock_release( loc_lock_xp ); 1804 return -1; 1805 } 1806 else // vseg found => try to update local VSL 1807 { 1808 // allocate a local vseg descriptor 1809 loc_vseg = vseg_alloc(); 1810 1811 if( loc_vseg == NULL ) // no memory => return error 1812 { 1813 printk("\n[ERROR] in %s : vaddr %x in process %x / no memory for local vseg\n", 1814 __FUNCTION__, vaddr, process->pid ); 1815 1816 // release reference VSL & local VSL locks 1817 remote_queuelock_release( ref_lock_xp ); 1818 remote_queuelock_release( loc_lock_xp ); 1819 1820 return -1; 1821 } 1822 else // update local VSL and return success 1823 { 1824 // initialize local vseg 1825 vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) ); 1826 1827 // register local vseg in local VSL 1828 vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg ); 1829 1830 // release reference VSL & local VSL locks 1831 remote_queuelock_release( ref_lock_xp ); 1832 remote_queuelock_release( loc_lock_xp ); 1833 1834 *found_vseg = loc_vseg; 1835 return 0; 1836 } 1837 } 1838 } 1839 else // vseg found in local VSL => return success 1840 { 1841 // release local VSL lock 1842 remote_queuelock_release( loc_lock_xp ); 1843 1844 *found_vseg = loc_vseg; 1845 return 0; 1846 } 1653 1847 } // end vmm_get_vseg() 1654 1848 … … 1658 1852 // pointer on the allocated page descriptor. 1659 1853 // The vseg cannot have the FILE type. 1854 ////////////////////////////////////////////////////////////////////////////////////// 1855 // @ vseg : local pointer on vseg. 1856 // @ vpn : unmapped vpn. 
1857 // @ return an extended pointer on the allocated page 1660 1858 ////////////////////////////////////////////////////////////////////////////////////// 1661 1859 static xptr_t vmm_page_allocate( vseg_t * vseg, … … 2194 2392 #if DEBUG_VMM_HANDLE_COW 2195 2393 uint32_t cycle = (uint32_t)hal_get_cycles(); 2196 if( DEBUG_VMM_HANDLE_COW < cycle)2394 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2197 2395 printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n", 2198 2396 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); … … 2200 2398 2201 2399 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3 ) 2202 hal_vmm_display( process, true );2400 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2203 2401 #endif 2204 2402 … … 2216 2414 2217 2415 #if DEBUG_VMM_HANDLE_COW 2218 if( DEBUG_VMM_HANDLE_COW < cycle)2416 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2219 2417 printk("\n[%s] thread[%x,%x] get vseg %s\n", 2220 2418 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) ); … … 2256 2454 2257 2455 #if DEBUG_VMM_HANDLE_COW 2258 if( DEBUG_VMM_HANDLE_COW < cycle)2456 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2259 2457 printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n", 2260 2458 __FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr ); … … 2285 2483 2286 2484 #if DEBUG_VMM_HANDLE_COW 2287 if( DEBUG_VMM_HANDLE_COW < cycle)2485 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2288 2486 printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n", 2289 2487 __FUNCTION__, this->process->pid, this->trdid, forks, vpn ); … … 2315 2513 2316 2514 #if DEBUG_VMM_HANDLE_COW 2317 if( DEBUG_VMM_HANDLE_COW < cycle)2515 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2318 2516 printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n", 2319 2517 __FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn ); … … 2326 2524 2327 2525 #if DEBUG_VMM_HANDLE_COW 2328 if( DEBUG_VMM_HANDLE_COW < cycle)2526 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2329 2527 printk("\n[%s] thread[%x,%x] copied old page to new page\n", 2330 2528 __FUNCTION__, this->process->pid, this->trdid ); … … 2338 2536 2339 2537 #if(DEBUG_VMM_HANDLE_COW & 1) 2340 if( DEBUG_VMM_HANDLE_COW < cycle)2538 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2341 2539 printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n", 2342 2540 __FUNCTION__, this->process->pid, this->trdid, old_ppn ); … … 2349 2547 2350 2548 #if(DEBUG_VMM_HANDLE_COW & 1) 2351 if( DEBUG_VMM_HANDLE_COW < cycle)2549 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2352 2550 printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n", 2353 2551 __FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn ); … … 2367 2565 else 2368 2566 { 2369 if( ref_cxy == local_cxy ) // reference cluster is local 2370 { 2371 vmm_global_update_pte( process, 2372 vpn, 2373 new_attr, 2374 new_ppn ); 2375 } 2376 else // reference cluster is remote 2377 { 2378 rpc_vmm_global_update_pte_client( ref_cxy, 2379 ref_ptr, 2380 vpn, 2381 new_attr, 2382 new_ppn ); 2383 } 2567 // set new PTE in all GPT copies 2568 vmm_global_update_pte( process, 2569 vpn, 2570 new_attr, 2571 new_ppn ); 2384 2572 } 2385 2573 2386 2574 #if DEBUG_VMM_HANDLE_COW 2387 2575 cycle = (uint32_t)hal_get_cycles(); 2388 if( DEBUG_VMM_HANDLE_COW < cycle)2576 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2389 2577 printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n", 2390 2578 __FUNCTION__, 
this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); … … 2392 2580 2393 2581 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3) 2394 hal_vmm_display( process, true );2582 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2395 2583 #endif 2396 2584 -
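The new vmm_ppn_release() helper above centralises the physical-page release policy that vmm_remove_vseg() and vmm_resize_vseg() would otherwise duplicate: the decision depends only on the vseg type, on whether the local cluster is the reference cluster, and, for DATA vsegs, on the pending-forks counter. The standalone predicate below restates that decision table (plain C; the enum is an illustration, not the kernel vseg_type_t):

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { FILE_T, KCODE, KDATA, KDEV, CODE, STACK, ANON, REMOTE, DATA } type_t;

    // Restates the vmm_ppn_release() policy: should the physical page
    // be returned to the kernel memory allocator (kmem)?
    static bool must_release_ppn( type_t   type,
                                  bool     is_ref,  // local cluster == reference cluster
                                  uint32_t forks )  // pending forks (DATA pages only)
    {
        switch( type )
        {
            case FILE_T:                    // pages belong to the file mapper
            case KCODE:
            case KDATA:
            case KDEV:   return false;      // kernel pages are never released

            case CODE:
            case STACK:  return true;       // private pages are always released

            case ANON:
            case REMOTE: return is_ref;     // released by the reference cluster only

            case DATA:   return is_ref && (forks == 0);  // wait for pending forks
            default:     return false;
        }
    }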
trunk/kernel/mm/vmm.h
r635 r640 112 112 typedef struct vmm_s 113 113 { 114 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL*/115 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference)*/116 uint32_t vsegs_nr; /*! total number of local vsegs*/117 118 gpt_t gpt; /*! Generic Page Table (complete in reference)*/119 120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator*/121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator*/122 123 uint32_t false_pgfault_nr; /*! false page fault counter (for all threads)*/124 uint32_t local_pgfault_nr; /*! false page fault counter (for all threads)*/125 uint32_t global_pgfault_nr; /*! false page fault counter (for all threads)*/126 uint32_t false_pgfault_cost; /*! cumulated cost (for all threads)*/127 uint32_t local_pgfault_cost; /*! cumulated cost (for all threads)*/128 uint32_t global_pgfault_cost; /*! cumulated cost (for all threads)*/129 130 vpn_t args_vpn_base; /*! args vseg first page*/131 vpn_t envs_vpn_base; /*! envs vseg first page*/132 vpn_t code_vpn_base; /*! code vseg first page*/133 vpn_t data_vpn_base; /*! data vseg first page*/134 vpn_t heap_vpn_base; /*! heap zone first page*/135 136 intptr_t entry_point; /*! main thread entry point*/114 remote_queuelock_t vsl_lock; /*! lock protecting the local VSL */ 115 xlist_entry_t vsegs_root; /*! Virtual Segment List root */ 116 uint32_t vsegs_nr; /*! total number of local vsegs */ 117 118 gpt_t gpt; /*! Generic Page Table descriptor */ 119 120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 122 123 uint32_t false_pgfault_nr; /*! false page fault counter (for all threads) */ 124 uint32_t local_pgfault_nr; /*! local page fault counter (for all threads) */ 125 uint32_t global_pgfault_nr; /*! global page fault counter (for all threads) */ 126 uint32_t false_pgfault_cost; /*! cumulated cost (for all threads) */ 127 uint32_t local_pgfault_cost; /*! cumulated cost (for all threads) */ 128 uint32_t global_pgfault_cost; /*! cumulated cost (for all threads) */ 129 130 vpn_t args_vpn_base; /*! args vseg first page */ 131 vpn_t envs_vpn_base; /*! envs vseg first page */ 132 vpn_t code_vpn_base; /*! code vseg first page */ 133 vpn_t data_vpn_base; /*! data vseg first page */ 134 vpn_t heap_vpn_base; /*! heap zone first page */ 135 136 intptr_t entry_point; /*! main thread entry point */ 137 137 } 138 138 vmm_t; … … 143 143 * - The GPT has been previously created, with the hal_gpt_create() function. 144 144 * - The "kernel" vsegs are previously registered by the hal_vmm_kernel_update() function. 145 * - The "code" and "data" vsegs are registered by the elf_load_process() function. 145 * - The "code" and "data" vsegs are registered by the elf_load_process() function. 146 146 * - The "stack" vsegs are dynamically registered by the thread_user_create() function. 147 147 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall. … … 206 206 207 207 /********************************************************************************************* 208 * This function modifies the size of the vseg identified by <process> and <base> arguments 209 * in all clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments. 210 * This function is called by the sys_munmap() function, and can be called by a thread 211 * running in any cluster, as it uses remote accesses. 212 * It cannot fail, as only vsegs registered in VSL copies are updated. 
 213 ********************************************************************************************* 214 * @ process : local pointer on process descriptor. 215 * @ base : current vseg base address in user space. 216 * @ new_base : new vseg base. 217 * @ new_size : new vseg size. 218 ********************************************************************************************/ 219 void vmm_global_resize_vseg( struct process_s * process, 220 intptr_t base, 221 intptr_t new_base, 222 intptr_t new_size ); 223 224 /********************************************************************************************* 225 * This function removes the vseg identified by the <process> and <base> arguments from 226 * the VSL and removes all associated PTE entries from the GPT. 227 * This is done in all clusters containing a VMM copy to maintain VMM coherence. 228 * This function can be called by a thread running in any cluster, as it uses the 229 * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters. 230 * It cannot fail, as only vsegs registered in VSL copies are deleted. 231 ********************************************************************************************* 232 * @ process : local pointer on process descriptor. 233 * @ base : vseg base address in user space. 234 ********************************************************************************************/ 235 void vmm_global_delete_vseg( struct process_s * process, 236 intptr_t base ); 237 238 /********************************************************************************************* 208 239 * This function modifies one GPT entry identified by the <process> and <vpn> arguments 209 * in all clusters containing a process copy. It is used to maintain coherence in GPT 210 * copies, using remote_write accesses. 211 * It must be called by a thread running in the process owner cluster. 212 * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required. 240 241 * in all clusters containing a process copy. It maintains coherence in GPT copies, 242 * using remote_write accesses. 213 243 * It cannot fail, as only mapped PTE2 in GPT copies are updated. 214 244 ********************************************************************************************* … … 282 311 /********************************************************************************************* 283 312 * This function removes from the VMM of a process descriptor identified by the <process> 284 * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg. 285 * As it uses local pointers, it must be called by a local thread. 286 * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions. 313 314 * argument the vseg identified by the <vseg> argument. 315 * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions. 316 * It must be called by a local thread, running in the cluster containing the modified VMM. 317 * Use the RPC_VMM_REMOVE_VSEG if required. 287 317 * It makes a kernel panic if the process is not registered in the local cluster, 288 318 * or if the vseg is not registered in the process VSL. 289 319 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are 290 320 * unmapped from local GPT. Other actions depend on the vseg type: 291 * -Regarding the vseg descriptor release:321 * Regarding the vseg descriptor release: 292 322 * . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list. 293 323 * . 
for STACK the vseg is released to the local stack allocator. 294 324 * . for all other types, the vseg is released to the local kmem. 295 * -Regarding the physical pages release:325 * Regarding the physical pages release: 296 326 * . for KERNEL and FILE, the pages are not released to kmem. 297 327 * . for CODE and STACK, the pages are released to local kmem when they are not COW. 298 328 * . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when 299 329 * the local cluster is the reference cluster. 300 * The lock protecting the VSL must be taken by the caller.330 * The lock protecting the VSL must be taken by the caller. 301 331 ********************************************************************************************* 302 * @ process : local pointer on process.332 * @ process : local pointer on process descriptor. 303 * @ vseg : local pointer on vseg.333 * @ vseg : local pointer on target vseg. 304 334 ********************************************************************************************/ 305 335 void vmm_remove_vseg( struct process_s * process, … … 307 337 308 338 /********************************************************************************************* 309 * This function call the vmm_remove vseg() function to remove from the VMM of a local 310 * process descriptor, identified by the <pid> argument the vseg identified by the <vaddr> 311 * virtual address in user space. 312 * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor. 313 ********************************************************************************************* 314 * @ pid : process identifier. 315 * @ vaddr : virtual address in user space. 316 ********************************************************************************************/ 317 void vmm_delete_vseg( pid_t pid, 318 intptr_t vaddr ); 319 320 /********************************************************************************************* 321 * This function removes a given region (defined by a base address and a size) from 322 * the VMM of a given process descriptor. This can modify the number of vsegs: 323 * (a) if the region is not entirely mapped in an existing vseg, it's an error. 324 * (b) if the region has same base and size as an existing vseg, the vseg is removed. 325 * (c) if the removed region cut the vseg in two parts, it is modified. 326 * (d) if the removed region cut the vseg in three parts, it is modified, and a new 327 * vseg is created with same type. 328 * FIXME [AG] this function should be called by a thread running in the reference cluster, 329 * and the VMM should be updated in all process descriptors copies. 330 ********************************************************************************************* 331 * @ process : pointer on process descriptor 332 * @ base : vseg base address 333 * @ size : vseg size (bytes) 334 ********************************************************************************************/ 335 error_t vmm_resize_vseg( struct process_s * process, 336 intptr_t base, 337 intptr_t size ); 338 339 /********************************************************************************************* 340 * This low-level function scan the local VSL in <vmm> to find the unique vseg containing 341 * a given virtual address <vaddr>. 342 * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions. 
 343 ********************************************************************************************* 344 * @ vmm : pointer on the process VMM. 345 * @ vaddr : virtual address. 346 * @ return vseg pointer if success / return NULL if not found. 347 ********************************************************************************************/ 348 struct vseg_s * vmm_vseg_from_vaddr( vmm_t * vmm, 349 intptr_t vaddr ); 350 351 /********************************************************************************************* 352 * This function checks that a given virtual address is contained in a registered vseg. 353 * It can be called by any thread running in any cluster: 354 * - if the vseg is registered in the local process VMM, it returns the local vseg pointer. 355 * - if the vseg is missing in local VMM, it uses a RPC to get it from the reference cluster, 356 * register it in local VMM and returns the local vseg pointer, if success. 357 * - it returns an user error if the vseg is missing in the reference VMM, or if there is 358 * not enough memory for a new vseg descriptor in the calling thread cluster. 359 ********************************************************************************************* 360 * @ process : [in] pointer on process descriptor 361 * @ vaddr : [in] virtual address 362 * @ vseg : [out] local pointer on local vseg 363 * @ returns 0 if success / returns -1 if user error (out of segment). 339 * This function resizes a local vseg identified by the <process> and <vseg> arguments. 340 * It is called by the vmm_global_resize_vseg() function. 341 * It must be called by a local thread, running in the cluster containing the modified VMM. 342 * Use the RPC_VMM_RESIZE_VSEG if required. 343 * It makes a kernel panic if the process is not registered in the local cluster, 344 * or if the vseg is not registered in the process VSL. 345 * The new vseg, defined by the <new_base> and <new_size> arguments, must be strictly 346 * included in the target vseg. The target vseg base and size fields are modified in the VSL. 347 * If the new vseg contains fewer pages than the target vseg, the relevant pages are 348 * removed from the GPT. 349 * The lock protecting the VSL must be taken by the caller. 350 ********************************************************************************************* 351 * @ process : local pointer on process descriptor 352 * @ vseg : local pointer on target vseg 353 * @ new_base : vseg base address 354 * @ new_size : vseg size (bytes) 355 ********************************************************************************************/ 356 void vmm_resize_vseg( struct process_s * process, 357 struct vseg_s * vseg, 358 intptr_t new_base, 359 intptr_t new_size ); 360 361 /********************************************************************************************* 362 * This function checks that a given virtual address <vaddr> in a given <process> is 363 * contained in a registered vseg. It can be called by any thread running in any cluster. 364 * - if the vseg is registered in the local process VSL, it returns the local vseg pointer. 365 * - if the vseg is missing in local VSL, it directly accesses the reference VSL. 366 * - if the vseg is found in reference VSL, it updates the local VSL and returns this pointer. 367 * It returns an error when the vseg is missing in the reference VMM, or when there is 368 * not enough memory for a new vseg descriptor in the calling thread cluster. 369 * For both the local and the reference VSL, it takes the VSL lock before scanning the VSL. 
 370 ********************************************************************************************* 371 * @ process : [in] pointer on process descriptor. 372 * @ vaddr : [in] virtual address. 373 * @ vseg : [out] local pointer on local vseg. 374 * @ returns 0 if success / returns -1 if user error 364 375 ********************************************************************************************/ 365 376 error_t vmm_get_vseg( struct process_s * process, … … 395 406 * This function is called by the generic exception handler in case of WRITE violation event, 396 407 * detected for a given <vpn>. The <process> argument is used to access the relevant VMM. 397 * It returns a kernel panic if VPN is not in a registered vseg or is not mapped.408 * It returns a kernel panic if the faulty VPN is not in a registered vseg, or is not mapped. 398 409 * For a legal mapped vseg there are two cases: 399 410 * 1) If the missing VPN belongs to a private vseg (STACK), it accesses only the local GPT. -
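With the new contract, a successful vmm_get_vseg() call guarantees that the returned vseg is registered in the local VSL, whether it was found locally or replicated from the reference VSL. A short usage sketch (check_user_address is a hypothetical caller; vmm_get_vseg, printk and vseg_type_str are the kernel functions declared or used in this changeset):

    // Typical use in a syscall or fault handler: translate a user virtual
    // address into its vseg, replicating the reference VSL entry if needed.
    error_t check_user_address( process_t * process,
                                intptr_t    vaddr )
    {
        vseg_t * vseg;

        // scans the local VSL, then the reference VSL on a miss;
        // on success <vseg> is guaranteed to be registered locally
        if( vmm_get_vseg( process , vaddr , &vseg ) ) return -1;  // out of segment

        printk("vaddr %x belongs to a %s vseg\n", vaddr, vseg_type_str( vseg->type ) );
        return 0;
    }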
trunk/kernel/mm/vseg.h
r625 r640 2 2 * vseg.h - virtual segment (vseg) related operations 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019) 4 * Authors Alain Greiner (2016,2017,2018,2019) 7 5 * 8 6 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/syscalls/sys_display.c
r637 r640
134 134 case DISPLAY_VMM:
135 135 {
136 cxy_t cxy = (cxy_t)arg0;
137 pid_t pid = (pid_t)arg1;
136 cxy_t cxy = (cxy_t)arg0;
137 pid_t pid = (pid_t)arg1;
138 bool_t mapping = (arg2 != 0);
138 139
139 140 // check cxy argument
… …
163 164
164 165 // call kernel function
165 hal_vmm_display( process_xp , true);
166 167 hal_vmm_display( process_xp , mapping );
166 167
167 168 break;
… …
197 198
197 198 }
198 199
199 if( cxy == local_cxy )
200 {
201 sched_display( lid );
202 }
203 else
204 {
205 sched_remote_display( cxy , lid );
206 }
200 // call kernel function
201 sched_remote_display( cxy , lid );
207 202
208 203 break;
-
trunk/kernel/syscalls/sys_munmap.c
r635 r640
72 72 }
73 73
74 // compute unmapped region min and max
75 intptr_t addr_min = (intptr_t)vaddr;
76 intptr_t addr_max = addr_min + size;
77
78
79 // get vseg min & max addresses
80 intptr_t vseg_min = vseg->min;
81 intptr_t vseg_max = vseg->max;
82
74 83 // enable IRQs
75 84 hal_enable_irq( &save_sr );
76 85
77 // call relevant kernel function
78 error = vmm_resize_vseg( process , (intptr_t)vaddr , (intptr_t)size );
79
80 if ( error )
86 // action depends on both vseg and region bases & sizes
87 if( (vseg_min > addr_min) || (vseg_max < addr_max) ) // region not included in vseg
81 88 {
82 89
83 90 #if DEBUG_SYSCALLS_ERROR
84 printk("\n[ERROR] in %s : cannot remove mapping\n", __FUNCTION__ );
91 printk("\n[ERROR] in %s : thread[%x,%x] / region[%x->%x] not included in vseg[%x->%x]\n",
92 __FUNCTION__, process->pid, this->trdid, addr_min, addr_max, vseg_min, vseg_max );
85 93 #endif
86 94 this->errno = EINVAL;
87 95 return -1;
96 }
97 else if( (vseg_min == addr_min) && (vseg_max == addr_max) ) // region covers the whole vseg
98 {
99
100 #if( DEBUG_SYS_MUNMAP & 1 )
101 if( DEBUG_SYS_MUNMAP < cycle )
102 printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg deleted\n",
103 __FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
104 #endif
105 // delete existing vseg
106 vmm_global_delete_vseg( process,
107 vseg_min );
108 }
109 else if( (vseg_min == addr_min) || (vseg_max == addr_max) ) // region covers one vseg end
110 {
111
112 #if( DEBUG_SYS_MUNMAP & 1 )
113 if( DEBUG_SYS_MUNMAP < cycle )
114 printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg resized\n",
115 __FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
116 #endif
117 // resize existing vseg, keeping only the part that remains mapped
118 vmm_global_resize_vseg( process,
119 vseg_min,
120 (vseg_min == addr_min) ? addr_max : vseg_min,
121 (vseg_min == addr_min) ? (vseg_max - addr_max) : (addr_min - vseg_min) );
122 }
123 else // (vseg_min < addr_min) && (addr_max < vseg_max)
124 {
125
126 #if( DEBUG_SYS_MUNMAP & 1 )
127 if( DEBUG_SYS_MUNMAP < cycle )
128 printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg resized & new vseg created\n",
129 __FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
130 #endif
131 // resize existing vseg, keeping the part below the unmapped region
132 vmm_global_resize_vseg( process,
133 vseg_min,
134 vseg_min,
135 addr_min - vseg_min );
136
137 // create new vseg for the part above the unmapped region
138 vmm_create_vseg( process,
139 vseg->type,
140 addr_max,
141 vseg_max - addr_max,
142 vseg->file_offset,
143 vseg->file_size,
144 vseg->mapper_xp,
145 vseg->cxy );
88 146 }
-
trunk/kernel/syscalls/syscalls.h
r637 r640
187 187 /******************************************************************************************
188 188 * [11] This function removes an existing mapping defined by the <addr> and <size>
189 * arguments in user space.
190 ******************************************************************************************
189 * arguments in user space. This can modify the number of vsegs:
190 * (a) if the region is not entirely mapped in one existing vseg, it's an error.
191 * (b) if the region has the same base and size as an existing vseg, the vseg is removed.
192 * (c) if the removed region cuts the existing vseg in two parts, the vseg is resized.
193 * (d) if the removed region cuts the vseg in three parts, it is modified, and a new
194 * vseg is created with the same type.
195 * All existing VSL copies are updated.
196 ******************************************************************************************
191 197 * @ addr : base address in user space.
192 * #size : number of bytes.
198 * @ size : number of bytes.
193 199 * @ return 0 if success / return -1 if failure.
194 200 *****************************************************************************************/
-
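To make the four cases concrete, a user-level sketch (the mmap() parameters, the 4-Kbyte page size, and the error codes are assumptions; each munmap() call below is an independent alternative applied to a fresh four-page mapping):

    // map a four-page (16-Kbyte) anonymous region, then unmap sub-regions
    char * base = mmap( NULL , 0x4000 , PROT_READ | PROT_WRITE ,
                        MAP_PRIVATE | MAP_ANONYMOUS , -1 , 0 );

    munmap( base          , 0x4000 );  // (b) whole vseg       => vseg removed
    munmap( base          , 0x1000 );  // (c) first page       => vseg resized
    munmap( base + 0x1000 , 0x2000 );  // (d) two middle pages => vseg resized, and a
                                       //     new vseg created for the last page
    munmap( base - 0x1000 , 0x2000 );  // (a) crosses the base => -1 / errno = EINVAL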
trunk/libs/libalmosmkh/almosmkh.c
r638 r640
289 289
290 290 /////////////////////////////////////////////////////
291 int display_vmm( unsigned int cxy, unsigned int pid )
291 int display_vmm( unsigned int cxy,
292 unsigned int pid,
293 unsigned int mapping )
292 294 {
293 295 return hal_user_syscall( SYS_DISPLAY,
294 296 DISPLAY_VMM,
295 297 (reg_t)cxy,
296 (reg_t)pid, 0 );
298 (reg_t)pid,
299 (reg_t)mapping );
297 300 }
298 301
… …
500 503 printf(" / pid = ");
501 504 unsigned int pid = get_uint32();
502 display_vmm( cxy , pid );
505 printf(" / mapping = ");
506 unsigned int map = get_uint32();
507 display_vmm( cxy , pid , map );
503 508 }
504 509 // force the calling process to exit
-
trunk/libs/libalmosmkh/almosmkh.h
r637 r640
165 165 * @ cxy : [in] target cluster identifier.
166 166 * @ pid : [in] process identifier.
167 * @ mapping : [in] detailed mapping if non zero.
167 168 * @ return 0 if success / return -1 if illegal argument.
168 169 **************************************************************************************/
169 int display_vmm(unsigned int cxy, unsigned int pid );
170 int display_vmm( unsigned int cxy,
171 unsigned int pid,
172 unsigned int mapping );
170 173
171 174 /***************************************************************************************
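A short usage sketch for the extended function (the cluster and PID values are hypothetical):

    // display the VMM of process 0x10001 in cluster 0 :
    // first the VSL only, then with the detailed GPT mapping
    if( display_vmm( 0 , 0x10001 , 0 ) ) printf(" no such process\n");
    if( display_vmm( 0 , 0x10001 , 1 ) ) printf(" no such process\n");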
trunk/libs/libpthread/pthread.h
r637 r640
214 214 /*********************************************************************************************
215 215 * This function synchronizes participating threads at the barrier referenced by <barrier>.
216 * The calling is blocked until the required number of threads have called the function
217 * pthread_barrier_wait() specifying the barrier.
216 * The calling thread is blocked until the required number of threads have called the
217 * function pthread_barrier_wait() specifying the barrier.
218 218 * When the required number of threads have called pthread_barrier_wait(), the constant
219 219 * PTHREAD_BARRIER_SERIAL_THREAD is returned to one unspecified thread and zero is returned
-
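As a reminder of the standard usage pattern (a sketch only; NTHREADS is a placeholder for the number of participating threads):

    pthread_barrier_t barrier;

    // before launching the threads : register NTHREADS participants
    pthread_barrier_init( &barrier , NULL , NTHREADS );

    // in each participating thread : block until all have arrived
    int ret = pthread_barrier_wait( &barrier );

    if( ret == PTHREAD_BARRIER_SERIAL_THREAD )
    {
        // exactly one unspecified thread observes this value
    }
    else if( ret != 0 )
    {
        // any other non-zero value reports an error
    }
    // all remaining threads receive 0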
trunk/params-hard.mk
r638 r640
3 3 ARCH = /Users/alain/soc/tsar-trunk-svn-2013/platforms/tsar_generic_iob
4 4 X_SIZE = 1
5 Y_SIZE = 1
5 Y_SIZE = 2
6 6 NB_PROCS = 1
7 7 NB_TTYS = 2
-
trunk/user/fft/fft.c
r638 r640
96 96 #define MODE COSIN // DATA array initialisation mode
97 97 #define CHECK 0
98 #define DEBUG_MAIN 1 // trace main() function (detailed if odd)
98 #define DEBUG_MAIN 0 // trace main() function (detailed if odd)
99 99 #define DEBUG_WORK 0 // trace work() function (detailed if odd)
100 100 #define DEBUG_FFT1D 0 // trace FFT1D() function (detailed if odd)
101 101 #define DEBUG_ROW 0 // trace FFTRow() function (detailed if odd)
102 102 #define PRINT_ARRAY 0
103 #define DISPLAY_SCHED_AND_VMM 0 // display final VMM state in all clusters
103 104
104 105 // macro to swap two variables
… …
234 235 unsigned int ncores; // max number of cores per cluster
235 237
236 238 unsigned int x; // current index for cluster X coordinate
237 239 unsigned int y; // current index for cluster Y coordinate
… …
258 260 #endif
259
262 int pid = getpid();
263
260 264 // get FFT application start cycle
261 265 get_cycle( &start_init_cycle );
… …
311 315
312 316 printf("\n[fft] starts / %d points / %d thread(s) / PID %x / cycle %d\n",
313 N, nthreads, getpid(), (unsigned int)start_init_cycle );
317 N, nthreads, pid, (unsigned int)start_init_cycle );
314 318
315 319 // build instrumentation file name
… …
498 502 }
499 503
500 // close instrumentation file
501 ret = fclose( f );
502 if( ret )
503 {
504 printf("\n[fft error] cannot close file <%s>\n", path );
505 exit(0);
506 }
507
508 504 #if DEBUG_MAIN
509 505 get_cycle( &debug_cycle );
… …
553 549 {
554 550 unsigned int data_size = (N / nclusters) * 2 * sizeof(double);
555 unsigned int coefs_size = (rootN - 1) * 2 * sizeof(double);
556 551
557 552 data[cid] = (double *)malloc( data_size );
… …
589 584 printf("\n[fft] %s : thread %d exit barrier for buffer allocation / cycle %d\n",
590 585 __FUNCTION__, tid, (unsigned int)barrier_stop );
586 #endif
587
588 #if DISPLAY_SCHED_AND_VMM
589 unsigned int x_size;
590 unsigned int y_size;
591 unsigned int ncores;
592 get_config( &x_size , &y_size , &ncores );
593 unsigned int x = cid / y_size;
594 unsigned int y = cid % y_size;
595 unsigned int cxy = HAL_CXY_FROM_XY( x , y );
596 display_sched( cxy , lid );
597 if( lid == 0 ) display_vmm( cxy , getpid() , 0 );
591 598 #endif
… …
652 659 printf("\n[fft] %s : thread %d exit\n",
653 660 __FUNCTION__, tid );
661 #endif
662
663 #if DISPLAY_SCHED_AND_VMM
664 printf("\n[fft] %s : thread %d exit\n", __FUNCTION__, tid );
665 if( lid == 0 ) display_vmm( cxy , getpid() , 0 );
654 666 #endif
-
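For reference, the cid-to-cluster arithmetic used in the DISPLAY_SCHED_AND_VMM blocks above, worked out for the X_SIZE = 1 / Y_SIZE = 2 configuration set in params-hard.mk (the concrete cid values are only an illustration):

    // with x_size = 1 and y_size = 2 :
    //   cid 0 : x = 0 / 2 = 0 , y = 0 % 2 = 0 => cluster (0,0)
    //   cid 1 : x = 1 / 2 = 0 , y = 1 % 2 = 1 => cluster (0,1)
    unsigned int x   = cid / y_size;
    unsigned int y   = cid % y_size;
    unsigned int cxy = HAL_CXY_FROM_XY( x , y );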
trunk/user/ksh/ksh.c
r638 r640
400 400 if( argc < 2 )
401 401 {
402 printf(" usage: display vmm cxy pid \n"
402 printf(" usage: display vmm cxy pid mapping\n"
403 403 " display sched cxy lid\n"
404 404 " display process cxy\n"
… …
416 416 else if( strcmp( argv[1] , "vmm" ) == 0 )
417 417 {
418 if( argc != 4 )
419 {
420 printf(" usage: display vmm cxy pid \n");
418 if( argc != 5 )
419 {
420 printf(" usage: display vmm cxy pid mapping\n");
421 421 }
422 422 else
… …
424 424 unsigned int cxy = atoi(argv[2]);
425 425 unsigned int pid = atoi(argv[3]);
426 unsigned int map = atoi(argv[4]);
426 427
427 if( display_vmm( cxy , pid ) )
428 if( display_vmm( cxy , pid , map ) )
428 429 {
429 430 printf(" error: no process %x in cluster %x\n", pid , cxy );