- Timestamp: Oct 1, 2019, 1:19:00 PM
- Location: trunk/hal
- Files: 4 edited
Legend:
- Unmodified: context lines (no marker)
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
trunk/hal/generic/hal_gpt.h
r635 → r640

  * hal_gpt.h - Generic Page Table API definition.
  *
- * Authors Alain Greiner (2016)
+ * Authors Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  typedef struct gpt_s
  {
-     void     * ptr;                /*! local pointer on GPT root                    */
-     ppn_t      ppn;                /*! PPN of GPT root                              */
+     void     * ptr;                /*! local pointer on GPT root                    */
+     uint32_t   pte1_wait_events;   /*! total number of pte1 wait events on this gpt */
+     uint32_t   pte1_wait_iters;    /*! total number of iterations in all pte1 wait  */
+     uint32_t   pte2_wait_events;   /*! total number of pte2 wait events on this gpt */
+     uint32_t   pte2_wait_iters;    /*! total number of iterations in all pte2 wait  */
  }
  gpt_t;
…
  * This function releases all memory dynamically allocated for a generic page table.
  * For a multi-level radix tree implementation, it includes all nodes in the tree.
+ * All GPT entries are supposed to have been previously unmapped.
  ****************************************************************************************
  * @ gpt : pointer on generic page table descriptor.
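The new gpt_t counters record, for each page table, how many times a thread had to spin on a busy PTE1 or PTE2, and the total number of polling iterations those waits cost (see the instrumentation hunks in hal_gpt.c below). For context, the ptr field is the root of the two-level radix tree indexed by the VPN. A minimal sketch of that indexing, assuming the usual TSAR geometry (an assumption, not stated in this changeset): 32-bit virtual addresses and 4 KB small pages give a 20-bit VPN, split into an 11-bit ix1 (2048-entry PT1) and a 9-bit ix2 (512-entry PT2). The helper names are illustrative, not the kernel's TSAR_MMU_* macros:

    #include <stdint.h>

    // Sketch only: split a 20-bit VPN across the two-level TSAR radix tree
    // that gpt_t.ptr points to, under the assumed geometry described above.
    static inline uint32_t ix1_from_vpn( uint32_t vpn ) { return (vpn >> 9) & 0x7FF; }
    static inline uint32_t ix2_from_vpn( uint32_t vpn ) { return vpn & 0x1FF; }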
trunk/hal/tsar_mips32/core/hal_context.c
r635 → r640

  assert( (context != NULL ), "CPU context not allocated" );

+ // compute the PPN for the GPT PT1
+ ppn_t gpt_pt1_ppn = ppm_base2ppn( XPTR( local_cxy , thread->process->vmm.gpt.ptr ) );
+
  // initialisation depends on thread type
  if( thread->type == THREAD_USER )
…
      context->c0_sr   = SR_USR_MODE;
      context->c0_th   = (uint32_t)thread;
-     context->c2_ptpr = (uint32_t)((thread->process->vmm.gpt.ppn) >> 1);
+     context->c2_ptpr = (uint32_t)(gpt_pt1_ppn >> 1);
      context->c2_mode = 0xF;
  }
…
      context->c0_sr   = SR_SYS_MODE;
      context->c0_th   = (uint32_t)thread;
-     context->c2_ptpr = (uint32_t)((thread->process->vmm.gpt.ppn) >> 1);
+     context->c2_ptpr = (uint32_t)(gpt_pt1_ppn >> 1);
      context->c2_mode = 0x3;
  }
…
  process_t * child_process;   // local pointer on child process
- uint32_t    child_pt_ppn;    // PPN of child process PT1
+ void      * child_gpt_ptr;   // local pointer on child GPT PT1
+ uint32_t    child_gpt_ppn;   // PPN of child GPT PT1
  vseg_t    * child_us_vseg;   // local pointer on child user stack vseg
…
  child_process = hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );

- // get ppn of remote child process page table
- child_pt_ppn = hal_remote_l32( XPTR(child_cxy , &child_process->vmm.gpt.ppn) );
+ // get base and ppn of remote child process GPT PT1
+ child_gpt_ptr = hal_remote_lpt( XPTR( child_cxy , &child_process->vmm.gpt.ptr ) );
+ child_gpt_ppn = ppm_base2ppn( XPTR( child_cxy , child_gpt_ptr ) );

  // get local pointer on local parent uzone (in parent kernel stack)
…
  context.sp_29   = (uint32_t)child_ksp;
  context.c0_th   = (uint32_t)child_ptr;
- context.c2_ptpr = (uint32_t)child_pt_ppn >> 1;
+ context.c2_ptpr = (uint32_t)child_gpt_ppn >> 1;

  // From this point, both parent and child execute the following code,
…
  uint32_t child_sp   = parent_uzone[UZ_SP] + child_us_base - parent_us_base;
  uint32_t child_th   = (uint32_t)child_ptr;
- uint32_t child_ptpr = (uint32_t)child_pt_ppn >> 1;
+ uint32_t child_ptpr = (uint32_t)child_gpt_ppn >> 1;
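All four c2_ptpr assignments shift the PT1 PPN right by one. A plausible reading, stated here as an assumption rather than taken from the changeset: PPNs count 4 KB physical pages, while the TSAR PTPR register expects the PT1 base in 8 KB units (a 2048-entry PT1 of 4-byte PTE1s occupies 8 KB and must be 8 KB aligned), so the conversion is a single shift:

    #include <assert.h>
    #include <stdint.h>

    // Sketch of the assumed ptpr arithmetic: ptpr = paddr >> 13 = ppn >> 1.
    static uint32_t ptpr_from_pt1_ppn( uint32_t pt1_ppn )
    {
        assert( (pt1_ppn & 1) == 0 );   // 8 KB alignment: PPN spans two even 4 KB pages
        return pt1_ppn >> 1;            // convert 4 KB page units to 8 KB units
    }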
trunk/hal/tsar_mips32/core/hal_gpt.c
r637 → r640

  #include <hal_gpt.h>
  #include <hal_special.h>
+ #include <hal_irqmask.h>
  #include <printk.h>
  #include <bits.h>
…
- #define GPT_LOCK_WATCHDOG 1000000
+ #define GPT_LOCK_WATCHDOG 100000
…
  }

- gpt->ptr = base;
- gpt->ppn = ppm_base2ppn( XPTR( local_cxy , base ) );
+ // initialize the GPT descriptor
+ gpt->ptr              = base;
+ gpt->pte1_wait_events = 0;
+ gpt->pte1_wait_iters  = 0;
+ gpt->pte2_wait_events = 0;
+ gpt->pte2_wait_iters  = 0;

  #if DEBUG_HAL_GPT_CREATE
…
  if( DEBUG_HAL_GPT_CREATE < cycle )
  printk("\n[%s] thread[%x,%x] exit / pt1_base %x / pt1_ppn %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, gpt->ptr, gpt->ppn, cycle );
+ __FUNCTION__, this->process->pid, this->trdid,
+ base, ppm_base2ppn( XPTR( local_cxy , base ) ), cycle );
  #endif
…
  kmem_req_t req;

+ thread_t * this = CURRENT_THREAD;
+
  #if DEBUG_HAL_GPT_DESTROY
  uint32_t cycle = (uint32_t)hal_get_cycles();
- thread_t * this = CURRENT_THREAD;
  if( DEBUG_HAL_GPT_DESTROY < cycle )
  printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
…
  if( (pte1 & TSAR_PTE_SMALL) == 0 )     // BIG page
  {
-     printk("\n[WARNING] in %s : mapped big page / ix1 %x\n",
-            __FUNCTION__, ix1 );
+     printk("\n[WARNING] %s : valid PTE1 / thread[%x,%x] / ix1 %x\n",
+            __FUNCTION__, this->process->pid, this->trdid, ix1 );
  }
  else                                   // PT2 exist
…
  if( (attr & TSAR_PTE_MAPPED) != 0 )    // PTE2 mapped
  {
-     printk("\n[WARNING] in %s : mapped small page / ix1 %x / ix2 %x\n",
-            __FUNCTION__, ix1, ix2 );
+     printk("\n[WARNING] %s : valid PTE2 / thread[%x,%x] / ix1 %x / ix2 %x\n",
+            __FUNCTION__, this->process->pid, this->trdid, ix1, ix2 );
  }
…
  uint32_t   pte2_attr;     // PT2[ix2].attr current value
  uint32_t   pte2_ppn;      // PT2[ix2].ppn current value
- bool_t     atomic;
-
- #if GPT_LOCK_WATCHDOG
- uint32_t   count = 0;
- #endif
+ bool_t     success;       // used for both PTE1 and PTE2 mapping
+ uint32_t   count;         // watchdog
+ uint32_t   sr_save;       // for critical section

  // get cluster and local pointer on GPT
…
  thread_t * this  = CURRENT_THREAD;
  uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
  printk("\n[%s] thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
         __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
…
  pte1 = hal_remote_l32( pte1_xp );

- // If PTE1 is unmapped and unlocked, try to atomically lock this PT1 entry.
- // This PTE1 locking prevent multiple concurrent PT2 allocations
- // - only the thread that successfully locked the PTE1 allocates a new PT2
- //   and updates the PTE1
- // - all other threads simply wait until the missing PTE1 is mapped.
-
- if( pte1 == 0 )
- {
-     // try to atomically lock the PTE1 to prevent concurrent PT2 allocations
-     atomic = hal_remote_atomic_cas( pte1_xp,
-                                     pte1,
-                                     pte1 | TSAR_PTE_LOCKED );
-     if( atomic )
-     {
-         // allocate one 4 Kbytes physical page for PT2
+ // If PTE1 is unmapped, the calling thread tries to map this PTE1.
+ // To prevent multiple concurrent PT2 allocations, only the thread that
+ // successfully locked the PTE1 allocates a new PT2 and updates the PTE1.
+ // All other threads simply wait until the missing PTE1 is mapped.
+
+ if( (pte1 & TSAR_PTE_MAPPED) == 0 )
+ {
+     if( (pte1 & TSAR_PTE_LOCKED) == 0 )
+     {
+         // try to atomically lock the PTE1
+         success = hal_remote_atomic_cas( pte1_xp,
+                                          pte1,
+                                          TSAR_PTE_LOCKED );
+     }
+     else
+     {
+         success = false;
+     }
+
+     if( success )    // winner thread allocates one 4 Kbytes page for PT2
+     {
+         // enter critical section
+         hal_disable_irq( &sr_save );
+
          req.type  = KMEM_PPM;
          req.order = 0;
…
          pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;

-         // set the PTE1 value in PT1
-         // this unlocks the PTE1
+         // set the PTE1 value in PT1 / this unlocks the PTE1
          hal_remote_s32( pte1_xp , pte1 );
          hal_fence();

+         // exit critical section
+         hal_restore_irq( sr_save );
+
- #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
- if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
- printk("\n[%s] thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
- __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
- #endif
-
-     } // end if atomic
- } // end if (pte1 == 0)
-
- // wait until PTE1 is mapped by another thread
- while( (pte1 & TSAR_PTE_MAPPED) == 0 )
- {
-     pte1 = hal_remote_l32( pte1_xp );
-
- #if GPT_LOCK_WATCHDOG
-     if( count > GPT_LOCK_WATCHDOG )
-     {
-         thread_t * thread = CURRENT_THREAD;
-         printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE1 / vpn %x / cxy %x / %d iterations\n",
-                __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
-         hal_core_sleep();
-     }
-     count++;
- #endif
- }
-
- // check pte1 because only small page can be locked
- assert( (pte1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
+ #if DEBUG_HAL_GPT_LOCK_PTE
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+ printk("\n[%s] PTE1 unmapped : winner thread[%x,%x] allocates a PT2 for vpn %x in cluster %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
+ #endif
+
+     }
+     else    // other threads wait until PTE1 mapped by the winner
+     {
+
+ #if DEBUG_HAL_GPT_LOCK_PTE
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+ printk("\n[%s] PTE1 unmapped : loser thread[%x,%x] wait PTE1 for vpn %x in cluster %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
+ #endif
+
+         count = 0;
+         do
+         {
+             // get current pte1 value
+             pte1 = hal_remote_l32( pte1_xp );
+
+             // check iterations number
+             if( count > GPT_LOCK_WATCHDOG )
+             {
+                 thread_t * this  = CURRENT_THREAD;
+                 uint32_t   cycle = (uint32_t)hal_get_cycles();
+                 printk("\n[PANIC] in %s for PTE1 after %d iterations\n"
+                        "  thread[%x,%x] / vpn %x / cluster %x / pte1 %x / cycle %d\n",
+                        __FUNCTION__, count, this->process->pid, this->trdid,
+                        vpn, gpt_cxy, pte1, cycle );
+
+                 xptr_t process_xp = cluster_get_process_from_pid_in_cxy( gpt_cxy,
+                                                                          this->process->pid );
+                 hal_vmm_display( process_xp , true );
+
+                 hal_core_sleep();
+             }
+
+             // increment watchdog
+             count++;
+         }
+         while( (pte1 & TSAR_PTE_MAPPED) == 0 );
+
+ #if CONFIG_INSTRUMENTATION_GPT
+ hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte1_wait_events ) , 1 );
+ hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte1_wait_iters  ) , count );
+ #endif
+
+ #if DEBUG_HAL_GPT_LOCK_PTE
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+ printk("\n[%s] PTE1 unmapped : loser thread[%x,%x] get PTE1 for vpn %x in cluster %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
+ #endif
+     }
+ } // end if pte1 unmapped
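The PTE1 hunks above replace the old pte1 == 0 test with an explicit protocol: exactly one winner CAS-locks the unmapped PTE1 and allocates the PT2 (with IRQs masked around the allocation), while every loser polls until TSAR_PTE_MAPPED appears, guarded by the watchdog. A minimal user-space sketch of this winner/loser pattern, using C11 atomics in place of hal_remote_atomic_cas; LOCKED, MAPPED and alloc_pt2() are illustrative stand-ins, not kernel names:

    #include <stdatomic.h>
    #include <stdint.h>

    #define LOCKED  (1u << 30)
    #define MAPPED  (1u << 31)

    extern uint32_t alloc_pt2( void );   // assumed PT2 allocator (returns a ppn)

    static void get_pte1( _Atomic uint32_t * pte1 )
    {
        uint32_t old = atomic_load( pte1 );
        if( (old & MAPPED) == 0 )
        {
            uint32_t expected = old;
            if( ((old & LOCKED) == 0) &&
                atomic_compare_exchange_strong( pte1, &expected, LOCKED ) )
            {
                // winner : allocate the PT2, then publish it (this also unlocks)
                atomic_store( pte1 , MAPPED | alloc_pt2() );
            }
            else
            {
                // losers : spin until the winner publishes the mapped PTE1
                while( (atomic_load( pte1 ) & MAPPED) == 0 ) { /* watchdog here */ }
            }
        }
    }

The single publishing store doubles as the unlock, which is why the kernel code needs no separate release step for the PTE1.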
+ // This code is executed by all calling threads
+
+ // check PTE1 : only small and mapped pages can be locked
+ assert( (pte1 & (TSAR_PTE_SMALL | TSAR_PTE_MAPPED)) , "cannot lock a big or unmapped page\n");

- #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
- if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ #if DEBUG_HAL_GPT_LOCK_PTE
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
  printk("\n[%s] thread[%x,%x] get pte1 %x for vpn %x in cluster %x\n",
         __FUNCTION__, this->process->pid, this->trdid, pte1, vpn, gpt_cxy );
…
  pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );

- // wait until PTE2 atomically set using a remote CAS
- do
- {
- #if GPT_LOCK_WATCHDOG
-     count = 0;
- #endif
-
-     // wait until PTE lock released by the current owner
-     do
-     {
-         pte2_attr = hal_remote_l32( pte2_xp );
-
- #if GPT_LOCK_WATCHDOG
-         if( count > GPT_LOCK_WATCHDOG )
-         {
-             thread_t * thread = CURRENT_THREAD;
-             printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE2 / vpn %x / cxy %x / %d iterations\n",
-                    __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
-             hal_core_sleep();
-         }
-         count++;
- #endif
-     }
-     while( (pte2_attr & TSAR_PTE_LOCKED) != 0 );
-
-     // try to atomically set the TSAR_PTE_LOCKED attribute
-     atomic = hal_remote_atomic_cas( pte2_xp,
-                                     pte2_attr,
-                                     (pte2_attr | TSAR_PTE_LOCKED) );
- }
- while( atomic == 0 );
+ // initialize external loop watchdog
+ count = 0;
+
+ // in this busy waiting loop, each thread tries to atomically
+ // lock the PTE2, after checking that the PTE2 is not locked
+
+ do
+ {
+     // get current value of pte2_attr
+     pte2_attr = hal_remote_l32( pte2_xp );
+
+     // check loop watchdog
+     if( count > GPT_LOCK_WATCHDOG )
+     {
+         thread_t * this  = CURRENT_THREAD;
+         uint32_t   cycle = (uint32_t)hal_get_cycles();
+         printk("\n[PANIC] in %s for PTE2 after %d iterations\n"
+                "  thread[%x,%x] / vpn %x / cluster %x / pte2_attr %x / cycle %d\n",
+                __FUNCTION__, count, this->process->pid, this->trdid,
+                vpn, gpt_cxy, pte2_attr, cycle );
+
+         xptr_t process_xp = cluster_get_process_from_pid_in_cxy( gpt_cxy,
+                                                                  this->process->pid );
+         hal_vmm_display( process_xp , true );
+
+         hal_core_sleep();
+     }
+
+     // increment loop watchdog
+     count++;
+
+     if( (pte2_attr & TSAR_PTE_LOCKED) == 0 )
+     {
+         // try to atomically set the TSAR_PTE_LOCKED attribute
+         success = hal_remote_atomic_cas( pte2_xp,
+                                          pte2_attr,
+                                          (pte2_attr | TSAR_PTE_LOCKED) );
+     }
+     else
+     {
+         success = false;
+     }
+ }
+ while( success == false );
+
+ #if CONFIG_INSTRUMENTATION_GPT
+ hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte2_wait_events ) , 1 );
+ hal_remote_atomic_add( XPTR( gpt_cxy , &gpt_ptr->pte2_wait_iters  ) , count );
+ #endif

  // get PTE2.ppn
…
  #if DEBUG_HAL_GPT_LOCK_PTE
  cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
- printk("\n[%s] thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0x3600) && (gpt_cxy == 0x11) )
+ printk("\n[%s] thread[%x,%x] success / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, pte2_attr, pte2_ppn, cycle );
  #endif
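The rewritten PTE2 loop is a test-and-test-and-set lock: each iteration reads the attribute word with a plain load and attempts the CAS only when TSAR_PTE_LOCKED is clear, so the expensive atomic is issued only when the lock looks free, and the watchdog turns an unreleased lock into a panic with a VMM dump instead of a silent hang. A compact sketch of the same pattern with C11 atomics; LOCKED, LOCK_WATCHDOG and panic() are placeholders, not kernel names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define LOCKED        (1u << 30)
    #define LOCK_WATCHDOG 100000

    extern void panic( const char * msg );   // assumed fatal-error helper

    static uint32_t lock_pte2( _Atomic uint32_t * attr )
    {
        uint32_t count = 0;
        uint32_t old;
        bool     success;
        do
        {
            old = atomic_load( attr );                    // cheap read first
            if( count++ > LOCK_WATCHDOG ) panic( "PTE2 lock never released" );

            success = false;
            if( (old & LOCKED) == 0 )                     // CAS only when free
            {
                uint32_t expected = old;
                success = atomic_compare_exchange_strong( attr, &expected,
                                                          old | LOCKED );
            }
        }
        while( ! success );
        return old;                                       // attr value when locked
    }

Testing before the CAS keeps waiters spinning on a cached copy; the invalidating atomic access is attempted only on a likely-free lock, which is what the per-GPT iteration counters are meant to quantify.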
…
  gpt_t * gpt_ptr = GET_PTR( gpt_xp );

- #if DEBUG_HAL_GPT_LOCK_PTE
- thread_t * this  = CURRENT_THREAD;
- uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
- printk("\n[%s] thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
- #endif
-
  // compute indexes in PT1 and PT2
  uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
  uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

  // get local pointer on PT1
  pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

  // build extended pointer on PTE1 == PT1[ix1]
  pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );

  // get current pte1 value
  pte1 = hal_remote_l32( pte1_xp );

- // check PTE1 attributes
- assert( ((pte1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");
- assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");
+ assert( ((pte1 & TSAR_PTE_MAPPED) != 0),
+ "PTE1 for vpn %x in cluster %x is unmapped / pte1 = %x\n", vpn, gpt_cxy, pte1 );
+
+ assert( ((pte1 & TSAR_PTE_SMALL ) != 0),
+ "PTE1 for vpn %x in cluster %x is not small / pte1 = %x\n", vpn, gpt_cxy, pte1 );

  // get pointer on PT2 base
  pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
  pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );

  // build extended pointer on PT2[ix2].attr
  pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );

  // get PT2[ix2].attr
  pte2_attr = hal_remote_l32( pte2_xp );

- // check PTE2 attributes
- assert( ((pte2_attr & TSAR_PTE_MAPPED) != 0), "unmapped PTE2\n");
- assert( ((pte2_attr & TSAR_PTE_LOCKED) != 0), "unlocked PTE2\n");
+ assert( ((pte2_attr & TSAR_PTE_LOCKED) != 0),
+ "PTE2 for vpn %x in cluster %x is unlocked / pte2_attr = %x\n", vpn, gpt_cxy, pte2_attr );

  // reset TSAR_PTE_LOCKED attribute
  hal_remote_s32( pte2_xp , pte2_attr & ~TSAR_PTE_LOCKED );

  #if DEBUG_HAL_GPT_LOCK_PTE
- cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ thread_t * this  = CURRENT_THREAD;
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ // if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+ if( (vpn == 0xc5fff) && (gpt_cxy == 0x1) )
  printk("\n[%s] thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
…
  // PTE1 must be mapped because PTE2 must be locked
- assert( (pte1 & TSAR_PTE_MAPPED), "PTE1 must be mapped\n" );
+ assert( (pte1 & TSAR_PTE_MAPPED),
+ "PTE1 for vpn %x in cluster %x must be mapped / pte1 = %x\n", vpn, gpt_cxy, pte1 );

  // get PT2 base
…
  // PTE2 must be locked
- assert( (pte2_attr & TSAR_PTE_LOCKED), "PTE2 must be locked\n" );
+ assert( (pte2_attr & TSAR_PTE_LOCKED),
+ "PTE2 for vpn %x in cluster %x must be locked / pte2_attr = %x\n", vpn, gpt_cxy, pte2_attr );

  // set PTE2 in PT2 (in this order)
trunk/hal/tsar_mips32/core/hal_vmm.c
r637 → r640

  }

+ #if CONFIG_INSTRUMENTATION_GPT
+ uint32_t pte1_events = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte1_wait_events ) );
+ uint32_t pte1_iters  = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte1_wait_iters  ) );
+ uint32_t pte1_ratio  = (pte1_events == 0) ? 0 : (pte1_iters / pte1_events);
+ nolock_printk("\nGPT_WAIT_PTE1 : %d events / %d iterations => %d iter/event\n",
+               pte1_events, pte1_iters, pte1_ratio );
+
+ uint32_t pte2_events = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte2_wait_events ) );
+ uint32_t pte2_iters  = hal_remote_l32( XPTR( process_cxy , &vmm->gpt.pte2_wait_iters  ) );
+ uint32_t pte2_ratio  = (pte2_events == 0) ? 0 : (pte2_iters / pte2_events);
+ nolock_printk("GPT_WAIT_PTE2 : %d events / %d iterations => %d iter/event\n",
+               pte2_events, pte2_iters, pte2_ratio );
+ #endif
+
  // release locks
  remote_busylock_release( txt_lock_xp );
…
  } // hal_vmm_display()
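The ratio printed above is a simple contention metric: total polling iterations divided by the number of wait episodes. An average near 1 means a contended PTE is almost always free by the time a waiter re-reads it; large values flag hot page-table entries. A standalone sketch of the same computation, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    // Print one wait-statistics line in the same format as hal_vmm_display().
    static void print_wait_stats( const char * name,
                                  uint32_t     events,   // number of wait episodes
                                  uint32_t     iters )   // total polling iterations
    {
        uint32_t ratio = (events == 0) ? 0 : (iters / events);
        printf( "%s : %u events / %u iterations => %u iter/event\n",
                name, events, iters, ratio );
    }

    // example : print_wait_stats( "GPT_WAIT_PTE2", 250, 12500 ) reports 50 iter/event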