Changeset 635 for trunk/hal/tsar_mips32/core/hal_gpt.c
Timestamp: Jun 26, 2019, 11:42:37 AM
Files: 1 edited
Legend: unchanged context lines are shown indented; lines prefixed with '-' appear only in r633 (removed), lines prefixed with '+' only in r635 (added); '…' marks elided unchanged code.
trunk/hal/tsar_mips32/core/hal_gpt.c
Diff r633:r635

…
  * hal_gpt.c - implementation of the Generic Page Table API for TSAR-MIPS32
  *
- * Author  Alain Greiner (2016,2017,2018)
+ * Author  Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #define TSAR_MMU_IX2_FROM_VPN( vpn )     (vpn & 0x1FF)

- #define TSAR_MMU_PTBA_FROM_PTE1( pte1 )  (pte1 & 0x0FFFFFFF)
- #define TSAR_MMU_PPN_FROM_PTE1( pte1 )   ((pte1 & 0x0007FFFF)<<9)
+ #define TSAR_MMU_PPN2_FROM_PTE1( pte1 )  (pte1 & 0x0FFFFFFF)
+ #define TSAR_MMU_PPN1_FROM_PTE1( pte1 )  ((pte1 & 0x0007FFFF)<<9)
  #define TSAR_MMU_ATTR_FROM_PTE1( pte1 )  (pte1 & 0xFFC00000)
…
  error_t hal_gpt_create( gpt_t * gpt )
  {
- page_t * page;
- xptr_t   page_xp;
+ void * base;

  thread_t * this = CURRENT_THREAD;
…
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_GPT_CREATE < cycle )
- printk("\n[%s] : thread[%x,%x] enter / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, cycle );
  #endif

  // check page size
- assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "for TSAR, the page size must be 4 Kbytes\n" );
+ assert( (CONFIG_PPM_PAGE_SIZE == 4096) , "the TSAR page size must be 4 Kbytes\n" );

  // allocates 2 physical pages for PT1
  kmem_req_t req;
- req.type  = KMEM_PAGE;
- req.size  = 1;                  // 2 small pages
+ req.type  = KMEM_PPM;
+ req.order = 1;                  // 2 small pages
  req.flags = AF_KERNEL | AF_ZERO;
- page = (page_t *)kmem_alloc( &req );
-
- if( page == NULL )
+ base = kmem_alloc( &req );
+
+ if( base == NULL )
  {
  printk("\n[PANIC] in %s : no memory for PT1 / process %x / cluster %x\n",
…
  }

- // initialize generic page table descriptor
- page_xp  = XPTR( local_cxy , page );
- gpt->ptr = GET_PTR( ppm_page2base( page_xp ) );
- gpt->ppn = ppm_page2ppn( page_xp );
+ gpt->ptr = base;
+ gpt->ppn = ppm_base2ppn( XPTR( local_cxy , base ) );

  #if DEBUG_HAL_GPT_CREATE
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_GPT_CREATE < cycle )
- printk("\n[%s] : thread[%x,%x] exit/ cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, cycle );
+ printk("\n[%s] thread[%x,%x] exit / pt1_base %x / pt1_ppn %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, gpt->ptr, gpt->ppn, cycle );
  #endif
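The renamed PPN2/PPN1 macros and the index macros above encode the TSAR two-level layout: a VPN splits into an 11-bit PT1 index and a 9-bit PT2 index, which is why hal_gpt_create allocates two small pages (2048 entries of 4 bytes = 8 Kbytes) for PT1. A minimal standalone sketch of that arithmetic; the IX1 mask is an assumption, since only the IX2 macro appears in this excerpt:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    // Hedged sketch: the TSAR VPN decomposition used by the macros above.
    #define IX1_FROM_VPN( vpn )  (((vpn) >> 9) & 0x7FF)   // 11-bit PT1 index (assumed mask)
    #define IX2_FROM_VPN( vpn )  ((vpn) & 0x1FF)          //  9-bit PT2 index (as in the diff)

    int main( void )
    {
        uint32_t vpn = 0x12345;                  // any 20-bit virtual page number
        uint32_t ix1 = IX1_FROM_VPN( vpn );
        uint32_t ix2 = IX2_FROM_VPN( vpn );

        // recombining both indexes must give back the original VPN
        assert( ((ix1 << 9) | ix2) == vpn );

        // PT1 size : 2048 entries * 4 bytes = 8 Kbytes = 2 small pages (order 1)
        printf("ix1 = %x / ix2 = %x / PT1 = %u bytes\n", ix1, ix2, 2048u * 4u );
        return 0;
    }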
…
  thread_t * this = CURRENT_THREAD;
  if( DEBUG_HAL_GPT_DESTROY < cycle )
- printk("\n[%s] : thread[%x,%x] enter / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enter / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, cycle );
  #endif
…
  {
  // get local pointer on PT2
- pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
- xptr_t base_xp = ppm_ppn2base( pt2_ppn );
- pt2 = GET_PTR( base_xp );
+ pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
+ pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

  // scan the PT2
…
  // release the page allocated for the PT2
- req.type = KMEM_PAGE;
- req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt2 ) ) );
+ req.type = KMEM_PPM;
+ req.ptr  = pt2;
  kmem_free( &req );
  }
…
  // release the PT1
- req.type = KMEM_PAGE;
- req.ptr  = GET_PTR( ppm_base2page( XPTR(local_cxy , pt1 ) ) );
+ req.type = KMEM_PPM;
+ req.ptr  = pt1;
  kmem_free( &req );

  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_GPT_DESTROY < cycle )
- printk("\n[%s] : thread[%x,%x] exit / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, cycle );
  #endif

  } // end hal_gpt_destroy()

- /*
-
- /////////////////////////////////////////////////////////////////////////////////////
- // This static function can be used for debug.
- /////////////////////////////////////////////////////////////////////////////////////
- static void hal_gpt_display( process_t * process )
- {
- gpt_t    * gpt;
- uint32_t   ix1;
- uint32_t   ix2;
- uint32_t * pt1;
- uint32_t   pte1;
- ppn_t      pt2_ppn;
- uint32_t * pt2;
- uint32_t   pte2_attr;
- ppn_t      pte2_ppn;
- vpn_t      vpn;
-
- // check argument
- assert( (process != NULL) , "NULL process pointer\n");
-
- // get pointer on gpt
- gpt = &(process->vmm.gpt);
-
- // get pointer on PT1
- pt1 = (uint32_t *)gpt->ptr;
-
- printk("\n***** Tsar Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
- process->pid , gpt , pt1 );
-
- // scan the PT1
- for( ix1 = 0 ; ix1 < 2048 ; ix1++ )
- {
- pte1 = pt1[ix1];
- if( (pte1 & TSAR_PTE_MAPPED) != 0 )
- {
- if( (pte1 & TSAR_PTE_SMALL) == 0 )   // BIG page
- {
- vpn = ix1 << 9;
- printk(" - BIG   : vpn = %x / pt1[%d] = %X\n", vpn , ix1 , pte1 );
- }
- else                                 // SMALL pages
- {
- pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
- xptr_t base_xp = ppm_ppn2base ( pt2_ppn );
- pt2 = GET_PTR( base_xp );
-
- // scan the PT2
- for( ix2 = 0 ; ix2 < 512 ; ix2++ )
- {
- pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
- pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
-
- if( (pte2_attr & TSAR_PTE_MAPPED) != 0 )
- {
- vpn = (ix1 << 9) | ix2;
- printk(" - SMALL : vpn %X / ppn %X / attr %X\n",
- vpn , pte2_ppn , tsar2gpt(pte2_attr) );
- }
- }
- }
- }
- }
- } // end hal_gpt_display()
-
- */
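The recurring change in hal_gpt_create and hal_gpt_destroy (and below in hal_gpt_lock_pte and hal_gpt_pte_copy) replaces KMEM_PAGE requests, which returned a page_t descriptor that had to be converted with ppm_page2base/ppm_base2page, by KMEM_PPM requests that return and accept base addresses directly. A hedged sketch of the new allocate/free pairing, using only the request fields that appear in this diff (kernel code, so shown as a fragment rather than a runnable program):

    // Sketch of the r635 allocation pattern for one PT2 (one small page).
    kmem_req_t req;
    void     * pt2;

    req.type  = KMEM_PPM;              // physical-pages allocator
    req.order = 0;                     // 2^0 = 1 small page (4 Kbytes)
    req.flags = AF_KERNEL | AF_ZERO;   // kernel memory, zeroed
    pt2 = kmem_alloc( &req );

    if( pt2 != NULL )
    {
        // ... use pt2 as the PT2 base address ...

        req.type = KMEM_PPM;           // kmem_free now takes the base address
        req.ptr  = pt2;
        kmem_free( &req );
    }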
…
  ppn_t * ppn )
  {
- uint32_t * pt1_ptr;   // local pointer on PT1 base
- xptr_t     ptd1_xp;   // extended pointer on PT1[x1] entry
- uint32_t   ptd1;      // value of PT1[x1] entry
-
- xptr_t     page_xp;
-
+ uint32_t * pt1;       // local pointer on PT1 base
+ xptr_t     pte1_xp;   // extended pointer on PT1[x1] entry
+ uint32_t   pte1;      // value of PT1[x1] entry
+
+ kmem_req_t req;       // kmem request fro PT2 allocation
+
+ uint32_t * pt2;       // local pointer on PT2 base
  ppn_t      pt2_ppn;   // PPN of page containing PT2
- uint32_t * pt2_ptr;   // local pointer on PT2 base
  xptr_t     pte2_xp;   // extended pointer on PT2[ix2].attr
  uint32_t   pte2_attr; // PT2[ix2].attr current value
…
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
- printk("\n[%s] : thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
  #endif

  // get indexes in PTI & PT2 from vpn
- uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );  // index in PT1
- uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );  // index in PT2
+ uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
+ uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

  // get local pointer on PT1
- pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
-
- // build extended pointer on PTD1 == PT1[ix1]
- ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
+ pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+
+ // build extended pointer on PTE1 == PT1[ix1]
+ pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );

  // get current PT1 entry value
- ptd1 = hal_remote_l32( ptd1_xp );
-
- // If PTD1 is unmapped and unlocked, try to atomically lock this PT1 entry.
- // This PTD1 lock prevent multiple concurrent PT2 allocations
- // - only the thread that successfully locked the PTD1 allocates a new PT2
- //   and updates the PTD1
- // - all other threads simply wait until the missing PTD1 is mapped.
-
- if( ptd1 == 0 )
+ pte1 = hal_remote_l32( pte1_xp );
+
+ // If PTE1 is unmapped and unlocked, try to atomically lock this PT1 entry.
+ // This PTE1 locking prevent multiple concurrent PT2 allocations
+ // - only the thread that successfully locked the PTE1 allocates a new PT2
+ //   and updates the PTE1
+ // - all other threads simply wait until the missing PTE1 is mapped.
+
+ if( pte1 == 0 )
  {
- // try to atomically lock the PTD1 to prevent concurrent PT2 allocations
- atomic = hal_remote_atomic_cas( ptd1_xp,
-                                 ptd1,
-                                 ptd1 | TSAR_PTE_LOCKED );
+ // try to atomically lock the PTE1 to prevent concurrent PT2 allocations
+ atomic = hal_remote_atomic_cas( pte1_xp,
+                                 pte1,
+                                 pte1 | TSAR_PTE_LOCKED );
  if( atomic )
  {
  // allocate one 4 Kbytes physical page for PT2
- page_xp = ppm_remote_alloc_pages( gpt_cxy , 0 );
-
- if( page_xp == XPTR_NULL )
+ req.type  = KMEM_PPM;
+ req.order = 0;
+ req.flags = AF_ZERO | AF_KERNEL;
+ pt2 = kmem_remote_alloc( gpt_cxy , &req );
+
+ if( pt2 == NULL )
  {
- printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
+ printk("\n[ERROR] in %s : cannot allocate memory for PT2 in cluster %d\n",
+ __FUNCTION__, gpt_cxy );
  return -1;
  }

  // get the PT2 PPN
- pt2_ppn = ppm_page2ppn( page_xp );
+ pt2_ppn = ppm_base2ppn( XPTR( gpt_cxy , pt2 ) );

- // build PTD1
- ptd1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
+ // build PTE1
+ pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;

- // set the PTD1 value in PT1
- // this unlocks the PTD1
- hal_remote_s32( ptd1_xp , ptd1 );
+ // set the PTE1 value in PT1
+ // this unlocks the PTE1
+ hal_remote_s32( pte1_xp , pte1 );
  hal_fence();

  #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
- printk("\n[%s] : thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
+ printk("\n[%s] thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
  #endif

  } // end if atomic
- } // end if (ptd1 == 0)
-
- // wait until PTD1 is mapped by another thread
- while( (ptd1 & TSAR_PTE_MAPPED) == 0 )
+ } // end if (pte1 == 0)
+
+ // wait until PTE1 is mapped by another thread
+ while( (pte1 & TSAR_PTE_MAPPED) == 0 )
  {
- ptd1 = hal_remote_l32( ptd1_xp );
+ pte1 = hal_remote_l32( pte1_xp );

  #if GPT_LOCK_WATCHDOG
…
  {
  thread_t * thread = CURRENT_THREAD;
- printk("\n[PANIC] in %s : thread[%x,%x] waiting PTD1 / vpn %x / cxy %x / %d iterations\n",
+ printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE1 / vpn %x / cxy %x / %d iterations\n",
  __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
  hal_core_sleep();
…
  }

- // check ptd1 because only small page can be locked
- assert( (ptd1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
+ // check pte1 because only small page can be locked
+ assert( (pte1 & TSAR_PTE_SMALL), "cannot lock a big page\n");
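The comments above describe a small synchronization protocol: an unmapped PTE1 is claimed with a remote CAS, the winner allocates and publishes the PT2, and every other thread spins until the MAPPED bit appears. A user-space analogue with C11 atomics standing in for hal_remote_atomic_cas / hal_remote_s32 / hal_remote_l32 (bit values hypothetical, for illustration only):

    #include <stdatomic.h>
    #include <stdint.h>

    #define PTE_MAPPED  0x80000000u   // hypothetical bit positions
    #define PTE_LOCKED  0x00000100u

    // Analogue of the PTE1 protocol: claim with CAS, publish with a store.
    static void map_missing_entry( _Atomic uint32_t * pte1,
                                   uint32_t           new_value )
    {
        uint32_t expected = 0;

        if( atomic_compare_exchange_strong( pte1, &expected, PTE_LOCKED ) )
        {
            // winner : build the entry, then publish it (this also unlocks it)
            atomic_store( pte1, new_value | PTE_MAPPED );
        }
        else
        {
            // losers : wait until the winner has published the MAPPED entry
            while( (atomic_load( pte1 ) & PTE_MAPPED) == 0 ) { /* spin */ }
        }
    }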
d1 & TSAR_PTE_SMALL), "cannot lock a big page\n");370 // check pte1 because only small page can be locked 371 assert( (pte1 & TSAR_PTE_SMALL), "cannot lock a big page\n"); 439 372 440 373 #if (DEBUG_HAL_GPT_LOCK_PTE & 1) 441 374 if( DEBUG_HAL_GPT_LOCK_PTE < cycle ) 442 printk("\n[%s] : thread[%x,%x] get ptd1 %x for vpn %x in cluster %x\n",443 __FUNCTION__, this->process->pid, this->trdid, pt d1, vpn, gpt_cxy );444 #endif 445 446 // get pointer on PT2 base from PTD1447 pt2_ppn = TSAR_MMU_P TBA_FROM_PTE1( ptd1 );448 pt2 _ptr= GET_PTR( ppm_ppn2base( pt2_ppn ) );375 printk("\n[%s] thread[%x,%x] get pte1 %x for vpn %x in cluster %x\n", 376 __FUNCTION__, this->process->pid, this->trdid, pte1, vpn, gpt_cxy ); 377 #endif 378 379 // get pointer on PT2 base 380 pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 ); 381 pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 449 382 450 383 // build extended pointers on PT2[ix2].attr 451 pte2_xp = XPTR( gpt_cxy , &pt2 _ptr[2 * ix2] );384 pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] ); 452 385 453 386 // wait until PTE2 atomically set using a remote CAS … … 491 424 cycle = (uint32_t)hal_get_cycles(); 492 425 if( DEBUG_HAL_GPT_LOCK_PTE < cycle ) 493 printk("\n[%s] :thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",426 printk("\n[%s] thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n", 494 427 __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, pte2_attr, pte2_ppn, cycle ); 495 428 #endif … … 506 439 vpn_t vpn ) 507 440 { 508 uint32_t * pt1_ptr; // local pointer on PT1 base 509 xptr_t ptd1_xp; // extended pointer on PT1[ix1] 510 uint32_t ptd1; // value of PT1[ix1] entry 511 441 uint32_t * pt1; // local pointer on PT1 base 442 xptr_t pte1_xp; // extended pointer on PT1[ix1] 443 uint32_t pte1; // value of PT1[ix1] entry 444 445 uint32_t * pt2; // PT2 base address 512 446 ppn_t pt2_ppn; // PPN of page containing PT2 513 uint32_t * pt2_ptr; // PT2 base address514 447 xptr_t pte2_xp; // extended pointer on PT2[ix2].attr 515 448 uint32_t pte2_attr; // PTE2 attribute … … 523 456 uint32_t cycle = (uint32_t)hal_get_cycles(); 524 457 if( DEBUG_HAL_GPT_LOCK_PTE < cycle ) 525 printk("\n[%s] :thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",458 printk("\n[%s] thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n", 526 459 __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle ); 527 460 #endif 528 461 529 462 // compute indexes in P1 and PT2 530 uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn ); // index in PT1531 uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn ); // index in PT2463 uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn ); 464 uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn ); 532 465 533 466 // get local pointer on PT1 534 pt1 _ptr= hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );535 536 // build extended pointer on PT D1 == PT1[ix1]537 pt d1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );538 539 // get current pt d1 value540 pt d1 = hal_remote_l32( ptd1_xp );541 542 // check PT D1 attributes543 assert( ((pt d1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");544 assert( ((pt d1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");545 546 // get pointer on PT2 base from PTD1547 pt2_ppn = TSAR_MMU_P TBA_FROM_PTE1( ptd1 );548 pt2 _ptr= GET_PTR( ppm_ppn2base( pt2_ppn ) );467 pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) ); 468 469 // build extended pointer on PTE1 == PT1[ix1] 470 pte1_xp = XPTR( gpt_cxy , &pt1[ix1] ); 471 472 // get current pte1 value 473 pte1 = hal_remote_l32( pte1_xp ); 474 475 // check PTE1 attributes 476 assert( ((pte1 
& TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n"); 477 assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n"); 478 479 // get pointer on PT2 base 480 pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 ); 481 pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 549 482 550 483 // build extended pointers on PT2[ix2].attr 551 pte2_xp = XPTR( gpt_cxy , &pt2 _ptr[2 * ix2] );484 pte2_xp = XPTR( gpt_cxy , &pt2[2 * ix2] ); 552 485 553 486 // get PT2[ix2].attr … … 564 497 cycle = (uint32_t)hal_get_cycles(); 565 498 if( DEBUG_HAL_GPT_LOCK_PTE < cycle ) 566 printk("\n[%s] :thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",499 printk("\n[%s] thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n", 567 500 __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle ); 568 501 #endif … … 580 513 gpt_t * gpt_ptr; // target GPT local pointer 581 514 582 uint32_t * pt1 _ptr;// local pointer on PT1 base515 uint32_t * pt1; // local pointer on PT1 base 583 516 xptr_t pte1_xp; // extended pointer on PT1 entry 584 517 uint32_t pte1; // PT1 entry value if PTE1 585 518 519 uint32_t * pt2; // local pointer on PT2 base 586 520 ppn_t pt2_ppn; // PPN of PT2 587 uint32_t * pt2_ptr; // local pointer on PT2 base588 521 xptr_t pte2_attr_xp; // extended pointer on PT2[ix2].attr 589 522 xptr_t pte2_ppn_xp; // extended pointer on PT2[ix2].ppn … … 604 537 ix2 = TSAR_MMU_IX2_FROM_VPN( vpn ); 605 538 606 pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) ); 607 small = attr & GPT_SMALL; 539 #if DEBUG_HAL_GPT_SET_PTE 540 thread_t * this = CURRENT_THREAD; 541 uint32_t cycle = (uint32_t)hal_get_cycles(); 542 if( DEBUG_HAL_GPT_SET_PTE < cycle ) 543 printk("\n[%s] thread[%x,%x] enter gpt (%x,%x) / vpn %x / attr %x / ppn %x\n", 544 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, &gpt_ptr->ptr, vpn, attr, ppn ); 545 #endif 546 547 small = attr & GPT_SMALL; 548 549 // get local pointer on PT1 550 pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) ); 608 551 609 552 // compute tsar attributes from generic attributes … … 611 554 612 555 // build extended pointer on PTE1 = PT1[ix1] 613 pte1_xp = XPTR( gpt_cxy , &pt1 _ptr[ix1] );556 pte1_xp = XPTR( gpt_cxy , &pt1[ix1] ); 614 557 615 558 // get current pte1 value … … 634 577 635 578 #if DEBUG_HAL_GPT_SET_PTE 636 thread_t * this = CURRENT_THREAD;637 uint32_t cycle = (uint32_t)hal_get_cycles();638 579 if( DEBUG_HAL_GPT_SET_PTE < cycle ) 639 printk("\n[%s] :thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",640 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1 _ptr, pte1 );580 printk("\n[%s] thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n", 581 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1, pte1 ); 641 582 #endif 642 583 … … 648 589 assert( (pte1 & TSAR_PTE_MAPPED), "PTE1 must be mapped\n" ); 649 590 650 // get PT2 base from PTE1651 pt2_ppn = TSAR_MMU_P TBA_FROM_PTE1( pte1 );652 pt2 _ptr= GET_PTR( ppm_ppn2base( pt2_ppn ) );591 // get PT2 base 592 pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 ); 593 pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 653 594 654 595 // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn 655 pte2_attr_xp = XPTR( gpt_cxy , &pt2 _ptr[2 * ix2] );656 pte2_ppn_xp = XPTR( gpt_cxy , &pt2 _ptr[2 * ix2 + 1] );596 pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] ); 597 pte2_ppn_xp = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] ); 657 598 658 599 // get current value of PTE2.attr … … 672 613 uint32_t cycle = (uint32_t)hal_get_cycles(); 673 614 if( DEBUG_HAL_GPT_SET_PTE < cycle ) 674 printk("\n[%s] :thread[%x,%x] map PTE2 / cxy 
…
  uint32_t ix2;             // index in PT2

- uint32_t * pt1_ptr;       // PT1 base address
+ uint32_t * pt1;           // PT1 base address
  xptr_t     pte1_xp;       // extended pointer on PT1[ix1]
  uint32_t   pte1;          // PT1 entry value

+ uint32_t * pt2;           // PT2 base address
  ppn_t      pt2_ppn;       // PPN of PT2
- uint32_t * pt2_ptr;       // PT2 base address
  xptr_t     pte2_attr_xp;  // extended pointer on PT2[ix2].attr
  xptr_t     pte2_ppn_xp;   // extended pointer on PT2[ix2].ppn
…
  // get local pointer on PT1 base
- pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+ pt1 = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

  // build extended pointer on PTE1 = PT1[ix1]
- pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
+ pte1_xp = XPTR( gpt_cxy , &pt1[ix1] );

  // get current PTE1 value
…
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_GPT_RESET_PTE < cycle )
- printk("\n[%s] : thread[%x,%x] unmap PTE1 / cxy %x / vpn %x / ix1 %x\n",
+ printk("\n[%s] thread[%x,%x] unmap PTE1 / cxy %x / vpn %x / ix1 %x\n",
  __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, vpn, ix1 );
  #endif
…
  else   // it's a PTE2 => unmap it from PT2
  {
- // compute PT2 base address
- pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
- pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+ // get PT2 base
+ pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
+ pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

  // build extended pointer on PT2[ix2].attr and PT2[ix2].ppn
- pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
- pte2_ppn_xp  = XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] );
+ pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
+ pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );

  // unmap the PTE2
…
  uint32_t cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_HAL_GPT_RESET_PTE < cycle )
- printk("\n[%s] : thread[%x,%x] unmap PTE2 / cxy %x / vpn %x / ix2 %x\n",
+ printk("\n[%s] thread[%x,%x] unmap PTE2 / cxy %x / vpn %x / ix2 %x\n",
  __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, vpn, ix2 );
  #endif
…
  if( (pte1 & TSAR_PTE_SMALL) == 0 )   // it's a PTE1
  {
- // get PPN & ATTR from PT1
+ // get PPN & ATTR
  *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
- *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
+ *ppn  = TSAR_MMU_PPN1_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
  }
  else                                 // it's a PTE2
  {
  // compute PT2 base address
- pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+ pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
  pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
…
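The PPN1 rename makes the big-page path of hal_gpt_get_pte easier to read: a PTE1 big page covers 512 small pages, so the returned PPN is the 512-aligned PPN1 plus the low nine bits of the VPN. A worked sketch of that line:

    #include <stdint.h>
    #include <assert.h>

    #define PPN1_FROM_PTE1( pte1 )  (((pte1) & 0x0007FFFF) << 9)   // as in the diff
    #define IX2_WIDTH               9

    int main( void )
    {
        uint32_t pte1 = 0x00000123;            // hypothetical big-page PTE1
        uint32_t vpn  = (7 << 9) | 0x1AB;      // a VPN somewhere inside that page

        // the hal_gpt_get_pte computation for a big page:
        uint32_t ppn = PPN1_FROM_PTE1( pte1 ) | (vpn & ((1 << IX2_WIDTH) - 1));

        assert( ppn == ((0x123 << 9) | 0x1AB) );   // aligned base + offset
        return 0;
    }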
…
  uint32_t * src_pt1;   // local pointer on SRC PT1
  uint32_t * dst_pt1;   // local pointer on DST PT1
+
  uint32_t * src_pt2;   // local pointer on SRC PT2
  uint32_t * dst_pt2;   // local pointer on DST PT2
…
  thread_t * this = CURRENT_THREAD;
  if( DEBUG_HAL_GPT_COPY < cycle )
- printk("\n[%s] : thread[%x,%x] enter / src_cxy %x / dst_cxy %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] enter / src_cxy %x / dst_cxy %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, src_cxy, local_cxy, cycle );
  #endif
-
- // get remote src_gpt cluster and local pointer
- src_cxy = GET_CXY( src_gpt_xp );
- src_gpt = GET_PTR( src_gpt_xp );

  // get remote src_pt1 and local dst_pt1
…
  dst_pte1 = dst_pt1[dst_ix1];

- // map dst_pte1 if required
+ // map dst_pte1 when this entry is not mapped
  if( (dst_pte1 & TSAR_PTE_MAPPED) == 0 )
  {
  // allocate one physical page for a new PT2
- req.type  = KMEM_PAGE;
- req.size  = 0;                  // 1 small page
+ req.type  = KMEM_PPM;
+ req.order = 0;                  // 1 small page
  req.flags = AF_KERNEL | AF_ZERO;
- page = (page_t *)kmem_alloc( &req );
-
- if( page == NULL )
+ dst_pt2 = kmem_alloc( &req );
+
+ if( dst_pt2 == NULL )
  {
  printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
…
  // get PPN for this new PT2
- dst_pt2_ppn = (ppn_t)ppm_page2ppn( page_xp );
+ dst_pt2_ppn = ppm_base2ppn( XPTR( local_cxy , dst_pt2 ) );

- // build the new dst_pte1
+ // build new dst_pte1
  dst_pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | dst_pt2_ppn;
…
  // get pointer on src_pt2
- src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
+ src_pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( src_pte1 );
  src_pt2 = GET_PTR( ppm_ppn2base( src_pt2_ppn ) );

  // get pointer on dst_pt2
- dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
+ dst_pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( dst_pte1 );
  dst_pt2 = GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
…
  cycle = (uint32_t)hal_get_cycles;
  if( DEBUG_HAL_GPT_COPY < cycle )
- printk("\n[%s] : thread[%x,%x] exit / copy done for src_vpn %x / dst_vpn %x / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / copy done for src_vpn %x / dst_vpn %x / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, src_vpn, dst_vpn, cycle );
  #endif
…
  cycle = (uint32_t)hal_get_cycles;
  if( DEBUG_HAL_GPT_COPY < cycle )
- printk("\n[%s] : thread[%x,%x] exit / nothing done / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] exit / nothing done / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, cycle );
  #endif
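The deleted GET_CXY/GET_PTR pair is a reminder that every remote access in this file goes through extended pointers that combine a cluster identifier with a local pointer. A minimal sketch of one plausible encoding; the actual ALMOS-MKH definition of xptr_t may differ, so every name below is hypothetical:

    #include <stdint.h>
    #include <assert.h>

    typedef uint64_t xptr64_t;   // hypothetical stand-in for xptr_t

    #define XPTR64( cxy , lpa )  (((xptr64_t)(cxy) << 32) | (uint32_t)(lpa))
    #define GET_CXY64( xp )      ((uint32_t)((xp) >> 32))
    #define GET_LPA64( xp )      ((uint32_t)(xp))

    int main( void )
    {
        uint32_t lpa = 0x00801000;            // a 32-bit local address (TSAR is 32-bit)
        xptr64_t xp  = XPTR64( 0x5 , lpa );   // same address, seen from cluster 5

        assert( GET_CXY64( xp ) == 0x5 );
        assert( GET_LPA64( xp ) == lpa );
        return 0;
    }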
…
  gpt_t * gpt_ptr;

- vpn_t vpn;
-
- uint32_t ix1;
- uint32_t ix2;
+ uint32_t ix1;       // current
+ uint32_t ix2;       // current
+
+ vpn_t    vpn_min;
+ vpn_t    vpn_max;   // included
+
+ uint32_t ix1_min;
+ uint32_t ix1_max;   // included
+
+ uint32_t ix2_min;
+ uint32_t ix2_max;   // included

  uint32_t * pt1;
…
  gpt_ptr = GET_PTR( gpt_xp );

- // get local PT1 pointer
+ #if DEBUG_HAL_GPT_SET_COW
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ thread_t * this  = CURRENT_THREAD;
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] enter / gpt[%x,%x] / vpn_base %x / vpn_size %x / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, gpt_ptr, vpn_base, vpn_size, cycle );
+ #endif
+
+ // get PT1 pointer
  pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

- // loop on pages
- for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
+ #if (DEBUG_HAL_GPT_SET_COW & 1)
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] get pt1 = %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, pt1 );
+ #endif
+
+ vpn_min = vpn_base;
+ vpn_max = vpn_base + vpn_size - 1;
+
+ ix1_min = TSAR_MMU_IX1_FROM_VPN( vpn_base );
+ ix1_max = TSAR_MMU_IX1_FROM_VPN( vpn_max );
+
+ for( ix1 = ix1_min ; ix1 <= ix1_max ; ix1++ )
  {
- ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
- ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
-
+
+ #if (DEBUG_HAL_GPT_SET_COW & 1)
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] : &pt1[%x] = %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, ix1, &pt1[ix1] );
+ #endif
+ // get PTE1 value
  pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
+
+ #if (DEBUG_HAL_GPT_SET_COW & 1)
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] : pt1[%x] = %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, ix1, pte1 );
+ #endif

  // only MAPPED & SMALL PTEs are modified
  if( (pte1 & TSAR_PTE_MAPPED) && (pte1 & TSAR_PTE_SMALL) )
  {
- // compute PT2 base address
- pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+ // get PT2 pointer
+ pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
  pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

- assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ),
- "PT2 and PT1 must be in the same cluster\n");
-
- // get current PTE2 attributes
- attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) );
-
- // only MAPPED PTEs are modified
- if( attr & TSAR_PTE_MAPPED )
- {
- attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
- hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
- }
- }
- } // end loop on pages
+ #if (DEBUG_HAL_GPT_SET_COW & 1)
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] : get pt2 = %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, pt2 );
+ #endif
+ ix2_min = (ix1 == ix1_min) ? TSAR_MMU_IX2_FROM_VPN(vpn_min) : 0;
+ ix2_max = (ix1 == ix1_max) ? TSAR_MMU_IX2_FROM_VPN(vpn_max) : 511;
+
+ for( ix2 = ix2_min ; ix2 <= ix2_max ; ix2++ )
+ {
+
+ #if (DEBUG_HAL_GPT_SET_COW & 1)
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] : &pte2[%x] = %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, 2*ix2, &pt2[2*ix2] );
+ #endif
+ // get current PTE2 attributes
+ attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) );
+
+ #if (DEBUG_HAL_GPT_SET_COW & 1)
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] : pte2[%x] (attr) = %x\n",
+ __FUNCTION__, this->process->pid, this->trdid, 2*ix2, attr );
+ #endif
+ // only MAPPED PTEs are modified
+ if( attr & TSAR_PTE_MAPPED )
+ {
+ attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
+ hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
+ }
+ } // end loop on ix2
+ }
+ } // end loop on ix1
+
+ #if DEBUG_HAL_GPT_SET_COW
+ cycle = (uint32_t)hal_get_cycles();
+ if(DEBUG_HAL_GPT_SET_COW < cycle )
+ printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, cycle );
+ #endif

  } // end hal_gpt_set_cow()
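The rewritten hal_gpt_set_cow no longer decodes every VPN in the range: it walks the PT1 index interval once and derives, for each PT2, the inclusive ix2 interval to scan, so only the first and last PT2 are partially traversed. A compact sketch of just that range computation (user-space, values hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define IX1( vpn )  (((vpn) >> 9) & 0x7FF)
    #define IX2( vpn )  ((vpn) & 0x1FF)

    int main( void )
    {
        uint32_t vpn_base = 0x3F0;                    // hypothetical range
        uint32_t vpn_size = 0x40;

        uint32_t vpn_min = vpn_base;
        uint32_t vpn_max = vpn_base + vpn_size - 1;   // inclusive

        for( uint32_t ix1 = IX1(vpn_min) ; ix1 <= IX1(vpn_max) ; ix1++ )
        {
            uint32_t ix2_min = (ix1 == IX1(vpn_min)) ? IX2(vpn_min) : 0;
            uint32_t ix2_max = (ix1 == IX1(vpn_max)) ? IX2(vpn_max) : 511;
            printf("PT2 #%u : ix2 in [%u..%u]\n", ix1, ix2_min, ix2_max );
        }
        return 0;
    }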
…
  ppn_t      pt2_ppn;       // PPN of PT2
  uint32_t * pt2;           // PT2 base address
- xptr_t     pte2_xp;       // exended pointer on PTE2
+ xptr_t     pte2_attr_xp;  // exended pointer on pte2.attr
+ xptr_t     pte2_ppn_xp;   // exended pointer on pte2.ppn

  uint32_t ix1;             // index in PT1
  uint32_t ix2;             // index in PT2
-
- uint32_t tsar_attr;       // PTE attributes for TSAR MMU

  // check MAPPED, SMALL, and not LOCKED in attr argument
…
  pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

- // compute tsar_attr from generic attributes
- tsar_attr = gpt2tsar( attr );
-
  // get PTE1 value
  pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );

  // check MAPPED and SMALL in target PTE1
- assert( ((pte1 & GPT_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
- assert( ((pte1 & GPT_SMALL ) != 0), "attribute SMALL must be set in target PTE1\n" );
-
- // get PT2 base from PTE1
- pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+ assert( ((pte1 & TSAR_PTE_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
+ assert( ((pte1 & TSAR_PTE_SMALL ) != 0), "attribute SMALL must be set in target PTE1\n" );
+
+ // get PT2 base
+ pt2_ppn = TSAR_MMU_PPN2_FROM_PTE1( pte1 );
  pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

- // get extended pointer on PTE2
- pte2_xp = XPTR( gpt_cxy , &pt2[2*ix2] );
+ // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn
+ pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
+ pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );

  // check MAPPED in target PTE2
- assert( ((hal_remote_l32(pte2_xp) & GPT_MAPPED) != 0),
+ assert( ((hal_remote_l32(pte2_attr_xp) & TSAR_PTE_MAPPED) != 0),
  "attribute MAPPED must be set in target PTE2\n" );

  // set PTE2 in this order
- hal_remote_s32( pte2_xp , ppn );
+ hal_remote_s32( pte2_ppn_xp , ppn );
  hal_fence();
- hal_remote_s32( pte2_xp + 4 , tsar_attr );
+ hal_remote_s32( pte2_attr_xp , gpt2tsar( attr ) );
  hal_fence();
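This last hunk appears to fix the word order (in r633 the PPN was stored through pte2_xp, which everywhere else in the file addresses the attributes word, with the attributes at pte2_xp + 4) while keeping the publication order: the PPN is written and fenced before the attributes, so a reader that observes MAPPED attributes never sees a stale PPN. A user-space analogue of that ordering, with a C11 release fence standing in for hal_fence() (attribute bit hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    #define PTE_MAPPED 0x80000000u   // hypothetical attribute bit

    // Analogue of the "set PTE2 in this order" hunk: ppn first, then attr.
    static void pte2_publish( volatile uint32_t * pte2,   // pte2[0]=attr, pte2[1]=ppn
                              uint32_t            attr,
                              uint32_t            ppn )
    {
        pte2[1] = ppn;                                 // write the PPN first
        atomic_thread_fence( memory_order_release );   // order it before the attr
        pte2[0] = attr | PTE_MAPPED;                   // this write makes the PTE visible
        atomic_thread_fence( memory_order_release );
    }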