Changeset 587 for trunk/hal/tsar_mips32/core/hal_gpt.c
- Timestamp: Nov 1, 2018, 12:39:27 PM
- File: trunk/hal/tsar_mips32/core/hal_gpt.c (1 edited)
Legend: unchanged lines are shown with a leading space, added lines with '+', removed lines with '-'.
trunk/hal/tsar_mips32/core/hal_gpt.c (r570 → r587)
@@ -132 +132 @@ hal_gpt_create()
     xptr_t page_xp;
 
+    thread_t * this = CURRENT_THREAD;
+
 #if DEBUG_HAL_GPT_CREATE
-uint32_t cycle = (uint32_t)hal_get_cycles;
+uint32_t cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_HAL_GPT_CREATE < cycle )
-printk("\n[DBG] %s : thread %x enter / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, cycle );
+printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
 
@@ -152 +154 @@ hal_gpt_create()
     if( page == NULL )
     {
-        printk("\n[ERROR] in %s : cannot allocate memory for PT1\n", __FUNCTION__ );
+        printk("\n[PANIC] in %s : no memory for PT1 / process %x / cluster %x\n",
+               __FUNCTION__, this->process->pid, local_cxy );
         return ENOMEM;
     }
 
@@ -162 +165 @@ hal_gpt_create()
 
 #if DEBUG_HAL_GPT_CREATE
-cycle = (uint32_t)hal_get_cycles;
+cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_HAL_GPT_CREATE < cycle )
-printk("\n[DBG] %s : thread %x exit / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, cycle );
+printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
 
@@ -188 +191 @@ hal_gpt_destroy()
 
 #if DEBUG_HAL_GPT_DESTROY
-uint32_t cycle = (uint32_t)hal_get_cycles;
+uint32_t cycle = (uint32_t)hal_get_cycles();
+thread_t * this = CURRENT_THREAD;
 if( DEBUG_HAL_GPT_DESTROY < cycle )
-printk("\n[DBG] %s : thread %x enter / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, cycle );
+printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
 
@@ -230 +234 @@ hal_gpt_destroy()
             pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
             xptr_t base_xp = ppm_ppn2base( pt2_ppn );
-            pt2 = (uint32_t *)GET_PTR( base_xp );
+            pt2 = GET_PTR( base_xp );
 
             // scan the PT2 to release all entries VALID and USER if reference cluster
 
@@ -261 +265 @@ hal_gpt_destroy()
 
 #if DEBUG_HAL_GPT_DESTROY
-cycle = (uint32_t)hal_get_cycles;
+cycle = (uint32_t)hal_get_cycles();
 if( DEBUG_HAL_GPT_DESTROY < cycle )
-printk("\n[DBG] %s : thread %x exit / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, cycle );
+printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
 
@@ -309 +313 @@
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
         xptr_t base_xp = ppm_ppn2base ( pt2_ppn );
-        pt2 = (uint32_t *)GET_PTR( base_xp );
+        pt2 = GET_PTR( base_xp );
 
         // scan the PT2
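A note on the recurring hal_get_cycles() fix above: without the parentheses, the cast (uint32_t)hal_get_cycles converts the function's address rather than its return value, so the DEBUG_* thresholds were being compared against a quasi-constant pointer instead of the current cycle count. A minimal stand-alone illustration (the hal_get_cycles stub below is a stand-in, not the kernel's implementation):

    #include <stdint.h>
    #include <stdio.h>

    // stand-in for the kernel's cycle-counter accessor
    static uint64_t hal_get_cycles( void ) { return 123456789ULL; }

    int main( void )
    {
        // converting the function designator yields its address
        // (implementation-defined, but it is what the old code did)
        uint32_t wrong = (uint32_t)(uintptr_t)hal_get_cycles;

        // calling it yields the actual cycle count
        uint32_t right = (uint32_t)hal_get_cycles();

        printf( "wrong = %u / right = %u\n", wrong, right );
        return 0;
    }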
@@ -332 +336 @@ hal_gpt_set_pte()
-///////////////////////////////////////
-error_t hal_gpt_set_pte( gpt_t    * gpt,
+//////////////////////////////////////////
+error_t hal_gpt_set_pte( xptr_t     gpt_xp,
                          vpn_t      vpn,
-                         uint32_t   attr,    // generic GPT attributes
+                         uint32_t   attr,    // GPT attributes
                          ppn_t      ppn )
 {
-    uint32_t  * pt1;        // PT1 base addres
-    uint32_t  * pte1_ptr;   // pointer on PT1 entry
-    uint32_t    pte1;       // PT1 entry value
+    cxy_t       gpt_cxy;    // target GPT cluster
+    gpt_t     * gpt_ptr;    // target GPT local pointer
+    uint32_t  * pt1_ptr;    // local pointer on PT1
+    xptr_t      pte1_xp;    // extended pointer on PT1 entry
+    uint32_t    pte1;       // PT1 entry value if PTE1
 
     ppn_t       pt2_ppn;    // PPN of PT2
-    uint32_t  * pt2;        // PT2 base address
+    uint32_t  * pt2_ptr;    // PT2 base address
 
     uint32_t    small;      // requested PTE is for a small page
-    bool_t      success;    // exit condition for while loop below
 
     page_t    * page;       // pointer on new physical page descriptor
 
@@ -354 +359 @@ hal_gpt_set_pte()
     uint32_t    tsar_attr;  // PTE attributes for TSAR MMU
 
-#if DEBUG_HAL_GPT_ACCESS
-uint32_t cycle = (uint32_t)hal_get_cycles;
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x enter / vpn %x / attr %x / ppn %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, vpn, attr, ppn, cycle );
+    thread_t * this = CURRENT_THREAD;
+
+    // get cluster and local pointer on GPT
+    gpt_cxy = GET_CXY( gpt_xp );
+    gpt_ptr = GET_PTR( gpt_xp );
+
+#if DEBUG_HAL_GPT_SET_PTE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( DEBUG_HAL_GPT_SET_PTE < cycle )
+printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
 #endif
 
@@ -365 +376 @@ hal_gpt_set_pte()
     ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
 
-    pt1   = gpt->ptr;
-    small = attr & GPT_SMALL;
+    pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+    small   = attr & GPT_SMALL;
 
     // compute tsar attributes from generic attributes
     tsar_attr = gpt2tsar( attr );
 
-#if (DEBUG_HAL_GPT_ACCESS & 1)
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x / vpn %x / &pt1 %x / tsar_attr %x\n",
-__FUNCTION__, CURRENT_THREAD, vpn, pt1, tsar_attr );
-#endif
-
-    // get pointer on PT1[ix1]
-    pte1_ptr = &pt1[ix1];
-
-    // PTE1 (big page) are only set for the kernel vsegs, in the kernel init phase.
-    // There is no risk of concurrent access.
-    if( small == 0 )
-    {
-        // get current pte1 value
-        pte1 = *pte1_ptr;
-
+    // build extended pointer on PTE1 = PT1[ix1]
+    pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
+
+    // get current pte1 value
+    pte1 = hal_remote_l32( pte1_xp );
+
+    if( small == 0 )     // map a big page in PT1
+    {
         assert( (pte1 == 0) ,
         "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
 
-        // set the PTE1
-        *pte1_ptr = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) |
-                    ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
+        // set the PTE1 value in PT1
+        pte1 = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
+        hal_remote_s32( pte1_xp , pte1 );
         hal_fence();
+
+#if DEBUG_HAL_GPT_SET_PTE
+if( DEBUG_HAL_GPT_SET_PTE < cycle )
+printk("\n[DBG] %s : thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
+__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
+#endif
+
         return 0;
     }
-
-    // From this point, the requested PTE is a PTE2 (small page)
-
-    // loop to access PTE1 and get pointer on PT2
-    success = false;
-    do
-    {
-        // get current pte1 value
-        pte1 = *pte1_ptr;
-
-#if (DEBUG_HAL_GPT_ACCESS & 1)
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x / vpn %x / current_pte1 %x\n",
-__FUNCTION__, CURRENT_THREAD, vpn, pte1 );
-#endif
-
-        // allocate a PT2 if PT1 entry not valid
-        if( (pte1 & TSAR_MMU_MAPPED) == 0 )    // PT1 entry not valid
-        {
-            // allocate one physical page for the PT2
-            kmem_req_t req;
-            req.type  = KMEM_PAGE;
-            req.size  = 0;                     // 1 small page
-            req.flags = AF_KERNEL | AF_ZERO;
-            page = (page_t *)kmem_alloc( &req );
+    else                 // map a small page in PT1 & PT2
+    {
+        if( (pte1 & TSAR_MMU_MAPPED) == 0 )    // PT1 entry unmapped => map it
+        {
+            // allocate one physical page for PT2
+            if( gpt_cxy == local_cxy )
+            {
+                kmem_req_t req;
+                req.type  = KMEM_PAGE;
+                req.size  = 0;                 // 1 small page
+                req.flags = AF_KERNEL | AF_ZERO;
+                page = (page_t *)kmem_alloc( &req );
+            }
+            else
+            {
+                rpc_pmem_get_pages_client( gpt_cxy , 0 , &page );
+            }
+
             if( page == NULL )
             {
-                printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
+                printk("\n[PANIC] in %s : no memory for GPT PT2 / process %x / cluster %x\n",
+                       __FUNCTION__, this->process->pid, gpt_cxy );
                 return ENOMEM;
             }
 
             // get the PT2 PPN
-            page_xp = XPTR( local_cxy , page );
+            page_xp = XPTR( gpt_cxy , page );
             pt2_ppn = ppm_page2ppn( page_xp );
 
-            // try to atomicaly set the PT1 entry
+            // build PTD1 value
             pte1 = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn;
-            success = hal_atomic_cas( pte1_ptr , 0 , pte1 );
-
-            // release allocated PT2 if PT1 entry modified by another thread
-            if( success == false ) ppm_free_pages( page );
+
+            // set the PTD1 value in PT1
+            hal_remote_s32( pte1_xp , pte1 );
+
+#if DEBUG_HAL_GPT_SET_PTE
+if( DEBUG_HAL_GPT_SET_PTE < cycle )
+printk("\n[DBG] %s : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
+__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
+#endif
         }
-        else                                   // PT1 entry is valid
+        else                                   // pt1 entry mapped => use it
         {
-            // This valid entry must be a PTD1
-            assert( (pte1 & TSAR_MMU_SMALL) ,
-            "try to set a small page in a big PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
-
-            success = true;
+
+#if DEBUG_HAL_GPT_SET_PTE
+if( DEBUG_HAL_GPT_SET_PTE < cycle )
+printk("\n[DBG] %s : thread[%x,%x] get PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
+__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
+#endif
+
         }
 
         // get PT2 base from pte1
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-#if (DEBUG_HAL_GPT_ACCESS & 1)
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x / vpn %x / pte1 %x / &pt2 %x\n",
-__FUNCTION__, CURRENT_THREAD, vpn, pte1, pt2 );
-#endif
-
+        pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+        // set PTE2 in PT2 (in this order)
+        hal_remote_s32( XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] ) , ppn );
+        hal_fence();
+        hal_remote_s32( XPTR( gpt_cxy , &pt2_ptr[2 * ix2] ) , tsar_attr );
+        hal_fence();
+
+#if DEBUG_HAL_GPT_SET_PTE
+if( DEBUG_HAL_GPT_SET_PTE < cycle )
+printk("\n[DBG] %s : thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
+__FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix2, pt2_ptr, tsar_attr, ppn );
+#endif
+
+        return 0;
     }
-    while (success == false);
-
-    // set PTE2 in this order
-    pt2[2 * ix2 + 1] = ppn;
-    hal_fence();
-    pt2[2 * ix2]     = tsar_attr;
-    hal_fence();
-
-#if DEBUG_HAL_GPT_ACCESS
-cycle = (uint32_t)hal_get_cycles;
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x exit / vpn %x / pte2_attr %x / pte2_ppn %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, vpn, pt2[2 * ix2], pt2[2 * ix2 + 1], cycle );
-#endif
-
-    return 0;
-
 }  // end of hal_gpt_set_pte()
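Two things are worth spelling out in the rewritten hal_gpt_set_pte(). First, it is now cluster-aware: when the target GPT is remote, the PT2 page is allocated in the GPT's own cluster through an RPC, presumably because a PT2 must reside in the same cluster as its PT1 (an assert to that effect appears further down in this file). Second, the old lock-free retry loop (hal_atomic_cas() on the PT1 entry) is gone, so concurrent mappers of the same PT1 entry presumably have to be serialized by the caller. A condensed sketch of the allocate-local-or-RPC pattern, reusing only calls that appear in the diff:

    page_t * page;

    if( gpt_cxy == local_cxy )          // GPT lives in this cluster: local allocator
    {
        kmem_req_t req;
        req.type  = KMEM_PAGE;
        req.size  = 0;                  // one small page
        req.flags = AF_KERNEL | AF_ZERO;
        page = (page_t *)kmem_alloc( &req );
    }
    else                                // GPT is remote: ask its owner cluster via RPC
    {
        rpc_pmem_get_pages_client( gpt_cxy , 0 , &page );
    }

    if( page == NULL ) return ENOMEM;   // both paths can fail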
@@ -479 +480 @@ hal_gpt_get_pte()
-/////////////////////////////////////
-void hal_gpt_get_pte( gpt_t    * gpt,
+////////////////////////////////////////
+void hal_gpt_get_pte( xptr_t     gpt_xp,
                       vpn_t      vpn,
                       uint32_t * attr,
 
@@ -490 +490 @@ hal_gpt_get_pte()
     ppn_t      pt2_ppn;
 
+    // get cluster and local pointer on GPT
+    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
+    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
+
+    // compute indexes in PT1 and PT2
     uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
     uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
 
-    // get PTE1 value
-    pt1  = gpt->ptr;
-    pte1 = pt1[ix1];
-
+    // get PT1 base
+    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+
+    // get pte1
+    pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
+
+    // check PTE1 mapped
     if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
     {
         *attr = 0;
         *ppn  = 0;
+        return;
     }
 
+    // access GPT
     if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
     {
+        // get PPN & ATTR from PT1
         *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
         *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
     }
     else                                  // it's a PTD1
     {
         // compute PT2 base address
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-        *ppn  = pt2[2*ix2+1] & ((1<<TSAR_MMU_PPN_WIDTH)-1);
-        *attr = tsar2gpt( pt2[2*ix2] );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+        // get PPN & ATTR from PT2
+        *ppn  = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2+1] ) ) & ((1<<TSAR_MMU_PPN_WIDTH)-1);
+        *attr = tsar2gpt( hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) ) );
     }
 }  // end hal_gpt_get_pte()
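Since hal_gpt_get_pte() now takes an extended pointer, the same call interrogates a local or a remote GPT. Note also the added return statement: before this fix, an unmapped PTE1 fell through, and *attr / *ppn could be overwritten below with values derived from the invalid entry. A hedged usage sketch (the ref_cxy / ref_ptr names and the gpt field are illustrative assumptions, not taken from this file):

    uint32_t attr;
    ppn_t    ppn;

    // build an extended pointer on a possibly remote GPT
    xptr_t gpt_xp = XPTR( ref_cxy , &ref_ptr->gpt );

    hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );

    if( attr != 0 )   // unmapped entries now return attr == 0 and ppn == 0
    {
        // vpn is mapped on physical page ppn
    }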
@@ -528 +540 @@ hal_gpt_reset_pte()
     ppn_t      pt2_ppn;   // PPN of PT2
     uint32_t * pt2;       // PT2 base address
-
-    ppn_t      ppn;       // PPN of page to be released
 
     // get ix1 & ix2 indexes
 
@@ -546 +556 @@ hal_gpt_reset_pte()
     if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
     {
-        // get PPN
-        ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
-
         // unmap the big page
         pt1[ix1] = 0;
 
@@ -559 +566 @@ hal_gpt_reset_pte()
         // compute PT2 base address
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-        // get PPN
-        ppn = TSAR_MMU_PPN_FROM_PTE2( pt2[2*ix2+1] );
-
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
         // unmap the small page
-        pt2[2*ix2] = 0;          // only attr is reset
+        pt2[2*ix2] = 0;
         hal_fence();
 
@@ -623 +627 @@
             page_xp = XPTR( local_cxy , page );
             pt2_ppn = ppm_page2ppn( page_xp );
-            pt2 = (uint32_t *)GET_PTR( ppm_page2base( page_xp ) );
+            pt2 = GET_PTR( ppm_page2base( page_xp ) );
 
             // try to set the PT1 entry
 
@@ -643 +647 @@
             // get the PT2 base address
             pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
-            pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+            pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
         }
     }
 
@@ -660 +664 @@
 
         // compute pointer on PT2 base
-        pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 
@@ -720 +724 @@
     // get pointer on PT2 base
     pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
-    pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 
     // get pointer on PTE2
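The systematic removal of the (uint32_t *) and (gpt_t *) casts in front of GET_PTR() suggests the macro now evaluates to a void *, which C converts implicitly to any object-pointer type, so the casts were pure noise. A sketch of what such a macro could look like for a 32-bit cluster / 32-bit local-pointer layout; this is an illustrative guess, not the kernel's actual definition:

    typedef uint64_t xptr_t;   // assumed layout: [ cluster | local pointer ]

    // hypothetical definitions, for illustration only
    #define GET_CXY( xp )  ( (cxy_t)((xp) >> 32) )
    #define GET_PTR( xp )  ( (void *)(uint32_t)((xp) & 0xFFFFFFFFULL) )

    uint32_t * pt2 = GET_PTR( base_xp );   // no explicit cast needed any more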
@@ -762 +766 @@
     uint32_t * dst_pt2;   // local pointer on DST PT2
 
-    kmem_req_t req;       // for dynamic PT2 allocation
+    kmem_req_t req;       // for PT2 allocation
 
     uint32_t   src_pte1;
 
@@ -776 +780 @@
     ppn_t      dst_pt2_ppn;
 
-#if DEBUG_HAL_GPT_ACCESS
-uint32_t cycle = (uint32_t)hal_get_cycles;
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x enter / vpn %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, vpn, cycle );
-#endif
-
     // get remote src_gpt cluster and local pointer
     src_cxy = GET_CXY( src_gpt_xp );
-    src_gpt = (gpt_t *)GET_PTR( src_gpt_xp );
+    src_gpt = GET_PTR( src_gpt_xp );
+
+#if DEBUG_HAL_GPT_COPY
+uint32_t cycle = (uint32_t)hal_get_cycles();
+thread_t * this = CURRENT_THREAD;
+if( DEBUG_HAL_GPT_COPY < cycle )
+printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, vpn, src_cxy, local_cxy, cycle );
+#endif
+
+    // get remote src_gpt cluster and local pointer
+    src_cxy = GET_CXY( src_gpt_xp );
+    src_gpt = GET_PTR( src_gpt_xp );
 
     // get remote src_pt1 and local dst_pt1
 
@@ -837 +846 @@
     // get pointer on src_pt2
     src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
-    src_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
+    src_pt2     = GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
 
     // get pointer on dst_pt2
     dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
-    dst_pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
+    dst_pt2     = GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
 
     // get attr and ppn from SRC_PT2
 
@@ -867 +876 @@
         *ppn = src_pte2_ppn;
 
-#if DEBUG_HAL_GPT_ACCESS
+#if DEBUG_HAL_GPT_COPY
 cycle = (uint32_t)hal_get_cycles;
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x exit / copy done for vpn %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, vpn, cycle );
+if( DEBUG_HAL_GPT_COPY < cycle )
+printk("\n[DBG] %s : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
 #endif
 
@@ -884 +893 @@
     *ppn = 0;
 
-#if DEBUG_HAL_GPT_ACCESS
+#if DEBUG_HAL_GPT_COPY
 cycle = (uint32_t)hal_get_cycles;
-if( DEBUG_HAL_GPT_ACCESS < cycle )
-printk("\n[DBG] %s : thread %x exit / nothing done for vpn %x / cycle %d\n",
-__FUNCTION__, CURRENT_THREAD, vpn, cycle );
+if( DEBUG_HAL_GPT_COPY < cycle )
+printk("\n[DBG] %s : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
 #endif
 
@@ -921 +930 @@
         // compute PT2 base address
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 
         // get pte2_attr
 
@@ -955 +964 @@
         // compute PT2 base address
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 
         // get pte2_attr
 
@@ -989 +998 @@
     // get GPT cluster and local pointer
     gpt_cxy = GET_CXY( gpt_xp );
-    gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
+    gpt_ptr = GET_PTR( gpt_xp );
 
     // get local PT1 pointer
 
@@ -1008 +1017 @@
     // compute PT2 base address
     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-    pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 
     assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ),
 
@@ -1050 +1059 @@
     // get cluster and local pointer on remote GPT
     cxy_t   gpt_cxy = GET_CXY( gpt_xp );
-    gpt_t * gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
+    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
 
     // compute indexes in PT1 and PT2
@@ -1070 +1079 @@
     // get PT2 base from PTE1
     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-    pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-    // reset PTE2
-    hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2] ) , 0 );
-    hal_fence();
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
 
     // set PTE2 in this order
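A closing remark on the "set PTE2 in this order" idiom that this changeset keeps intact: the PPN word is always stored before the attribute word, with a fence in between, so a concurrent observer that sees a mapped attribute is guaranteed to find a valid PPN beside it. A hypothetical reader-side sketch showing why the order matters, using only accessors that appear above:

    // read the attribute word first
    uint32_t attr = hal_remote_l32( XPTR( gpt_cxy , &pt2[2 * ix2] ) );

    if( attr & TSAR_MMU_MAPPED )
    {
        // the writer stored the PPN (then fenced) before publishing the
        // attributes, so this load cannot observe a stale PPN
        ppn_t ppn = hal_remote_l32( XPTR( gpt_cxy , &pt2[2 * ix2 + 1] ) );
        // ... use the translation ...
    }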