Changeset 391 for trunk/hal/tsar_mips32/core/hal_gpt.c
- Timestamp: Aug 16, 2017, 3:27:05 PM
- File: 1 edited
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/hal/tsar_mips32/core/hal_gpt.c
--- trunk/hal/tsar_mips32/core/hal_gpt.c (r315)
+++ trunk/hal/tsar_mips32/core/hal_gpt.c (r391)
@@ -36 +36 @@
 ////////////////////////////////////////////////////////////////////////////////////////
 // This define the masks for the TSAR MMU PTE attributes. (from TSAR MMU specification)
-// the GPT masks are derived from the TSAR MMU PTE attributes
-// in the TSAR specific hal_gpt_create() function.
 ////////////////////////////////////////////////////////////////////////////////////////

@@ -76 +74 @@
 #define TSAR_MMU_ATTR_FROM_PTE2( pte2 ) (pte2 & 0xFFC000FF)

-/****************************************************************************************
- * These global variables defines the masks for the Generic Page Table Entry attributes,
- * and must be defined in all GPT implementation.
- ***************************************************************************************/
-
-uint32_t GPT_MAPPED;
-uint32_t GPT_SMALL;
-uint32_t GPT_READABLE;
-uint32_t GPT_WRITABLE;
-uint32_t GPT_EXECUTABLE;
-uint32_t GPT_CACHABLE;
-uint32_t GPT_USER;
-uint32_t GPT_DIRTY;
-uint32_t GPT_ACCESSED;
-uint32_t GPT_GLOBAL;
-uint32_t GPT_COW;
-uint32_t GPT_SWAP;
-uint32_t GPT_LOCKED;
-
 /////////////////////////////////////
 error_t hal_gpt_create( gpt_t * gpt )
@@ -128 +107 @@
 gpt->page = GET_PTR( page_xp );

+/*
 // initialize PTE entries attributes masks
 GPT_MAPPED = TSAR_MMU_PRESENT;
@@ -142 +122 @@
 GPT_SWAP = TSAR_MMU_SWAP;
 GPT_LOCKED = TSAR_MMU_LOCKED;
-
+*/
 return 0;
 } // end hal_gpt_create()
@@ -174 +154 @@
 {
 pte1 = pt1[ix1];
-if( (pte1 & GPT_MAPPED) != 0 ) // PTE1 valid
+if( (pte1 & TSAR_MMU_PRESENT) != 0 ) // PTE1 valid
 {
-if( (pte1 & GPT_SMALL) == 0 ) // BIG page
+if( (pte1 & TSAR_MMU_PTD1) == 0 ) // BIG page
 {
-if( (pte1 & GPT_USER) != 0 )
+if( (pte1 & TSAR_MMU_USER) != 0 )
 {
 // warning message
 printk("\n[WARNING] in %s : found an USER BIG page / ix1 = %d\n",
 __FUNCTION__ , ix1 );

 // release the big physical page if reference cluster
@@ -192 +172 @@
 }
 }
-else // SMALL page
+else                            // SMALL page
 {
 // get local pointer on PT2
@@ -205 +185 @@
 {
 attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
-if( ((attr & GPT_MAPPED) != 0 ) && ((attr & GPT_USER) != 0) )
+if( ((attr & TSAR_MMU_PRESENT) != 0 ) && ((attr & TSAR_MMU_USER) != 0) )
 {
 // release the physical page
@@ -250 +230 @@
 {
 pte1 = pt1[ix1];
-if( (pte1 & GPT_MAPPED) != 0 )
+if( (pte1 & TSAR_MMU_PRESENT) != 0 )
 {
-if( (pte1 & GPT_SMALL) == 0 ) // BIG page
+if( (pte1 & TSAR_MMU_PTD1) == 0 ) // BIG page
 {
 printk(" - BIG : pt1[%d] = %x\n", ix1 , pte1 );
@@ -267 +247 @@
 pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
 pte2_ppn = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
-if( (pte2_attr & GPT_MAPPED) != 0 )
+if( (pte2_attr & TSAR_MMU_PRESENT) != 0 )
 {
 printk(" - SMALL : pt1[%d] = %x / pt2[%d] / pt2[%d]\n",
@@ -306 +286 @@

 pt1 = gpt->ptr;
-small = (attr & GPT_SMALL);
+small = (attr & TSAR_MMU_PTD1);

 // get PT1 entry value
@@ -331 +311 @@
 // From this point, the requested PTE is a PTE2 (small page)

-if( (pte1 & GPT_MAPPED) == 0 ) // the PT1 entry is not valid
+if( (pte1 & TSAR_MMU_PRESENT) == 0 ) // the PT1 entry is not valid
 {
 // allocate one physical page for the PT2
@@ -376 +356 @@
 {
 // This valid entry must be a PTD1
-if( (pte1 & GPT_SMALL) == 0 )
+if( (pte1 & TSAR_MMU_PTD1) == 0 )
 {
 printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
@@ -418 +398 @@
 pte1 = pt1[ix1];

-if( (pte1 & GPT_MAPPED) == 0 ) // PT1 entry not present
+if( (pte1 & TSAR_MMU_PRESENT) == 0 ) // PT1 entry not present
 {
 *attr = 0;
@@ -424 +404 @@
 }

-if( (pte1 & GPT_SMALL) == 0 ) // it's a PTE1
+if( (pte1 & TSAR_MMU_PTD1) == 0 ) // it's a PTE1
 {
 *attr = TSAR_MMU_ATTR_FROM_PTE1( pte1 );
@@ -454 +434 @@
 kmem_req_t req;

+// get ix1 & ix2 indexes
 uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
 uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+// get pointer on calling process
+process_t * process = CURRENT_THREAD->process;
+
+// compute is_ref
+bool_t is_ref = ( GET_CXY( process->ref_xp ) == local_cxy );

 // get PTE1 value
@@ -461 +448 @@
 pte1 = pt1[ix1];

-if( (pte1 & GPT_MAPPED) == 0 ) // PT1 entry not present
+if( (pte1 & TSAR_MMU_PRESENT) == 0 ) // PT1 entry not present
 {
 return;
 }

-if( (pte1 & GPT_SMALL) == 0 )// it's a PTE1
+if( (pte1 & TSAR_MMU_PTD1) == 0 ) // it's a PTE1
 {
 // get PPN
@@ -475 +462 @@
 hal_fence();

-// releases the big page
-req.type = KMEM_PAGE;
-req.size = 9;
-req.ptr = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
-kmem_free( &req );
+// releases the physical page if local
+// req.type = KMEM_PAGE;
+// req.size = 9;
+// req.ptr = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
+// kmem_free( &req );

 return;
 }
-else // it's a PTD1
+else                          // it's a PTD1
 {
 // compute PT2 base address
@@ -493 +480 @@

 // unmap the small page
-pt2[2*ix2] = 0;
-hal_fence();
-pt2[2*ix2+1] = 0;
-hal_fence();
-
-// releases the small page
-req.type = KMEM_PAGE;
-req.size = 0;
-req.ptr = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
-kmem_free( &req );
+pt2[2*ix2] = 0; // only attr is reset
+hal_fence();
+
+// releases the small page
+// req.type = KMEM_PAGE;
+// req.size = 0;
+// req.ptr = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
+// kmem_free( &req );

 return;
@@ -534 +519 @@

 // If present, the page must be small
-if( ((pte1 & GPT_MAPPED) != 0) && ((pte1 & GPT_SMALL) == 0) )
+if( ((pte1 & TSAR_MMU_PRESENT) != 0) && ((pte1 & TSAR_MMU_PTD1) == 0) )
 {
 printk("\n[ERROR] in %s : try to lock a big page / PT1[%d] = %x\n",
@@ -541 +526 @@
 }

-if( (pte1 & GPT_MAPPED) == 0 ) // missing PT1 entry
+if( (pte1 & TSAR_MMU_PRESENT) == 0 ) // missing PT1 entry
 {
 // allocate one physical page for PT2
@@ -585 +570 @@
 {
 // This valid entry must be a PTD1
-if( (pte1 & GPT_SMALL) == 0 )
+if( (pte1 & TSAR_MMU_PTD1) == 0 )
 {
 printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
@@ -647 +632 @@

 // check PTE1 present and small page
-if( ((pte1 & GPT_MAPPED) == 0) || ((pte1 & GPT_SMALL) == 0) )
+if( ((pte1 & TSAR_MMU_PRESENT) == 0) || ((pte1 & TSAR_MMU_PTD1) == 0) )
 {
 printk("\n[ERROR] in %s : try to unlock a big or undefined page / PT1[%d] = %x\n",
@@ -665 +650 @@

 // check PTE2 present and locked
-if( ((attr & GPT_MAPPED) == 0) || ((attr & GPT_LOCKED) == 0) );
+if( ((attr & TSAR_MMU_PRESENT) == 0) || ((attr & GPT_LOCKED) == 0) );
 {
 printk("\n[ERROR] in %s : try to unlock an undefined page / PT1[%d] = %x\n",
@@ -710 +695 @@
 {
 pte1 = src_pt1[ix1];
-if( (pte1 & GPT_MAPPED) != 0 )
+if( (pte1 & TSAR_MMU_PRESENT) != 0 )
 {
-if( (pte1 & GPT_SMALL) == 0 ) // PTE1 => big kernel page
+if( (pte1 & TSAR_MMU_PTD1) == 0 ) // PTE1 => big kernel page
 {
 // big kernel pages are shared by all processes => copy it
@@ -754 +739 @@
 pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( src_pt2[2 * ix2] );

-if( (pte2_attr & GPT_MAPPED) != 0 ) // valid PTE2 in SRC_GPT
+if( (pte2_attr & TSAR_MMU_PRESENT) != 0 ) // valid PTE2 in SRC_GPT
 {
 // get GPT_WRITABLE & PPN
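The change visible in this diff is largely mechanical: the generic GPT_MAPPED / GPT_SMALL / GPT_USER globals (and their initialization in hal_gpt_create()) are dropped, and the tests on PTE presence, page type, and user access now use the TSAR-specific TSAR_MMU_PRESENT / TSAR_MMU_PTD1 / TSAR_MMU_USER masks directly (one GPT_LOCKED test still remains in the unlock path). The standalone sketch below only illustrates that test pattern on a PTE1 entry; the mask values are illustrative placeholders chosen for the example, not the real TSAR encodings, which are defined earlier in hal_gpt.c and are not shown in this hunk. TSAR_MMU_ATTR_FROM_PTE2 is quoted from the diff.

/*
 * Minimal sketch of the PTE1 decoding pattern used throughout this changeset:
 * attributes are tested directly against TSAR-specific masks instead of the
 * removed generic GPT_* globals.
 *
 * NOTE: the three mask values below are placeholders for illustration only;
 * the real values come from the TSAR MMU specification.
 */
#include <stdint.h>
#include <stdio.h>

#define TSAR_MMU_PRESENT  0x80000000   /* placeholder: entry is mapped          */
#define TSAR_MMU_PTD1     0x40000000   /* placeholder: PTE1 is a PTD1 -> PT2    */
#define TSAR_MMU_USER     0x08000000   /* placeholder: user-accessible page     */

#define TSAR_MMU_ATTR_FROM_PTE2( pte2 )  ((pte2) & 0xFFC000FF)  /* from the diff */

/* Classify a PTE1 entry the way the destroy/print functions in the diff do. */
static void classify_pte1( uint32_t pte1 )
{
    if( (pte1 & TSAR_MMU_PRESENT) == 0 )        /* PT1 entry not mapped          */
    {
        printf("unmapped PTE1\n");
    }
    else if( (pte1 & TSAR_MMU_PTD1) == 0 )      /* mapped BIG page               */
    {
        if( (pte1 & TSAR_MMU_USER) != 0 ) printf("user BIG page\n");
        else                              printf("kernel BIG page\n");
    }
    else                                        /* PTD1 pointing to a PT2        */
    {
        printf("PTD1 -> PT2 of small pages\n");
    }
}

int main( void )
{
    classify_pte1( 0 );                                    /* unmapped entry     */
    classify_pte1( TSAR_MMU_PRESENT | TSAR_MMU_USER );     /* user big page      */
    classify_pte1( TSAR_MMU_PRESENT | TSAR_MMU_PTD1 );     /* PTD1 entry         */
    return 0;
}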