Changeset 401 for trunk/hal/tsar_mips32/core
- Timestamp: Aug 17, 2017, 3:02:18 PM
- Location: trunk/hal/tsar_mips32/core
- Files: 2 edited
trunk/hal/tsar_mips32/core/hal_exception.c (r391 → r401)

```diff
 static error_t hal_mmu_exception( thread_t * this )
 {
-    vseg_t    * vseg;      // vseg containing the bad_vaddr
     process_t * process;   // local process descriptor
     error_t     error;     // return value
 …
     "thread %x is a kernel thread / vaddr = %x\n", this->trdid , bad_vaddr );
 
-    // vaddr must be contained in a registered vseg
-    error = vmm_get_vseg( process , bad_vaddr , &vseg );
-
-    vmm_dmsg("\n[INFO] %s : found vseg for thread %x / vseg_min = %x / vseg_max = %x\n",
-             __FUNCTION__ , this->trdid , vseg->min , vseg->max );
-
     // analyse exception code
     if( excp_code & MMU_EXCP_PAGE_UNMAPPED )
 …
         // try to map the unmapped PTE
         error = vmm_handle_page_fault( process,
-                                       vseg,
                                        bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );  // vpn
         // disable IRQs
```
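After this change the MMU exception handler no longer resolves the vseg itself: `vmm_handle_page_fault()` receives only the process and the faulting VPN, and the vseg lookup moves inside the VMM. The standalone mock below illustrates the narrowed interface; `process_t`, the handler body, and the sample values are stand-ins for this demo, not ALMOS-MKH kernel code.

```c
/* Mock of the narrowed page-fault interface introduced by r401.
 * CAUTION: the types and the handler body below are placeholders. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t vpn_t;
typedef int      error_t;
typedef struct process_s { int pid; } process_t;

#define CONFIG_PPM_PAGE_SHIFT 12   // 4 KB pages, as on TSAR

/* After r401 the handler takes (process, vpn) only; before r401 the caller
 * also passed the vseg found by vmm_get_vseg() in hal_mmu_exception(). */
static error_t vmm_handle_page_fault( process_t * process, vpn_t vpn )
{
    printf("page fault in process %d on vpn %x\n", process->pid, (unsigned)vpn);
    return 0;   // stub: a real handler maps a frame or returns an error
}

int main( void )
{
    process_t proc      = { .pid = 42 };
    uint32_t  bad_vaddr = 0x00402A30;

    // the exception handler just extracts the VPN and delegates
    return vmm_handle_page_fault( &proc, bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
}
```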
trunk/hal/tsar_mips32/core/hal_gpt.c (r391 → r401)

```diff
 ////////////////////////////////////////////////////////////////////////////////////////
-// This define the masks for the TSAR MMU PTE attributes .(from TSAR MMU specification)
+// This define the masks for the TSAR MMU PTE attributes (from TSAR MMU specification)
 ////////////////////////////////////////////////////////////////////////////////////////
 
-#define TSAR_MMU_PRESENT       0x80000000
-#define TSAR_MMU_PTD1          0x40000000
+#define TSAR_MMU_MAPPED        0x80000000
+#define TSAR_MMU_SMALL         0x40000000
 #define TSAR_MMU_LOCAL         0x20000000
 #define TSAR_MMU_REMOTE        0x10000000
 …
 #define TSAR_MMU_DIRTY         0x00400000
 
-#define TSAR_MMU_COW           0x00000001
-#define TSAR_MMU_SWAP          0x00000004
-#define TSAR_MMU_LOCKED        0x00000008
+#define TSAR_MMU_COW           0x00000001   // only for small pages
+#define TSAR_MMU_SWAP          0x00000004   // only for small pages
+#define TSAR_MMU_LOCKED        0x00000008   // only for small pages
 …
 #define TSAR_MMU_PPN_WIDTH     28
 
+#define TSAR_MMU_PTE1_ATTR_MASK    0xFFC00000
+#define TSAR_MMU_PTE1_PPN_MASK     0x0007FFFF
+
 #define TSAR_MMU_IX1_FROM_VPN( vpn )    ((vpn >> 9) & 0x7FF)
 #define TSAR_MMU_IX2_FROM_VPN( vpn )    (vpn & 0x1FF)
 …
 #define TSAR_MMU_PPN_FROM_PTE2( pte2 )  (pte2 & 0x0FFFFFFF)
 #define TSAR_MMU_ATTR_FROM_PTE2( pte2 ) (pte2 & 0xFFC000FF)
+
+///////////////////////////////////////////////////////////////////////////////////////
+// This static function translates the GPT attributes to the TSAR attributes
+///////////////////////////////////////////////////////////////////////////////////////
+static inline uint32_t gpt2tsar( uint32_t gpt_attr )
+{
+    uint32_t tsar_attr = 0;
+
+    if( gpt_attr & GPT_MAPPED     ) tsar_attr |= TSAR_MMU_MAPPED;
+    if( gpt_attr & GPT_SMALL      ) tsar_attr |= TSAR_MMU_SMALL;
+    if( gpt_attr & GPT_WRITABLE   ) tsar_attr |= TSAR_MMU_WRITABLE;
+    if( gpt_attr & GPT_EXECUTABLE ) tsar_attr |= TSAR_MMU_EXECUTABLE;
+    if( gpt_attr & GPT_CACHABLE   ) tsar_attr |= TSAR_MMU_CACHABLE;
+    if( gpt_attr & GPT_USER       ) tsar_attr |= TSAR_MMU_USER;
+    if( gpt_attr & GPT_DIRTY      ) tsar_attr |= TSAR_MMU_DIRTY;
+    if( gpt_attr & GPT_ACCESSED   ) tsar_attr |= TSAR_MMU_LOCAL;
+    if( gpt_attr & GPT_GLOBAL     ) tsar_attr |= TSAR_MMU_GLOBAL;
+    if( gpt_attr & GPT_COW        ) tsar_attr |= TSAR_MMU_COW;
+    if( gpt_attr & GPT_SWAP       ) tsar_attr |= TSAR_MMU_SWAP;
+    if( gpt_attr & GPT_LOCKED     ) tsar_attr |= TSAR_MMU_LOCKED;
+
+    return tsar_attr;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////
+// This static function translates the TSAR attributes to the GPT attributes
+///////////////////////////////////////////////////////////////////////////////////////
+static inline uint32_t tsar2gpt( uint32_t tsar_attr )
+{
+    uint32_t gpt_attr = 0;
+
+    if( tsar_attr & TSAR_MMU_MAPPED     ) gpt_attr |= GPT_MAPPED;
+    if( tsar_attr & TSAR_MMU_MAPPED     ) gpt_attr |= GPT_READABLE;
+    if( tsar_attr & TSAR_MMU_SMALL      ) gpt_attr |= GPT_SMALL;
+    if( tsar_attr & TSAR_MMU_WRITABLE   ) gpt_attr |= GPT_WRITABLE;
+    if( tsar_attr & TSAR_MMU_EXECUTABLE ) gpt_attr |= GPT_EXECUTABLE;
+    if( tsar_attr & TSAR_MMU_CACHABLE   ) gpt_attr |= GPT_CACHABLE;
+    if( tsar_attr & TSAR_MMU_USER       ) gpt_attr |= GPT_USER;
+    if( tsar_attr & TSAR_MMU_DIRTY      ) gpt_attr |= GPT_DIRTY;
+    if( tsar_attr & TSAR_MMU_LOCAL      ) gpt_attr |= GPT_ACCESSED;
+    if( tsar_attr & TSAR_MMU_REMOTE     ) gpt_attr |= GPT_ACCESSED;
+    if( tsar_attr & TSAR_MMU_GLOBAL     ) gpt_attr |= GPT_GLOBAL;
+    if( tsar_attr & TSAR_MMU_COW        ) gpt_attr |= GPT_COW;
+    if( tsar_attr & TSAR_MMU_SWAP       ) gpt_attr |= GPT_SWAP;
+    if( tsar_attr & TSAR_MMU_LOCKED     ) gpt_attr |= GPT_LOCKED;
+
+    return gpt_attr;
+}
```
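Note that the two translators are not exact inverses: `TSAR_MMU_MAPPED` expands to both `GPT_MAPPED` and `GPT_READABLE`, and both `TSAR_MMU_LOCAL` and `TSAR_MMU_REMOTE` fold into `GPT_ACCESSED`, while `gpt2tsar()` only ever sets `TSAR_MMU_LOCAL`. The standalone check below demonstrates that asymmetry on a reduced attribute set; the numeric `GPT_*` values are invented for the demo (the real ones live in the generic GPT header, which this changeset does not show).

```c
/* Demo of the gpt2tsar()/tsar2gpt() asymmetry on a reduced attribute set.
 * CAUTION: the GPT_* values below are made up; only the TSAR_MMU_* values
 * come from the changeset. */
#include <stdio.h>
#include <stdint.h>

#define TSAR_MMU_MAPPED  0x80000000
#define TSAR_MMU_LOCAL   0x20000000
#define TSAR_MMU_REMOTE  0x10000000

#define GPT_MAPPED       0x0001   /* hypothetical value */
#define GPT_READABLE     0x0002   /* hypothetical value */
#define GPT_ACCESSED     0x0004   /* hypothetical value */

static uint32_t gpt2tsar( uint32_t g )
{
    uint32_t t = 0;
    if( g & GPT_MAPPED   ) t |= TSAR_MMU_MAPPED;
    if( g & GPT_ACCESSED ) t |= TSAR_MMU_LOCAL;    // never TSAR_MMU_REMOTE
    return t;
}

static uint32_t tsar2gpt( uint32_t t )
{
    uint32_t g = 0;
    if( t & TSAR_MMU_MAPPED ) g |= GPT_MAPPED | GPT_READABLE; // MAPPED implies READABLE
    if( t & TSAR_MMU_LOCAL  ) g |= GPT_ACCESSED;
    if( t & TSAR_MMU_REMOTE ) g |= GPT_ACCESSED;   // both access bits fold together
    return g;
}

int main( void )
{
    // a remotely accessed entry: the REMOTE information survives as ACCESSED...
    uint32_t g = tsar2gpt( TSAR_MMU_MAPPED | TSAR_MMU_REMOTE );
    // ...but translating back yields LOCAL, not REMOTE: the distinction is lost
    uint32_t t = gpt2tsar( g );
    printf("gpt = %04x / tsar = %08x (REMOTE became LOCAL)\n",
           (unsigned)g, (unsigned)t);
    return 0;
}
```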
```diff
 …
     gpt->page = GET_PTR( page_xp );
 
-/*
-// initialize PTE entries attributes masks
-GPT_MAPPED     = TSAR_MMU_PRESENT;
-GPT_SMALL      = TSAR_MMU_PTD1;
-GPT_READABLE   = TSAR_MMU_PRESENT;
-GPT_WRITABLE   = TSAR_MMU_WRITABLE;
-GPT_EXECUTABLE = TSAR_MMU_EXECUTABLE;
-GPT_CACHABLE   = TSAR_MMU_CACHABLE;
-GPT_USER       = TSAR_MMU_USER;
-GPT_DIRTY      = TSAR_MMU_DIRTY;
-GPT_ACCESSED   = TSAR_MMU_LOCAL | TSAR_MMU_REMOTE;
-GPT_GLOBAL     = TSAR_MMU_GLOBAL;
-GPT_COW        = TSAR_MMU_COW;
-GPT_SWAP       = TSAR_MMU_SWAP;
-GPT_LOCKED     = TSAR_MMU_LOCKED;
-*/
     return 0;
 }  // end hal_gpt_create()
 …
 {
     pte1 = pt1[ix1];
-    if( (pte1 & TSAR_MMU_PRESENT) != 0 )  // PTE1 valid
+    if( (pte1 & TSAR_MMU_MAPPED) != 0 )   // PTE1 valid
     {
-        if( (pte1 & TSAR_MMU_PTD1) == 0 )   // BIG page
+        if( (pte1 & TSAR_MMU_SMALL) == 0 )  // BIG page
         {
             if( (pte1 & TSAR_MMU_USER) != 0 )
 …
             {
                 attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
-                if( ((attr & TSAR_MMU_PRESENT) != 0 ) && ((attr & TSAR_MMU_USER) != 0) )
+                if( ((attr & TSAR_MMU_MAPPED) != 0 ) && ((attr & TSAR_MMU_USER) != 0) )
                 {
                     // release the physical page
 …
 {
     pte1 = pt1[ix1];
-    if( (pte1 & TSAR_MMU_PRESENT) != 0 )
+    if( (pte1 & TSAR_MMU_MAPPED) != 0 )
     {
-        if( (pte1 & TSAR_MMU_PTD1) == 0 )  // BIG page
+        if( (pte1 & TSAR_MMU_SMALL) == 0 ) // BIG page
         {
             printk(" - BIG : pt1[%d] = %x\n", ix1 , pte1 );
 …
             pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
             pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
-            if( (pte2_attr & TSAR_MMU_PRESENT) != 0 )
+            if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )
             {
                 printk(" - SMALL : pt1[%d] = %x / pt2[%d] / pt2[%d]\n",
 …
                           vpn_t    vpn,
                           ppn_t    ppn,
-                          uint32_t attr )
+                          uint32_t attr )   // generic GPT attributes
 {
     uint32_t          * pt1;       // virtual base addres of PT1
     volatile uint32_t * pte1_ptr;  // pointer on PT1 entry
     uint32_t            pte1;      // PT1 entry value
 
     ppn_t               pt2_ppn;   // PPN of PT2
     uint32_t          * pt2;       // virtual base address of PT2
 
     uint32_t            small;     // requested PTE is for a small page
     bool_t              atomic;
 
     page_t            * page;      // pointer on new physical page descriptor
     xptr_t              page_xp;   // extended pointer on new page descriptor
 
     uint32_t            ix1;       // index in PT1
     uint32_t            ix2;       // index in PT2
+
+    uint32_t            tsar_attr; // PTE attributes for TSAR MMU
 
     // compute indexes in PT1 and PT2
 …
 
     pt1   = gpt->ptr;
-    small = (attr & TSAR_MMU_PTD1);
+    small = attr & GPT_SMALL;
+
+    // compute tsar_attr from generic attributes
+    tsar_attr = gpt2tsar( attr );
 
     // get PT1 entry value
 …
     // Big pages (PTE1) are only set for the kernel vsegs, in the kernel init phase.
     // There is no risk of concurrent access.
-    if( small == 0 )
-
-    if( (pte1 != 0) || (attr & GPT_COW) )
+    if( small == 0 )
+    {
+        if( pte1 != 0 )
         {
-            printk("\n[ERROR] in %s : set a big page in a mapped PT1 entry / PT1[%d] = %x\n",
+            panic("\n[PANIC] in %s : set a big page in a mapped PT1 entry / PT1[%d] = %x\n",
                    __FUNCTION__ , ix1 , pte1 );
-            return EINVAL;
         }
 
         // set the PTE1
-        *pte1_ptr = attr | (ppn >> 9);
+        *pte1_ptr = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) |
+                    ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
         hal_fence();
         return 0;
 …
     // From this point, the requested PTE is a PTE2 (small page)
 
-    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // the PT1 entry is not valid
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // the PT1 entry is not valid
     {
         // allocate one physical page for the PT2
 …
         {
             atomic = hal_atomic_cas( (void*)pte1, 0 ,
-                                     TSAR_MMU_PRESENT | TSAR_MMU_PTD1 | pt2_ppn );
+                                     TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn );
         }
         while( (atomic == false) && (*pte1_ptr == 0) );
 …
     {
         // This valid entry must be a PTD1
-        if( (pte1 & TSAR_MMU_PTD1) == 0 )
+        if( (pte1 & TSAR_MMU_SMALL) == 0 )
         {
             printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
 …
     pt2[2 * ix2 + 1] = ppn;
     hal_fence();
-    pt2[2 * ix2]     = attr;
+    pt2[2 * ix2]     = tsar_attr;
     hal_fence();
 
     return 0;
+
 }  // end of hal_gpt_set_pte()
```
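The new PTE1 write keeps only the attribute bits (the top 10 bits selected by `TSAR_MMU_PTE1_ATTR_MASK`) and the big-page number: `ppn >> 9` drops the 9 low bits of the 28-bit small-page PPN, since a big page covers 2^9 small pages. The standalone sketch below reproduces that packing; the decode shown is an assumed inverse, because the `TSAR_MMU_PPN_FROM_PTE1` definition is elided from this excerpt.

```c
/* Sketch of the new PTE1 packing in hal_gpt_set_pte().
 * The encode masks come from the changeset; the decode is an ASSUMED
 * inverse (TSAR_MMU_PPN_FROM_PTE1 is not shown in the excerpt). */
#include <stdio.h>
#include <stdint.h>

#define TSAR_MMU_MAPPED            0x80000000
#define TSAR_MMU_PTE1_ATTR_MASK    0xFFC00000
#define TSAR_MMU_PTE1_PPN_MASK     0x0007FFFF

int main( void )
{
    uint32_t ppn       = 0x0123456;          // 28-bit small-page PPN
    uint32_t tsar_attr = TSAR_MMU_MAPPED;

    // encode, as in r401: attributes in the top 10 bits, big-page number below
    uint32_t pte1 = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) |
                    ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);

    // assumed decode: recover the base PPN of the big page (512-aligned)
    uint32_t base_ppn = (pte1 & TSAR_MMU_PTE1_PPN_MASK) << 9;

    printf("pte1 = %08x / base_ppn = %07x\n", (unsigned)pte1, (unsigned)base_ppn);
    return 0;
}
```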
```diff
 …
     pte1 = pt1[ix1];
 
-    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // PT1 entry not present
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
     {
         *attr = 0;
 …
     }
 
-    if( (pte1 & TSAR_MMU_PTD1) == 0 )   // it's a PTE1
-    {
-        *attr = TSAR_MMU_ATTR_FROM_PTE1( pte1 );
+    if( (pte1 & TSAR_MMU_SMALL) == 0 )  // it's a PTE1
+    {
+        *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
         *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
     }
 …
 
         *ppn  = pt2[2*ix2+1] & ((1<<TSAR_MMU_PPN_WIDTH)-1);
-        *attr = pt2[2*ix2];
+        *attr = tsar2gpt( pt2[2*ix2] );
     }
 }  // end hal_gpt_get_pte()
 …
     ppn_t      ppn;   // PPN of page to be released
 
-    kmem_req_t req;
-
     // get ix1 & ix2 indexes
     uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
     uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
 
-    // get pointer on calling process
-    process_t * process = CURRENT_THREAD->process;
-
-    // compute is_ref
-    bool_t is_ref = ( GET_CXY( process->ref_xp ) == local_cxy );
-
     // get PTE1 value
     pt1  = gpt->ptr;
     pte1 = pt1[ix1];
 
-    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // PT1 entry not present
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
     {
         return;
     }
 
-    if( (pte1 & TSAR_MMU_PTD1) == 0 )   // it's a PTE1
+    if( (pte1 & TSAR_MMU_SMALL) == 0 )  // it's a PTE1
     {
         // get PPN
 …
         pt1[ix1] = 0;
         hal_fence();
-
-        // releases the physical page if local
-        // req.type = KMEM_PAGE;
-        // req.size = 9;
-        // req.ptr  = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
-        // kmem_free( &req );
 
         return;
 …
     hal_fence();
 
-    // releases the small page
-    // req.type = KMEM_PAGE;
-    // req.size = 0;
-    // req.ptr  = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
-    // kmem_free( &req );
-
     return;
 }
```
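In `hal_gpt_get_pte()` the big-page case reconstructs the PPN by OR-ing the PTE1's base PPN with the low 9 bits of the VPN, i.e. the index of the small page inside the big page (IX2 is 9 bits wide). The standalone demo below exercises that index arithmetic with the macros copied from the top of the file; the sample VPN and base PPN are arbitrary.

```c
/* Demo of the TSAR two-level index arithmetic used by hal_gpt_get_pte():
 * IX1/IX2 extraction and big-page PPN reconstruction.
 * Macros copied from the changeset; sample values are arbitrary. */
#include <stdio.h>
#include <stdint.h>

#define TSAR_MMU_IX2_WIDTH 9
#define TSAR_MMU_IX1_FROM_VPN( vpn )  ((vpn >> 9) & 0x7FF)
#define TSAR_MMU_IX2_FROM_VPN( vpn )  (vpn & 0x1FF)

int main( void )
{
    uint32_t vpn = 0x000A5B7C;   // arbitrary 20-bit virtual page number

    printf("ix1 = %x / ix2 = %x\n",
           (unsigned)TSAR_MMU_IX1_FROM_VPN( vpn ),
           (unsigned)TSAR_MMU_IX2_FROM_VPN( vpn ));

    // for a big page, the PTE1 stores the PPN of its first small page;
    // the requested page's PPN is that base plus the in-big-page index
    uint32_t base_ppn = 0x0123400;  // base is 512-aligned (low 9 bits zero)
    uint32_t ppn      = base_ppn | (vpn & ((1 << TSAR_MMU_IX2_WIDTH) - 1));

    printf("ppn = %07x\n", (unsigned)ppn);
    return 0;
}
```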
```diff
 …
     // If present, the page must be small
-    if( ((pte1 & TSAR_MMU_PRESENT) != 0) && ((pte1 & TSAR_MMU_PTD1) == 0) )
+    if( ((pte1 & TSAR_MMU_MAPPED) != 0) && ((pte1 & TSAR_MMU_SMALL) == 0) )
     {
         printk("\n[ERROR] in %s : try to lock a big page / PT1[%d] = %x\n",
 …
     }
 
-    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // missing PT1 entry
+    if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // missing PT1 entry
     {
         // allocate one physical page for PT2
 …
         {
             atomic = hal_atomic_cas( (void*)pte1_ptr , 0 ,
-                                     TSAR_MMU_PRESENT | TSAR_MMU_PTD1 | pt2_ppn );
+                                     TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn );
         }
         while( (atomic == false) && (*pte1_ptr == 0) );
 …
     {
         // This valid entry must be a PTD1
-        if( (pte1 & TSAR_MMU_PTD1) == 0 )
+        if( (pte1 & TSAR_MMU_SMALL) == 0 )
         {
             printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
 …
     do
     {
-        // busy waiting until GPT_LOCK == 0
+        // busy waiting until TSAR_MMU_LOCK == 0
         do
         {
 …
             hal_rdbar();
         }
-        while( (attr & GPT_LOCKED) != 0 );
-
-        // try to set the GPT_LOCK wit a CAS
-        atomic = hal_atomic_cas( (void*)pte2_ptr, attr , (attr | GPT_LOCKED) );
+        while( (attr & TSAR_MMU_LOCKED) != 0 );
+
+        atomic = hal_atomic_cas( (void*)pte2_ptr, attr , (attr | TSAR_MMU_LOCKED) );
     }
     while( atomic == 0 );
 
     return 0;
+
 }  // end hal_gpt_lock_pte()
 …
     // check PTE1 present and small page
-    if( ((pte1 & TSAR_MMU_PRESENT) == 0) || ((pte1 & TSAR_MMU_PTD1) == 0) )
+    if( ((pte1 & TSAR_MMU_MAPPED) == 0) || ((pte1 & TSAR_MMU_SMALL) == 0) )
     {
         printk("\n[ERROR] in %s : try to unlock a big or undefined page / PT1[%d] = %x\n",
 …
     // check PTE2 present and locked
-    if( ((attr & TSAR_MMU_PRESENT) == 0) || ((attr & GPT_LOCKED) == 0) );
-    {
-        printk("\n[ERROR] in %s : try to unlock an undefined page / PT1[%d] = %x\n",
+    if( ((attr & TSAR_MMU_MAPPED) == 0) || ((attr & TSAR_MMU_LOCKED) == 0) );
+    {
+        printk("\n[ERROR] in %s : unlock an unlocked/unmapped page / PT1[%d] = %x\n",
                __FUNCTION__ , ix1 , pte1 );
         return EINVAL;
 …
 
     // reset GPT_LOCK
-    *pte2_ptr = attr & !GPT_LOCKED;
+    *pte2_ptr = attr & ~TSAR_MMU_LOCKED;
 
     return 0;
+
 }  // end hal_gpt_unlock_pte()
 …
     {
         pte1 = src_pt1[ix1];
-        if( (pte1 & TSAR_MMU_PRESENT) != 0 )
+        if( (pte1 & TSAR_MMU_MAPPED) != 0 )
         {
-            if( (pte1 & TSAR_MMU_PTD1) == 0 )  // PTE1 => big kernel page
+            if( (pte1 & TSAR_MMU_SMALL) == 0 ) // PTE1 => big kernel page
             {
                 // big kernel pages are shared by all processes => copy it
 …
                 // set a new PTD1 in DST_GPT
                 dst_pt2_ppn  = (ppn_t)ppm_page2ppn( page_xp );
-                dst_pt1[ix1] = TSAR_MMU_PRESENT | TSAR_MMU_PTD1 | dst_pt2_ppn;
+                dst_pt1[ix1] = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | dst_pt2_ppn;
 
                 // get pointer on PT2 in SRC_GPT
 …
                 pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( src_pt2[2 * ix2] );
 
-                if( (pte2_attr & TSAR_MMU_PRESENT) != 0 )  // valid PTE2 in SRC_GPT
+                if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )   // valid PTE2 in SRC_GPT
                 {
                     // get GPT_WRITABLE & PPN
```
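Two details of the lock path are worth spelling out. First, `hal_gpt_lock_pte()` spins until the `TSAR_MMU_LOCKED` bit reads clear, then tries to set it with a compare-and-swap, retrying if another thread won the race. Second, the unlock now uses bitwise NOT: the r391 expression `attr & !GPT_LOCKED` applied logical NOT, which evaluates to `attr & 0` and would have wiped the entire entry instead of clearing one bit. The standalone sketch below mirrors that pattern; GCC's `__sync_bool_compare_and_swap` stands in for `hal_atomic_cas()`, and the single-threaded `main()` merely exercises the bit manipulation.

```c
/* Sketch of the PTE2 lock/unlock pattern from hal_gpt_lock_pte() /
 * hal_gpt_unlock_pte(). ASSUMPTION: __sync_bool_compare_and_swap is used
 * here as a stand-in for the kernel's hal_atomic_cas(). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TSAR_MMU_MAPPED  0x80000000u
#define TSAR_MMU_LOCKED  0x00000008u

static void lock_pte2( volatile uint32_t * pte2_ptr )
{
    uint32_t attr;
    bool     atomic;
    do
    {
        // busy waiting until TSAR_MMU_LOCKED == 0
        do { attr = *pte2_ptr; } while( (attr & TSAR_MMU_LOCKED) != 0 );

        // try to set the lock bit with a CAS; retry if another thread won
        atomic = __sync_bool_compare_and_swap( (uint32_t *)pte2_ptr,
                                               attr, attr | TSAR_MMU_LOCKED );
    }
    while( atomic == false );
}

static void unlock_pte2( volatile uint32_t * pte2_ptr )
{
    // bitwise NOT clears only the lock bit; the old logical NOT
    // (attr & !GPT_LOCKED) computed attr & 0 and cleared everything
    *pte2_ptr = *pte2_ptr & ~TSAR_MMU_LOCKED;
}

int main( void )
{
    volatile uint32_t pte2 = TSAR_MMU_MAPPED;
    lock_pte2( &pte2 );
    printf("locked   : %08x\n", (unsigned)pte2);
    unlock_pte2( &pte2 );
    printf("unlocked : %08x\n", (unsigned)pte2);
    return 0;
}
```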