Changeset 391
- Timestamp: Aug 16, 2017, 3:27:05 PM
- Location: trunk/hal/tsar_mips32/core
- Files: 2 edited
Legend:
- Unmodified: ' '
- Added: '+'
- Removed: '-'
trunk/hal/tsar_mips32/core/hal_exception.c
--- r381
+++ r391

              __FUNCTION__, this->trdid , process->pid , bad_vaddr , excp_code );

-    // a kernel thread should not rise an MMU exception
-    if( this->type != THREAD_USER )
-    {
-        printk("\n[PANIC] in %s : thread %x is a kernel thread / vaddr = %x\n",
-               __FUNCTION__ , this->trdid , bad_vaddr );
-        return EXCP_KERNEL_PANIC;
-    }
-
-    // enable IRQs
-    hal_enable_irq( NULL );
-
+    // on TSAR, a kernel thread should not rise an MMU exception
+    assert( (this->type != THREAD_USER) , __FUNCTION__ ,
+            "thread %x is a kernel thread / vaddr = %x\n", this->trdid , bad_vaddr );
+
     // vaddr must be contained in a registered vseg
-    vseg = vmm_get_vseg( process , bad_vaddr );
-
-    if( vseg == NULL )                        // vseg not found in local cluster
-    {
-        // get extended pointer on reference process
-        xptr_t ref_xp = process->ref_xp;
-
-        // get cluster and local pointer on reference process
-        cxy_t       ref_cxy = GET_CXY( ref_xp );
-        process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
-
-        if( local_cxy != ref_cxy )            // reference process is remote
-        {
-            // get extended pointer on reference vseg
-            xptr_t vseg_xp;
-            rpc_vmm_get_ref_vseg_client( ref_cxy , ref_ptr , bad_vaddr , &vseg_xp );
-
-            if( vseg == NULL )                // vseg not found => illegal user vaddr
-            {
-                printk("\n[ERROR] in %s for thread %x : illegal vaddr = %x\n",
-                       __FUNCTION__ , this->trdid , bad_vaddr );
-                hal_disable_irq( NULL );
-                return EXCP_USER_ERROR;
-            }
-            else                              // vseg found => make a local copy
-            {
-                // allocate a vseg in local cluster
-                vseg = vseg_alloc();
-
-                if( vseg == NULL )
-                {
-                    printk("\n[PANIC] in %s : no memory for vseg / thread = %x\n",
-                           __FUNCTION__ , this->trdid );
-                    hal_disable_irq( NULL );
-                    return EXCP_KERNEL_PANIC;
-                }
-
-                // initialise local vseg from reference
-                vseg_init_from_ref( vseg , ref_xp );
-
-                // register local vseg in local VMM
-                error = vseg_attach( &process->vmm , vseg );
-            }
-        }
-        else                                  // reference is local => illegal user vaddr
-        {
-            printk("\n[ERROR] in %s for thread %x : illegal vaddr = %x\n",
-                   __FUNCTION__ , this->trdid , bad_vaddr );
-            hal_disable_irq( NULL );
-            return EXCP_USER_ERROR;
-        }
-    }
+    error = vmm_get_vseg( process , bad_vaddr , &vseg );

     vmm_dmsg("\n[INFO] %s : found vseg for thread %x / vseg_min = %x / vseg_max = %x\n",
…
     if( excp_code & MMU_EXCP_PAGE_UNMAPPED )
     {
+        // enable IRQs before handling page fault
+        hal_enable_irq( NULL );
+
         // try to map the unmapped PTE
         error = vmm_handle_page_fault( process,
                                        vseg,
                                        bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );  // vpn
-
-        if( error )
-        {
-            printk("\n[PANIC] in %s for thread %x : cannot map legal vaddr = %x\n",
+        // disable IRQs
+        hal_disable_irq( NULL );
+
+        if( error )    // not enough memory
+        {
+            printk("\n[ERROR] in %s for thread %x : cannot map legal vaddr = %x\n",
                    __FUNCTION__ , this->trdid , bad_vaddr );
-            hal_disable_irq( NULL );
-            return EXCP_KERNEL_PANIC;
-        }
-        else
+            return EXCP_USER_ERROR;
+        }
+        else           // page fault successfully handled
         {
             vmm_dmsg("\n[INFO] %s : page fault handled for vaddr = %x in thread %x\n",
                      __FUNCTION__ , bad_vaddr , this->trdid );
-
-            // page fault successfully handled
-            hal_disable_irq( NULL );
             return EXCP_NON_FATAL;
         }
…
             __FUNCTION__ , this->trdid , bad_vaddr );

-        hal_disable_irq( NULL );
         return EXCP_USER_ERROR;
     }
…
         __FUNCTION__ , this->trdid , bad_vaddr );

-    hal_disable_irq( NULL );
     return EXCP_USER_ERROR;
 }
…
         __FUNCTION__ , this->trdid , bad_vaddr );

-    hal_disable_irq( NULL );
     return EXCP_USER_ERROR;
 }
…
         __FUNCTION__ , this->trdid , excp_code , bad_vaddr );

-    hal_disable_irq( NULL );
     return EXCP_KERNEL_PANIC;
 }
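Taken together, these hunks make two behavioral changes: the inline remote-vseg lookup and local replication (the deleted if( vseg == NULL ) block) is delegated to the extended vmm_get_vseg() service, and a failed page-fault mapping now returns EXCP_USER_ERROR instead of panicking the kernel, with IRQs enabled only around the page-fault handling itself. The following is a minimal compilable sketch of the resulting control flow; the stub services, the MMU_EXCP_PAGE_UNMAPPED value, and the sample vaddr are assumptions, not the real kernel code.

// Sketch of the post-r391 MMU exception path (stub types, not the real kernel build).
#include <stdint.h>
#include <stdio.h>

typedef int error_t;
enum { EXCP_NON_FATAL, EXCP_USER_ERROR, EXCP_KERNEL_PANIC };
#define MMU_EXCP_PAGE_UNMAPPED 0x1    /* assumed bit value, only the test appears above */
#define CONFIG_PPM_PAGE_SHIFT  12     /* 4 Kbytes pages */

/* stubs standing in for the kernel services used by the real handler */
static error_t vmm_get_vseg( uint32_t bad_vaddr ) { (void)bad_vaddr; return 0; }
static error_t vmm_handle_page_fault( uint32_t vpn ) { (void)vpn; return 0; }

static int hal_mmu_excp_sketch( uint32_t bad_vaddr, uint32_t excp_code )
{
    /* vaddr must be contained in a registered vseg */
    if( vmm_get_vseg( bad_vaddr ) ) return EXCP_USER_ERROR;

    if( excp_code & MMU_EXCP_PAGE_UNMAPPED )
    {
        /* IRQs are enabled only around the (possibly long) mapping operation */
        error_t error = vmm_handle_page_fault( bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );

        /* an allocation failure is now a user error, no longer a kernel panic */
        return error ? EXCP_USER_ERROR : EXCP_NON_FATAL;
    }

    return EXCP_USER_ERROR;   /* access-rights violations fall through here */
}

int main( void )
{
    printf( "%d\n", hal_mmu_excp_sketch( 0x40001000, MMU_EXCP_PAGE_UNMAPPED ) );
    return 0;
}

Under this flow a faulty user access no longer brings the kernel down, presumably so that the offending user process can be killed instead of halting the machine.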
trunk/hal/tsar_mips32/core/hal_gpt.c
--- r315
+++ r391

 ////////////////////////////////////////////////////////////////////////////////////////
 // This define the masks for the TSAR MMU PTE attributes. (from TSAR MMU specification)
-// the GPT masks are derived from the TSAR MMU PTE attributes
-// in the TSAR specific hal_gpt_create() function.
 ////////////////////////////////////////////////////////////////////////////////////////
…
 #define TSAR_MMU_ATTR_FROM_PTE2( pte2 )  (pte2 & 0xFFC000FF)

-/****************************************************************************************
- * These global variables defines the masks for the Generic Page Table Entry attributes,
- * and must be defined in all GPT implementation.
- ***************************************************************************************/
-
-uint32_t GPT_MAPPED;
-uint32_t GPT_SMALL;
-uint32_t GPT_READABLE;
-uint32_t GPT_WRITABLE;
-uint32_t GPT_EXECUTABLE;
-uint32_t GPT_CACHABLE;
-uint32_t GPT_USER;
-uint32_t GPT_DIRTY;
-uint32_t GPT_ACCESSED;
-uint32_t GPT_GLOBAL;
-uint32_t GPT_COW;
-uint32_t GPT_SWAP;
-uint32_t GPT_LOCKED;
-
 /////////////////////////////////////
 error_t hal_gpt_create( gpt_t * gpt )
…
     gpt->page = GET_PTR( page_xp );

+/*
     // initialize PTE entries attributes masks
     GPT_MAPPED     = TSAR_MMU_PRESENT;
…
     GPT_SWAP       = TSAR_MMU_SWAP;
     GPT_LOCKED     = TSAR_MMU_LOCKED;
-
+*/
     return 0;
 }  // end hal_gpt_create()
…
     {
         pte1 = pt1[ix1];
-        if( (pte1 & GPT_MAPPED) != 0 )            // PTE1 valid
+        if( (pte1 & TSAR_MMU_PRESENT) != 0 )      // PTE1 valid
         {
-            if( (pte1 & GPT_SMALL) == 0 )         // BIG page
+            if( (pte1 & TSAR_MMU_PTD1) == 0 )     // BIG page
             {
-                if( (pte1 & GPT_USER) != 0 )
+                if( (pte1 & TSAR_MMU_USER) != 0 )
                 {
                     // warning message
                     printk("\n[WARNING] in %s : found an USER BIG page / ix1 = %d\n",
                            __FUNCTION__ , ix1 );

                     // release the big physical page if reference cluster
…
                 }
             }
             else                                  // SMALL page
             {
                 // get local pointer on PT2
…
                 {
                     attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
-                    if( ((attr & GPT_MAPPED) != 0 ) && ((attr & GPT_USER) != 0) )
+                    if( ((attr & TSAR_MMU_PRESENT) != 0 ) && ((attr & TSAR_MMU_USER) != 0) )
                     {
                         // release the physical page
…
     {
         pte1 = pt1[ix1];
-        if( (pte1 & GPT_MAPPED) != 0 )
+        if( (pte1 & TSAR_MMU_PRESENT) != 0 )
         {
-            if( (pte1 & GPT_SMALL) == 0 )         // BIG page
+            if( (pte1 & TSAR_MMU_PTD1) == 0 )     // BIG page
             {
                 printk(" - BIG : pt1[%d] = %x\n", ix1 , pte1 );
…
                 pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
                 pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
-                if( (pte2_attr & GPT_MAPPED) != 0 )
+                if( (pte2_attr & TSAR_MMU_PRESENT) != 0 )
                 {
                     printk(" - SMALL : pt1[%d] = %x / pt2[%d] / pt2[%d]\n",
…

     pt1   = gpt->ptr;
-    small = (attr & GPT_SMALL);
+    small = (attr & TSAR_MMU_PTD1);

     // get PT1 entry value
…
     // From this point, the requested PTE is a PTE2 (small page)

-    if( (pte1 & GPT_MAPPED) == 0 )        // the PT1 entry is not valid
+    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // the PT1 entry is not valid
     {
         // allocate one physical page for the PT2
…
     {
         // This valid entry must be a PTD1
-        if( (pte1 & GPT_SMALL) == 0 )
+        if( (pte1 & TSAR_MMU_PTD1) == 0 )
        {
             printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
…
     pte1 = pt1[ix1];

-    if( (pte1 & GPT_MAPPED) == 0 )        // PT1 entry not present
+    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // PT1 entry not present
     {
         *attr = 0;
…
     }

-    if( (pte1 & GPT_SMALL) == 0 )         // it's a PTE1
+    if( (pte1 & TSAR_MMU_PTD1) == 0 )     // it's a PTE1
     {
         *attr = TSAR_MMU_ATTR_FROM_PTE1( pte1 );
…
     kmem_req_t req;

+    // get ix1 & ix2 indexes
     uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
     uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
+
+    // get pointer on calling process
+    process_t * process = CURRENT_THREAD->process;
+
+    // compute is_ref
+    bool_t is_ref = ( GET_CXY( process->ref_xp ) == local_cxy );

     // get PTE1 value
…
     pte1 = pt1[ix1];

-    if( (pte1 & GPT_MAPPED) == 0 )        // PT1 entry not present
+    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // PT1 entry not present
     {
         return;
     }

-    if( (pte1 & GPT_SMALL) == 0 )         // it's a PTE1
+    if( (pte1 & TSAR_MMU_PTD1) == 0 )     // it's a PTE1
     {
         // get PPN
…
     hal_fence();

-    // releases the big page
-    req.type = KMEM_PAGE;
-    req.size = 9;
-    req.ptr  = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
-    kmem_free( &req );
+    // releases the physical page if local
+    // req.type = KMEM_PAGE;
+    // req.size = 9;
+    // req.ptr  = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
+    // kmem_free( &req );

     return;
     }
     else                                  // it's a PTD1
     {
         // compute PT2 base address
…

     // unmap the small page
-    pt2[2*ix2]   = 0;
-    hal_fence();
-    pt2[2*ix2+1] = 0;
-    hal_fence();
-
-    // releases the small page
-    req.type = KMEM_PAGE;
-    req.size = 0;
-    req.ptr  = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
-    kmem_free( &req );
+    pt2[2*ix2] = 0;                       // only attr is reset
+    hal_fence();
+
+    // releases the small page
+    // req.type = KMEM_PAGE;
+    // req.size = 0;
+    // req.ptr  = (void*)(ppn << CONFIG_PPM_PAGE_SHIFT);
+    // kmem_free( &req );

     return;
…

     // If present, the page must be small
-    if( ((pte1 & GPT_MAPPED) != 0) && ((pte1 & GPT_SMALL) == 0) )
+    if( ((pte1 & TSAR_MMU_PRESENT) != 0) && ((pte1 & TSAR_MMU_PTD1) == 0) )
     {
         printk("\n[ERROR] in %s : try to lock a big page / PT1[%d] = %x\n",
…
-    if( (pte1 & GPT_MAPPED) == 0 )        // missing PT1 entry
+    if( (pte1 & TSAR_MMU_PRESENT) == 0 )  // missing PT1 entry
     {
         // allocate one physical page for PT2
…
     {
         // This valid entry must be a PTD1
-        if( (pte1 & GPT_SMALL) == 0 )
+        if( (pte1 & TSAR_MMU_PTD1) == 0 )
         {
             printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
…

     // check PTE1 present and small page
-    if( ((pte1 & GPT_MAPPED) == 0) || ((pte1 & GPT_SMALL) == 0) )
+    if( ((pte1 & TSAR_MMU_PRESENT) == 0) || ((pte1 & TSAR_MMU_PTD1) == 0) )
     {
         printk("\n[ERROR] in %s : try to unlock a big or undefined page / PT1[%d] = %x\n",
…

     // check PTE2 present and locked
-    if( ((attr & GPT_MAPPED) == 0) || ((attr & GPT_LOCKED) == 0) );
+    if( ((attr & TSAR_MMU_PRESENT) == 0) || ((attr & GPT_LOCKED) == 0) );
     {
         printk("\n[ERROR] in %s : try to unlock an undefined page / PT1[%d] = %x\n",
…
     {
         pte1 = src_pt1[ix1];
-        if( (pte1 & GPT_MAPPED) != 0 )
+        if( (pte1 & TSAR_MMU_PRESENT) != 0 )
         {
-            if( (pte1 & GPT_SMALL) == 0 )         // PTE1 => big kernel page
+            if( (pte1 & TSAR_MMU_PTD1) == 0 )     // PTE1 => big kernel page
             {
                 // big kernel pages are shared by all processes => copy it
…
                 pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( src_pt2[2 * ix2] );

-                if( (pte2_attr & GPT_MAPPED) != 0 )       // valid PTE2 in SRC_GPT
+                if( (pte2_attr & TSAR_MMU_PRESENT) != 0 ) // valid PTE2 in SRC_GPT
                 {
                     // get GPT_WRITABLE & PPN
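Apart from hal_gpt_reset_pte(), where the kmem_free() calls are commented out (with the new is_ref flag recording whether the local cluster is the reference one, apparently in preparation for releasing pages only locally), this diff is a mechanical substitution of the compile-time TSAR_MMU_* masks for the runtime-initialized GPT_* globals deleted at the top of the file. The table layout itself is unchanged: a PT1 entry is either a PTE1 (big page) or a PTD1 pointing to a PT2 whose entries are (attr, ppn) pairs, hence the 2*ix2 indexing seen above. Below is a self-contained sketch of that lookup modeled on hal_gpt_get_pte(); the mask bit positions, the 11/9-bit VPN split, the PPN1 field width, and the pt2_tab indirection are all assumptions (only the TSAR_MMU_ATTR_FROM_PTE2 mask is quoted verbatim in this changeset).

// Sketch of a TSAR-style two-level GPT walk, modeled on hal_gpt_get_pte().
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TSAR_MMU_PRESENT   0x80000000                   /* assumed bit position */
#define TSAR_MMU_PTD1      0x40000000                   /* assumed bit position */
#define TSAR_MMU_IX1_FROM_VPN( vpn )   ((vpn) >> 9)     /* assumed 11-bit ix1   */
#define TSAR_MMU_IX2_FROM_VPN( vpn )   ((vpn) & 0x1FF)  /* assumed  9-bit ix2   */
#define TSAR_MMU_ATTR_FROM_PTE2( pte2 )  ((pte2) & 0xFFC000FF)

/* pt2_tab replaces the PPN-based PT2 address computation of the real code */
static bool gpt_get_pte_sketch( uint32_t *pt1, uint32_t **pt2_tab, uint32_t vpn,
                                uint32_t *attr, uint32_t *ppn )
{
    uint32_t ix1  = TSAR_MMU_IX1_FROM_VPN( vpn );
    uint32_t ix2  = TSAR_MMU_IX2_FROM_VPN( vpn );
    uint32_t pte1 = pt1[ix1];

    if( (pte1 & TSAR_MMU_PRESENT) == 0 ) return false;  /* unmapped PT1 entry */

    if( (pte1 & TSAR_MMU_PTD1) == 0 )   /* PTE1 => big (2 Mbytes) page */
    {
        *attr = pte1;
        *ppn  = ((pte1 & 0x7FFFF) << 9) | ix2;   /* assumed PPN1 field width */
        return true;
    }

    /* PTD1 => PT2 entries are (attr, ppn) pairs, hence the 2*ix2 indexing */
    uint32_t *pt2 = pt2_tab[ix1];
    *attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
    *ppn  = pt2[2 * ix2 + 1];
    return (*attr & TSAR_MMU_PRESENT) != 0;
}

int main( void )
{
    static uint32_t  pt1[2048];          /* PT1 : 2048 PTE1/PTD1 entries    */
    static uint32_t  pt2[2 * 512];       /* one PT2 : 512 (attr, ppn) pairs */
    static uint32_t *pt2_tab[2048];

    uint32_t vpn   = (3u << 9) | 5;      /* ix1 = 3 / ix2 = 5 */
    pt1[3]         = TSAR_MMU_PRESENT | TSAR_MMU_PTD1;
    pt2_tab[3]     = pt2;
    pt2[2*5]       = TSAR_MMU_PRESENT;   /* attr */
    pt2[2*5 + 1]   = 0x1234;             /* ppn  */

    uint32_t attr, ppn;
    if( gpt_get_pte_sketch( pt1, pt2_tab, vpn, &attr, &ppn ) )
        printf( "attr = %x / ppn = %x\n", (unsigned)attr, (unsigned)ppn );
    return 0;
}

In the real code the PT2 base address is derived from the PPN stored in the PTD1 rather than from a pointer table; pt2_tab just keeps the sketch free of physical-to-virtual address translation.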