Changeset 632 for trunk/hal/tsar_mips32
- Timestamp: May 28, 2019, 2:56:04 PM
- Location: trunk/hal/tsar_mips32/core
- Files: 4 edited
trunk/hal/tsar_mips32/core/hal_exception.c
r625 → r632

   // in case of illegal virtual address. Finally, it updates the local page table from the
   // reference cluster.
+  // WARNING : In order to prevent deadlocks, this function enables IRQs before calling the
+  // vmm_handle_page_fault() and the vmm_handle_cow() functions, because concurrent calls
+  // to these functions can create cross dependencies...
   //////////////////////////////////////////////////////////////////////////////////////////
   // @ this : pointer on faulty thread descriptor.
…
   // check thread type
   if( CURRENT_THREAD->type != THREAD_USER )
   {
       printk("\n[PANIC] in %s : illegal thread type %s\n",
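The WARNING above describes the deadlock-avoidance pattern without showing it. The sketch below is illustrative only, not code from this changeset: the wrapper name handle_user_fault() is hypothetical, and the hal_enable_irq() / hal_restore_irq() signatures are assumed from the hal_irqmask.c functions modified later in this changeset.

    // Hypothetical illustration: enable IRQs around the VMM calls, so that two
    // threads whose page faults depend on each other can still be scheduled.
    static error_t handle_user_fault( process_t * process,
                                      vpn_t       vpn,
                                      bool_t      is_cow )
    {
        uint32_t  irq_state;
        error_t   error;

        // enable IRQs before calling the VMM (deadlock prevention, see WARNING)
        hal_enable_irq( &irq_state );

        if( is_cow ) error = vmm_handle_cow( process , vpn );
        else         error = vmm_handle_page_fault( process , vpn );

        // restore the previous IRQ mask before resuming the exception path
        hal_restore_irq( irq_state );

        return error;
    }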
trunk/hal/tsar_mips32/core/hal_gpt.c
r630 → r632

   #define TSAR_MMU_ATTR_FROM_PTE2( pte2 )  (pte2 & 0xFFC000FF)

-
   ///////////////////////////////////////////////////////////////////////////////////////
   // This static function translates the GPT attributes to the TSAR attributes
…
       return gpt_attr;
   }

+  ///////////////////////////////////////////////////////////////////////////////////////
+  // The blocking hal_gpt_lock_pte() function implements a busy-waiting policy to get
+  // exclusive access to a specific GPT entry.
+  // - when non zero, the following variable defines the max number of iterations
+  //   in the busy waiting loop.
+  // - when zero, the watchdog mechanism is deactivated.
+  ///////////////////////////////////////////////////////////////////////////////////////
+
+  #define GPT_LOCK_WATCHDOG  100000

   /////////////////////////////////////
…
   */

-  /////////////////////////////////////////////////////////////////////////////////////////
-  // This static function returns in the <ptd1_value> buffer the current value of
-  // the PT1 entry identified by the <pte1_xp> argument, that must contain a PTD1
-  // (i.e. a pointer on a PT2). If this PT1 entry is not mapped yet, it allocates a
-  // new PT2 and updates the PT1 entry, using the TSAR_MMU_LOCKED attribute in PT1
-  // entry, to handle possible concurrent mappings of the missing PTD1:
-  // 1) If the PT1 entry is unmapped, it tries to atomically lock this PTD1.
-  //    - if the atomic lock is successful it allocates a new PT1, and updates the PTD1.
-  //    - else, it simply waits, in a polling loop, the mapping done by another thread.
-  //    In both cases, returns the PTD1 value, when the mapping is completed.
-  // 2) If the PT1 entry is already mapped, it returns the PTD1 value, and does
-  //    nothing else.
-  /////////////////////////////////////////////////////////////////////////////////////////
-  static error_t hal_gpt_allocate_pt2( xptr_t     ptd1_xp,
-                                       uint32_t * ptd1_value )
-  {
-      cxy_t      gpt_cxy;    // target GPT cluster
-      uint32_t   ptd1;       // PTD1 value
-      ppn_t      pt2_ppn;    // PPN of page containing the new PT2
-      bool_t     atomic;
-      page_t   * page;
-      xptr_t     page_xp;
-
-      // get GPT cluster identifier
-      gpt_cxy = GET_CXY( ptd1_xp );
-
-      // get current ptd1 value
-      ptd1 = hal_remote_l32( ptd1_xp );
-
-      if( (ptd1 & TSAR_PTE_MAPPED) == 0 )    // PTD1 unmapped and unlocked
-      {
-          // atomically lock the PTD1 to prevent concurrent PTD1 mappings
-          atomic = hal_remote_atomic_cas( ptd1_xp,
-                                          ptd1,
-                                          ptd1 | TSAR_PTE_LOCKED );
-
-          if( atomic )    // PTD1 successfully locked
-          {
-              // allocate one physical page for PT2
-              if( gpt_cxy == local_cxy )
-              {
-                  kmem_req_t req;
-                  req.type  = KMEM_PAGE;
-                  req.size  = 0;     // 1 small page
-                  req.flags = AF_KERNEL | AF_ZERO;
-                  page = (page_t *)kmem_alloc( &req );
-              }
-              else
-              {
-                  rpc_pmem_get_pages_client( gpt_cxy , 0 , &page );
-              }
-
-              if( page == NULL ) return -1;
-
-              // get the PT2 PPN
-              page_xp = XPTR( gpt_cxy , page );
-              pt2_ppn = ppm_page2ppn( page_xp );
-
-              // build PTD1
-              ptd1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
-
-              // set the PTD1 value in PT1
-              hal_remote_s32( ptd1_xp , ptd1 );
-              hal_fence();
-
-  #if DEBUG_HAL_GPT_ALLOCATE_PT2
-  thread_t * this  = CURRENT_THREAD;
-  uint32_t   cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_HAL_GPT_ALLOCATE_PT2 < cycle )
-  printk("\n[%s] : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
-  __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, ptd1 );
-  #endif
-          }
-          else    // PTD1 modified by another thread
-          {
-              // poll PTD1 until mapped by another thread
-              while( (ptd1 & TSAR_PTE_MAPPED) == 0 ) ptd1 = hal_remote_l32( ptd1_xp );
-          }
-      }
-      else    // PTD1 mapped => just use it
-      {
-
-  #if DEBUG_HAL_GPT_ALLOCATE_PT2
-  thread_t * this  = CURRENT_THREAD;
-  uint32_t   cycle = (uint32_t)hal_get_cycles();
-  if( DEBUG_HAL_GPT_ALLOCATE_PT2 < cycle )
-  printk("\n[%s] : thread[%x,%x] PTD1 mapped / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
-  __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, ptd1 );
-  #endif
-
-      }
-
-      *ptd1_value = ptd1;
-      return 0;
-
-  }  // end hal_gpt_allocate_pt2
-
   ////////////////////////////////////////////
   error_t hal_gpt_lock_pte( xptr_t   gpt_xp,
…
                             ppn_t  * ppn )
   {
-      error_t    error;
       uint32_t * pt1_ptr;       // local pointer on PT1 base
-      xptr_t     pte1_xp;       // extended pointer on PT1[ix1] entry
-      uint32_t   pte1;          // value of PT1[ix1] entry
+      xptr_t     ptd1_xp;       // extended pointer on PT1[ix1] entry
+      uint32_t   ptd1;          // value of PT1[ix1] entry
+
+      xptr_t     page_xp;

       ppn_t      pt2_ppn;       // PPN of page containing PT2
       uint32_t * pt2_ptr;       // local pointer on PT2 base
-      xptr_t     pte2_attr_xp;  // extended pointer on PT2[ix2].attr
+      xptr_t     pte2_xp;       // extended pointer on PT2[ix2].attr
       uint32_t   pte2_attr;     // PT2[ix2].attr current value
-      xptr_t     pte2_ppn_xp;   // extended pointer on PT2[ix2].ppn
       uint32_t   pte2_ppn;      // PT2[ix2].ppn current value
       bool_t     atomic;
+
+  #if GPT_LOCK_WATCHDOG
+      uint32_t   count;
+  #endif

       // get cluster and local pointer on GPT
…
       gpt_t * gpt_ptr = GET_PTR( gpt_xp );

+  #if DEBUG_HAL_GPT_LOCK_PTE
+  thread_t * this  = CURRENT_THREAD;
+  uint32_t   cycle = (uint32_t)hal_get_cycles();
+  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  printk("\n[%s] : thread[%x,%x] enters / vpn %x in cluster %x / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
+  #endif
+
-      // get indexes in PT1 & PT2
+      // get indexes in PT1 & PT2 from vpn
       uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );    // index in PT1
       uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );    // index in PT2
…
       pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

-      // build extended pointer on PTE1 == PT1[ix1]
-      pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
-
-      // get PTE1 value from PT1
-      // allocate a new PT2 for this PTE1 if required
-      error = hal_gpt_allocate_pt2( pte1_xp , &pte1 );
+      // build extended pointer on PTD1 == PT1[ix1]
+      ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
+
+      // get current PT1 entry value
+      ptd1 = hal_remote_l32( ptd1_xp );
+
+      // If PTD1 is unmapped and unlocked, try to atomically lock this PT1 entry.
+      // This PTD1 lock prevents multiple concurrent PT2 allocations:
+      // - only the thread that successfully locked the PTD1 allocates a new PT2
+      //   and updates the PTD1
+      // - all other threads simply wait until the missing PTD1 is mapped.
+
+      if( ptd1 == 0 )
+      {
+          // try to atomically lock the PTD1 to prevent concurrent PT2 allocations
+          atomic = hal_remote_atomic_cas( ptd1_xp,
+                                          ptd1,
+                                          ptd1 | TSAR_PTE_LOCKED );
+          if( atomic )
+          {
+              // allocate one 4 Kbytes physical page for PT2
+              page_xp = ppm_remote_alloc_pages( gpt_cxy , 0 );
+
+              if( page_xp == NULL )
+              {
+                  printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
+                  return -1;
+              }
+
+              // get the PT2 PPN
+              pt2_ppn = ppm_page2ppn( page_xp );
+
+              // build PTD1
+              ptd1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
+
+              // set the PTD1 value in PT1 / this unlocks the PTD1
+              hal_remote_s32( ptd1_xp , ptd1 );
+              hal_fence();
+
+  #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
+  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  printk("\n[%s] : thread[%x,%x] allocates a new PT2 for vpn %x in cluster %x\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy );
+  #endif
+
+          }  // end if atomic
+      }  // end if (ptd1 == 0)
+
+      // wait until PTD1 is mapped by another thread
+      while( (ptd1 & TSAR_PTE_MAPPED) == 0 )
       {
-          printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ );
-          return -1;
+          ptd1 = hal_remote_l32( ptd1_xp );
+
+  #if GPT_LOCK_WATCHDOG
+          if( count > GPT_LOCK_WATCHDOG )
+          {
+              thread_t * thread = CURRENT_THREAD;
+              printk("\n[PANIC] in %s : thread[%x,%x] waiting PTD1 / vpn %x / cxy %x / %d iterations\n",
+              __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
+              hal_core_sleep();
+          }
+          count++;
+  #endif
+
       }

-      if( (pte1 & TSAR_PTE_SMALL) == 0 )
-      {
-          printk("\n[ERROR] in %s : cannot lock a small page\n", __FUNCTION__ );
-          return -1;
-      }
-
-      // get pointer on PT2 base from PTE1
-      pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+      // check ptd1, because only a small page can be locked
+      assert( (ptd1 & TSAR_PTE_SMALL) , "cannot lock a big page\n");
+
+  #if (DEBUG_HAL_GPT_LOCK_PTE & 1)
+  if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  printk("\n[%s] : thread[%x,%x] get ptd1 %x for vpn %x in cluster %x\n",
+  __FUNCTION__, this->process->pid, this->trdid, ptd1, vpn, gpt_cxy );
+  #endif
+
+      // get pointer on PT2 base from PTD1
+      pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( ptd1 );
       pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );

-      // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn
-      pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
-      pte2_ppn_xp  = XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] );
-
-      // wait until PTE2 unlocked, get PTE2.attr and set lock
+      // build extended pointer on PT2[ix2].attr
+      pte2_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
+
+      // wait until PTE2 atomically set using a remote CAS
       do
       {
-          // busy waiting until TSAR_MMU_LOCK == 0
+
+  #if GPT_LOCK_WATCHDOG
+          count = 0;
+  #endif
+
+          // wait until PTE lock released by the current owner
           do
           {
-              pte2_attr = hal_remote_l32( pte2_attr_xp );
+              pte2_attr = hal_remote_l32( pte2_xp );
+
+  #if GPT_LOCK_WATCHDOG
+              if( count > GPT_LOCK_WATCHDOG )
+              {
+                  thread_t * thread = CURRENT_THREAD;
+                  printk("\n[PANIC] in %s : thread[%x,%x] waiting PTE2 / vpn %x / cxy %x / %d iterations\n",
+                  __FUNCTION__, thread->process->pid, thread->trdid, vpn, gpt_cxy, count );
+                  hal_core_sleep();
+              }
+              count++;
+  #endif
+
           }
           while( (pte2_attr & TSAR_PTE_LOCKED) != 0 );

-          // try to atomically set the TSAR_MMU_LOCK attribute
-          atomic = hal_remote_atomic_cas( pte2_attr_xp,
+          // try to atomically set the TSAR_PTE_LOCKED attribute
+          atomic = hal_remote_atomic_cas( pte2_xp,
                                           pte2_attr,
                                           (pte2_attr | TSAR_PTE_LOCKED) );
…

       // get PTE2.ppn
-      pte2_ppn = hal_remote_l32( pte2_ppn_xp );
+      pte2_ppn = hal_remote_l32( pte2_xp + 4 );

   #if DEBUG_HAL_GPT_LOCK_PTE
-  thread_t * this  = CURRENT_THREAD;
-  uint32_t   cycle = (uint32_t)hal_get_cycles();
+  cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
-  printk("\n[%s] : thread[%x,%x] locks vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
+  printk("\n[%s] : thread[%x,%x] exit / vpn %x in cluster %x / attr %x / ppn %x / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, pte2_attr, pte2_ppn, cycle );
   #endif

       // return PPN and GPT attributes
-      *ppn  = hal_remote_l32( pte2_ppn_xp ) & ((1<<TSAR_MMU_PPN_WIDTH)-1);
+      *ppn  = pte2_ppn & ((1<<TSAR_MMU_PPN_WIDTH)-1);
       *attr = tsar2gpt( pte2_attr );
       return 0;

   }  // end hal_gpt_lock_pte()

   ////////////////////////////////////////
   void hal_gpt_unlock_pte( xptr_t gpt_xp,
                            vpn_t  vpn )
   {
       uint32_t * pt1_ptr;       // local pointer on PT1 base
-      xptr_t     pte1_xp;       // extended pointer on PT1[ix1]
-      uint32_t   pte1;          // value of PT1[ix1] entry
+      xptr_t     ptd1_xp;       // extended pointer on PT1[ix1]
+      uint32_t   ptd1;          // value of PT1[ix1] entry

       ppn_t      pt2_ppn;       // PPN of page containing PT2
       uint32_t * pt2_ptr;       // PT2 base address
-      uint32_t   pte2_attr_xp;  // extended pointer on PT2[ix2].attr
-
-      uint32_t   attr;          // PTE2 attribute
+      xptr_t     pte2_xp;       // extended pointer on PT2[ix2].attr
+      uint32_t   pte2_attr;     // PTE2 attribute

       // get cluster and local pointer on GPT
       cxy_t   gpt_cxy = GET_CXY( gpt_xp );
       gpt_t * gpt_ptr = GET_PTR( gpt_xp );

   #if DEBUG_HAL_GPT_LOCK_PTE
   thread_t * this  = CURRENT_THREAD;
   uint32_t   cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
+  printk("\n[%s] : thread[%x,%x] enters for vpn %x in cluster %x / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
+  #endif

       // compute indexes in PT1 and PT2
…
       pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );

-      // build extended pointer on PTE1 == PT1[ix1]
-      pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
-
-      // get current pte1 value
-      pte1 = hal_remote_l32( pte1_xp );
-
-      // check PTE1 attributes
-      assert( (((pte1 & TSAR_PTE_MAPPED) != 0) && ((pte1 & TSAR_PTE_SMALL) != 0)),
-              "try to unlock a big or unmapped PTE1\n");
-
-      // get pointer on PT2 base from PTE1
-      pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+      // build extended pointer on PTD1 == PT1[ix1]
+      ptd1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
+
+      // get current ptd1 value
+      ptd1 = hal_remote_l32( ptd1_xp );
+
+      // check PTD1 attributes
+      assert( ((ptd1 & TSAR_PTE_MAPPED) != 0), "unmapped PTE1\n");
+      assert( ((ptd1 & TSAR_PTE_SMALL ) != 0), "big page PTE1\n");
+
+      // get pointer on PT2 base from PTD1
+      pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( ptd1 );
       pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );

       // build extended pointer on PT2[ix2].attr
-      pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );
+      pte2_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] );

       // get PT2[ix2].attr
-      attr = hal_remote_l32( pte2_attr_xp );
-
-      // reset TSAR_MMU_LOCK attribute
-      hal_remote_s32( pte2_attr_xp , attr & ~TSAR_PTE_LOCKED );
+      pte2_attr = hal_remote_l32( pte2_xp );
+
+      // check PTE2 attributes
+      assert( ((pte2_attr & TSAR_PTE_MAPPED) != 0), "unmapped PTE2\n");
+      assert( ((pte2_attr & TSAR_PTE_LOCKED) != 0), "unlocked PTE2\n");
+
+      // reset TSAR_PTE_LOCKED attribute
+      hal_remote_s32( pte2_xp , pte2_attr & ~TSAR_PTE_LOCKED );

   #if DEBUG_HAL_GPT_LOCK_PTE
-      thread_t * this  = CURRENT_THREAD;
-      uint32_t   cycle = (uint32_t)hal_get_cycles();
+      cycle = (uint32_t)hal_get_cycles();
       if( DEBUG_HAL_GPT_LOCK_PTE < cycle )
-      printk("\n[%s] : thread[%x,%x] unlocks vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
-      __FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
+      printk("\n[%s] : thread[%x,%x] unlocks vpn %x in cluster %x / cycle %d\n",
+      __FUNCTION__, this->process->pid, this->trdid, vpn, gpt_cxy, cycle );
   #endif

   }  // end hal_gpt_unlock_pte()

…
       xptr_t     pte2_attr_xp;   // extended pointer on PT2[ix2].attr
       xptr_t     pte2_ppn_xp;    // extended pointer on PT2[ix2].ppn
-      uint32_t   pte2_attr;      // current value of PT2[ix2].attr

       // get cluster and local pointer on GPT
…
       ppn_t      pt2_ppn;     // PPN of PT2
       uint32_t * pt2;         // PT2 base address
+      xptr_t     pte2_xp;     // extended pointer on PTE2

       uint32_t   ix1;         // index in PT1
       uint32_t   ix2;         // index in PT2
+
       uint32_t   tsar_attr;   // PTE attributes for TSAR MMU

-      // check attr argument MAPPED and SMALL
-      if( (attr & GPT_MAPPED) == 0 ) return;
-      if( (attr & GPT_SMALL ) == 0 ) return;
+      // check MAPPED, SMALL, and not LOCKED in attr argument
+      assert( ((attr & GPT_MAPPED) != 0), "attribute MAPPED must be set in new attributes\n" );
+      assert( ((attr & GPT_SMALL ) != 0), "attribute SMALL must be set in new attributes\n" );
+      assert( ((attr & GPT_LOCKED) == 0), "attribute LOCKED must not be set in new attributes\n" );

       // get cluster and local pointer on remote GPT
…
       pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );

-      if( (pte1 & TSAR_PTE_MAPPED) == 0 ) return;
-      if( (pte1 & TSAR_PTE_SMALL ) == 0 ) return;
+      // check MAPPED and SMALL in target PTE1
+      assert( ((pte1 & GPT_MAPPED) != 0), "attribute MAPPED must be set in target PTE1\n" );
+      assert( ((pte1 & GPT_SMALL ) != 0), "attribute SMALL must be set in target PTE1\n" );

       // get PT2 base from PTE1
…
       pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

+      // get extended pointer on PTE2
+      pte2_xp = XPTR( gpt_cxy , &pt2[2*ix2] );
+
+      // check MAPPED in target PTE2
+      assert( ((hal_remote_l32(pte2_xp) & GPT_MAPPED) != 0),
+              "attribute MAPPED must be set in target PTE2\n" );
+
       // set PTE2 in this order
-      hal_remote_s32( XPTR( gpt_cxy , &pt2[2 * ix2 + 1] ) , ppn );
+      hal_remote_s32( pte2_xp , ppn );
       hal_fence();
-      hal_remote_s32( XPTR( gpt_cxy , &pt2[2 * ix2] ) , tsar_attr );
+      hal_remote_s32( pte2_xp + 4 , tsar_attr );
       hal_fence();
…

-  /* unused until now (march 2019) [AG]
-
-  //////////////////////////////////////
-  void hal_gpt_reset_range( gpt * gpt,
-                            vpn_t vpn_min,
-                            vpn_t vpn_max )
-  {
-      vpn_t      vpn;       // current vpn
-
-      uint32_t * pt1;       // PT1 base address
-      uint32_t   pte1;      // PT1 entry value
-
-      ppn_t      pt2_ppn;   // PPN of PT2
-      uint32_t * pt2;       // PT2 base address
-
-      uint32_t   ix1;       // index in PT1
-      uint32_t   ix2;       // index in PT2
-
-      // get PT1
-      pt1 = gpt->ptr;
-
-      // initialize current index
-      vpn = vpn_min;
-
-      // loop on pages
-      while( vpn <= vpn_max )
-      {
-          // get ix1 index from vpn
-          ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
-
-          // get PTE1
-          pte1 = pt1[ix1]
-
-          if( (pte1 & TSAR_PTE_MAPPED) == 0 )    // PT1[ix1] unmapped
-          {
-              // update vpn (next big page)
-              (vpn = ix1 + 1) << 9;
-          }
-          if( (pte1 & TSAR_PTE_SMALL) == 0 )     // it's a PTE1 (big page)
-          {
-              // unmap the big page
-              pt1[ix1] = 0;
-              hal_fence();
-
-              // update vpn (next big page)
-              (vpn = ix1 + 1) << 9;
-          }
-          else                                   // it's a PTD1 (small page)
-          {
-              // compute PT2 base address
-              pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-              pt2     = GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-              // get ix2 index from vpn
-              ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );
-
-              // unmap the small page
-              pt2[2*ix2] = 0;
-              hal_fence();
-
-              // update vpn (next small page)
-              vpn++;
-          }
-      }
-  }  // hal_gpt_reset_range()
-  */
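The lock/unlock pair above implements a per-PTE spinlock in the TSAR_PTE_LOCKED bit of PT2[ix2].attr. The sketch below condenses the acquisition protocol of hal_gpt_lock_pte() into a hypothetical helper, keeping only the primitives that appear in the diff (hal_remote_l32(), hal_remote_atomic_cas(), hal_core_sleep(), GPT_LOCK_WATCHDOG); it is an illustration, not code from this changeset.

    // Hypothetical helper: spin while TSAR_PTE_LOCKED is set, then try to set
    // it with a remote compare-and-swap; retry if another thread wins the race.
    static void lock_pte2_attr( xptr_t attr_xp )
    {
        uint32_t attr;
        bool_t   atomic;

    #if GPT_LOCK_WATCHDOG
        uint32_t count = 0;
    #endif

        do
        {
            // busy-wait until the current owner releases the lock
            do
            {
                attr = hal_remote_l32( attr_xp );

    #if GPT_LOCK_WATCHDOG
                // stop the core if the lock never becomes free
                if( count++ > GPT_LOCK_WATCHDOG ) hal_core_sleep();
    #endif
            }
            while( (attr & TSAR_PTE_LOCKED) != 0 );

            // try to take the lock; the CAS fails if attr changed since the read
            atomic = hal_remote_atomic_cas( attr_xp , attr , attr | TSAR_PTE_LOCKED );
        }
        while( atomic == false );
    }

The matching release in hal_gpt_unlock_pte() is a single remote store clearing TSAR_PTE_LOCKED; it needs no CAS because only the lock owner may clear the bit.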
trunk/hal/tsar_mips32/core/hal_irqmask.c
r457 → r632

    __asm__ volatile
    (".set noat                \n"
-    "mfc0   $1,   $12         \n"
-    "or     %0,   $0,   $1    \n"
+    "mfc0   $1,   $12         \n"   /* $1 <= c0_sr        */
+    "or     %0,   $0,   $1    \n"   /* old <= $1          */
     "srl    $1,   $1,   1     \n"
-    "sll    $1,   $1,   1     \n"
-    "mtc0   $1,   $12         \n"
+    "sll    $1,   $1,   1     \n"   /* clear IE bit in $1 */
+    "mtc0   $1,   $12         \n"   /* c0_sr <= $1        */
     ".set at                  \n"
     : "=&r" (sr) );
…
    __asm__ volatile
    (".set noat                \n"
-    "mfc0   $1,   $12         \n"
-    "or     %0,   $0,   $1    \n"
-    "ori    $1,   $1,   0xFF01\n"
-    "mtc0   $1,   $12         \n"
+    "mfc0   $1,   $12         \n"   /* $1 <= c0_sr        */
+    "or     %0,   $0,   $1    \n"   /* old <= $1          */
+    "ori    $1,   $1,   0x1   \n"   /* set IE bit in $1   */
+    "mtc0   $1,   $12         \n"   /* c0_sr <= $1        */
     ".set at                  \n"
     : "=&r" (sr) );
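Two things change here: the instructions gain comments, and the enable path now sets only the IE bit (immediate 0x1) where it previously used 0xFF01, which also forced the eight IM interrupt-mask bits of c0_sr. A hedged usage sketch follows, assuming these asm bodies belong to hal_disable_irq() / hal_enable_irq() and that a hal_restore_irq() counterpart writes the saved word back to c0_sr:

    uint32_t sr_save;

    // clear IE: the previous c0_sr value is returned through the pointer
    hal_disable_irq( &sr_save );

    // ... short critical section that must not be interrupted ...

    // write the saved value back to c0_sr, restoring the previous IE state
    hal_restore_irq( sr_save );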
trunk/hal/tsar_mips32/core/hal_ppm.c
r610 → r632

     * hal_ppm.c - Generic Physical Page Manager API implementation for TSAR
     *
-    * Authors  Alain Greiner (2016,2017,2018)
+    * Authors  Alain Greiner (2016,2017,2018,2019)
     *
     * Copyright (c) UPMC Sorbonne Universites
…
       // initialize lock protecting the free_pages[] lists
-      busylock_init( &ppm->free_lock , LOCK_PPM_FREE );
+      remote_busylock_init( XPTR( local_cxy , &ppm->free_lock ) , LOCK_PPM_FREE );

       // initialize lock protecting the dirty_pages list
…
       // check consistency
-      return ppm_assert_order( ppm );
+      return ppm_assert_order();

   }  // end hal_ppm_init()
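The lock change is the substantive part: the free_pages[] lists are now protected by a remote busylock instead of a local one. A short sketch of the difference, assuming the remote_busylock_acquire() / remote_busylock_release() API of the same kernel; the variable target_cxy is illustrative:

    // A busylock is taken through a local pointer, so only the owning cluster
    // can use it. A remote_busylock is addressed by an extended pointer
    // XPTR( cxy , ptr ) and can therefore be taken from any cluster:
    xptr_t lock_xp = XPTR( target_cxy , &ppm->free_lock );

    remote_busylock_acquire( lock_xp );   // works from any cluster
    // ... manipulate the free_pages[] lists of the target cluster ...
    remote_busylock_release( lock_xp );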