Changeset 629 for trunk/hal/tsar_mips32/core
- Timestamp: May 17, 2019, 9:27:04 AM
- Location: trunk/hal/tsar_mips32/core
- Files: 2 edited
trunk/hal/tsar_mips32/core/hal_gpt.c
Diff from r625 to r629.

The TSAR PTE attribute macros are renamed from TSAR_MMU_* to TSAR_PTE_*, with unchanged bit values:

    #define TSAR_PTE_MAPPED      0x80000000
    #define TSAR_PTE_SMALL       0x40000000
    #define TSAR_PTE_LOCAL       0x20000000
    #define TSAR_PTE_REMOTE      0x10000000
    #define TSAR_PTE_CACHABLE    0x08000000
    #define TSAR_PTE_WRITABLE    0x04000000
    #define TSAR_PTE_EXECUTABLE  0x02000000
    #define TSAR_PTE_USER        0x01000000
    #define TSAR_PTE_GLOBAL      0x00800000
    #define TSAR_PTE_DIRTY       0x00400000

    #define TSAR_PTE_COW         0x00000001    // only for small pages
    #define TSAR_PTE_SWAP        0x00000004    // only for small pages
    #define TSAR_PTE_LOCKED      0x00000008    // only for small pages

The static translation functions gpt2tsar() and tsar2gpt() are updated to use the new names. The attribute mapping itself is unchanged: GPT_MAPPED, GPT_SMALL, GPT_WRITABLE, GPT_EXECUTABLE, GPT_CACHABLE, GPT_USER, GPT_DIRTY, GPT_GLOBAL, GPT_COW, GPT_SWAP and GPT_LOCKED translate to the TSAR_PTE_* flags of the same name, GPT_ACCESSED translates to TSAR_PTE_LOCAL, and tsar2gpt() still reports both TSAR_PTE_LOCAL and TSAR_PTE_REMOTE as GPT_ACCESSED, and TSAR_PTE_MAPPED as both GPT_MAPPED and GPT_READABLE.
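As an illustration of these translation helpers, the short example below composes a TSAR attribute word from the generic flags used elsewhere in the kernel; the GPT_* names come from the generic GPT interface, and only the numeric value of the TSAR side is shown, since it can be computed from the macros above.

    // Worked example (illustration only, not part of the changeset):
    // a mapped, small, writable, cachable page request.
    uint32_t tsar_attr = TSAR_PTE_MAPPED | TSAR_PTE_SMALL
                       | TSAR_PTE_WRITABLE | TSAR_PTE_CACHABLE;    // == 0xCC000000

    // Translating this value back with tsar2gpt() yields
    // GPT_MAPPED | GPT_READABLE | GPT_SMALL | GPT_WRITABLE | GPT_CACHABLE,
    // since TSAR_PTE_MAPPED maps to both GPT_MAPPED and GPT_READABLE.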
In hal_gpt_destroy(), the vpn and is_ref local variables, the lookup of the calling process, and the test on the reference cluster are removed: the function no longer calls hal_gpt_reset_pte() on user pages during destruction. When the PT1 scan finds a mapped entry, it now only reports it:

    if( (pte1 & TSAR_PTE_MAPPED) != 0 )        // PTE1 mapped
    {
        if( (pte1 & TSAR_PTE_SMALL) == 0 )     // BIG page
        {
            printk("\n[WARNING] in %s : mapped big page / ix1 %x\n",
            __FUNCTION__ , ix1 );
        }
        else                                   // PT2 exists
        {
            // get local pointer on PT2
            ...

            // scan the PT2
            for( ix2 = 0 ; ix2 < 512 ; ix2++ )
            {
                attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );

                if( (attr & TSAR_PTE_MAPPED) != 0 )    // PTE2 mapped
                {
                    printk("\n[WARNING] in %s : mapped small page / ix1 %x / ix2 %x\n",
                    __FUNCTION__ , ix1, ix2 );
                }
            }

            // release the page allocated for the PT2
            req.type = KMEM_PAGE;
            req.ptr  = GET_PTR( ppm_base2page( XPTR( local_cxy , pt2 ) ) );
            ...
        }
    }

The page allocated for each PT2 is still released through a KMEM_PAGE request.
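The release request above uses the same kmem interface as the allocations elsewhere in this file. A minimal sketch of the complete release path is shown below, assuming the usual kmem_free() companion of kmem_alloc(), which is elided from this hunk; the helper name is hypothetical.

    // Hypothetical helper (illustration only): release the physical page holding a PT2.
    static void release_pt2( uint32_t * pt2 )
    {
        kmem_req_t req;

        // convert the PT2 base address into the page descriptor expected by kmem
        req.type = KMEM_PAGE;
        req.ptr  = GET_PTR( ppm_base2page( XPTR( local_cxy , pt2 ) ) );

        // give the page back to the kernel memory allocator
        kmem_free( &req );
    }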
hal_gpt_display() is removed from the active code: it is wrapped in a /* ... */ block and kept only as a debug helper, declared as

    /////////////////////////////////////////////////////////////////////////////////////
    // This static function can be used for debug.
    /////////////////////////////////////////////////////////////////////////////////////
    static void hal_gpt_display( process_t * process )

with its body unchanged apart from the TSAR_MMU_* to TSAR_PTE_* renaming of the MAPPED and SMALL tests.

A new static function is introduced, with the following header comment:

    /////////////////////////////////////////////////////////////////////////////////////////
    // This static function checks that the PTE1 entry, in the PT1 of a possibly remote GPT,
    // identified by the <pte1_xp> argument, is mapped. If this entry is not mapped,
    // it allocates a - local or remote - PT2, updates the PTE1 value in PT1, and
    // returns the PTE1 value in the <pte1_value> buffer.
    // It uses the TSAR_PTE_LOCKED attribute in PTE1 to handle possible concurrent
    // mappings of the missing PTE1:
    // - If the PTE1 is unmapped and unlocked => it tries to atomically lock this PTE1,
    //   and maps it if the lock is successful.
    // - If the PTE1 is unmapped but locked => it polls the PTE1 value until the mapping
    //   is done by the other thread.
    // - If the PTE1 is already mapped => it does nothing.
    // It returns an error if it cannot allocate memory for a new PT2.
    /////////////////////////////////////////////////////////////////////////////////////////
    static error_t hal_gpt_allocate_pt2( xptr_t     pte1_xp,
                                         uint32_t * pte1_value )
    {
        cxy_t      gpt_cxy;    // target GPT cluster
        uint32_t   pte1;       // PTE1 value
        ppn_t      pt2_ppn;    // PPN of page containing the new PT2
        bool_t     atomic;
        page_t   * page;
        xptr_t     page_xp;

        // get GPT cluster identifier
        gpt_cxy = GET_CXY( pte1_xp );

        // get current pte1 value
        pte1 = hal_remote_l32( pte1_xp );

        if( ((pte1 & TSAR_PTE_MAPPED) == 0) &&    // PTE1 unmapped and unlocked
            ((pte1 & TSAR_PTE_LOCKED) == 0) )     // => try to allocate a new PT2
        {
            // atomically lock the PTE1 to prevent concurrent PTE1 mappings
            atomic = hal_remote_atomic_cas( pte1_xp,
                                            pte1,
                                            pte1 | TSAR_PTE_LOCKED );

            if( atomic )    // PTE1 successfully locked
            {
                // allocate one physical page for PT2
                if( gpt_cxy == local_cxy )
                ...

                if( page == NULL ) return -1;

                // get the PT2 PPN
                pt2_ppn = ppm_page2ppn( page_xp );

                // build and set the PTD1 value in PT1
                pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | pt2_ppn;
                hal_remote_s32( pte1_xp , pte1 );
                hal_fence();
            }
            else            // lock lost => another thread is mapping this PTE1
            {
                // poll PTE1 until mapped by the other thread
                while( (pte1 & TSAR_PTE_MAPPED) == 0 ) pte1 = hal_remote_l32( pte1_xp );
            }
        }
        else if( ((pte1 & TSAR_PTE_MAPPED) == 0) &&    // PTE1 unmapped but locked
                 ((pte1 & TSAR_PTE_LOCKED) != 0) )
        {
            // poll PTE1 until mapped by the other thread
            while( (pte1 & TSAR_PTE_MAPPED) == 0 ) pte1 = hal_remote_l32( pte1_xp );
        }
        // else : PTE1 already mapped => just use it

        *pte1_value = pte1;
        return 0;

    }  // end hal_gpt_allocate_pt2()

Both the mapping path and the already-mapped path are instrumented with DEBUG_HAL_GPT_ALLOCATE_PT2 printk() traces.
%x\n", 414 __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 ); 415 #endif 416 417 } 418 419 *pte1_value = pte1; 420 return 0; 421 422 } // end hal_gpt_allocate_pt2 423 424 425 426 427 //////////////////////////////////////////// 428 error_t hal_gpt_lock_pte( xptr_t gpt_xp, 429 vpn_t vpn, 430 uint32_t * attr, 431 ppn_t * ppn ) 432 { 433 error_t error; 434 uint32_t * pt1_ptr; // local pointer on PT1 base 435 xptr_t pte1_xp; // extended pointer on PT1[x1] entry 436 uint32_t pte1; // value of PT1[x1] entry 437 438 ppn_t pt2_ppn; // PPN of page containing PT2 439 uint32_t * pt2_ptr; // local pointer on PT2 base 440 xptr_t pte2_attr_xp; // extended pointer on PT2[ix2].attr 441 uint32_t pte2_attr; // PT2[ix2].attr current value 442 xptr_t pte2_ppn_xp; // extended pointer on PT2[ix2].ppn 443 uint32_t pte2_ppn; // PT2[ix2].ppn current value 444 bool_t atomic; 445 446 // get cluster and local pointer on GPT 447 cxy_t gpt_cxy = GET_CXY( gpt_xp ); 448 gpt_t * gpt_ptr = GET_PTR( gpt_xp ); 449 450 // get indexes in PTI & PT2 451 uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn ); // index in PT1 452 uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn ); // index in PT2 453 454 // get local pointer on PT1 455 pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) ); 456 457 // build extended pointer on PTE1 == PT1[ix1] 458 pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] ); 459 460 // get PTE1 value from PT1 461 // allocate a new PT2 for this PTE1 if required 462 error = hal_gpt_allocate_pt2( pte1_xp , &pte1 ); 463 464 if( error ) 465 { 466 printk("\n[ERROR] in %s : cannot allocate memory for PT2\n", __FUNCTION__ ); 467 return -1; 468 } 469 470 if( (pte1 & TSAR_PTE_SMALL) == 0 ) 471 { 472 printk("\n[ERROR] in %s : cannot lock a small page\n", __FUNCTION__ ); 473 return -1; 474 } 475 476 // get pointer on PT2 base from PTE1 477 pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 ); 478 pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 479 480 // build extended pointers on PT2[ix2].attr and PT2[ix2].ppn 481 pte2_attr_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2] ); 482 pte2_ppn_xp = XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] ); 483 484 // wait until PTE2 unlocked, get PTE2.attr and set lock 485 do 486 { 487 // busy waiting until TSAR_MMU_LOCK == 0 488 do 489 { 490 pte2_attr = hal_remote_l32( pte2_attr_xp ); 491 } 492 while( (pte2_attr & TSAR_PTE_LOCKED) != 0 ); 493 494 // try to atomically set the TSAR_MMU_LOCK attribute 495 atomic = hal_remote_atomic_cas( pte2_attr_xp, 496 pte2_attr, 497 (pte2_attr | TSAR_PTE_LOCKED) ); 498 } 499 while( atomic == 0 ); 500 501 // get PTE2.ppn 502 pte2_ppn = hal_remote_l32( pte2_ppn_xp ); 503 504 #if DEBUG_HAL_GPT_LOCK_PTE 505 thread_t * this = CURRENT_THREAD; 506 uint32_t cycle = (uint32_t)hal_get_cycles(); 507 if( DEBUG_HAL_GPT_LOCK_PTE < cycle ) 508 printk("\n[%s] : thread[%x,%x] locks vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n", 509 __FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle ); 510 #endif 511 512 // return PPN and GPT attributes 513 *ppn = hal_remote_l32( pte2_ppn_xp ) & ((1<<TSAR_MMU_PPN_WIDTH)-1); 514 *attr = tsar2gpt( pte2_attr ); 515 return 0; 516 517 } // end hal_gpt_lock_pte() 518 519 //////////////////////////////////////// 520 void hal_gpt_unlock_pte( xptr_t gpt_xp, 521 vpn_t vpn ) 522 { 523 uint32_t * pt1_ptr; // local pointer on PT1 base 524 xptr_t pte1_xp; // extended pointer on PT1[ix1] 525 uint32_t pte1; // value of PT1[ix1] entry 526 527 ppn_t pt2_ppn; // PPN of page containing PT2 528 uint32_t * pt2_ptr; // PT2 base address 529 uint32_t 
    ////////////////////////////////////////
    void hal_gpt_unlock_pte( xptr_t gpt_xp,
                             vpn_t  vpn )

hal_gpt_unlock_pte() checks with an assert that the PTE1 is both mapped and small, rebuilds the extended pointer on PT2[ix2].attr, and clears the lock bit:

    // get PT2[ix2].attr
    attr = hal_remote_l32( pte2_attr_xp );

    // reset TSAR_PTE_LOCKED attribute
    hal_remote_s32( pte2_attr_xp , attr & ~TSAR_PTE_LOCKED );

Both functions are instrumented with DEBUG_HAL_GPT_LOCK_PTE printk() traces.
hal_gpt_set_pte() no longer allocates the missing PT2 itself and no longer returns an error code: it becomes a void function that expects the target PTE2 to be already locked.

    ///////////////////////////////////////
    void hal_gpt_set_pte( xptr_t   gpt_xp,
                          vpn_t    vpn,
                          uint32_t attr,
                          ppn_t    ppn )

For a big page (no GPT_SMALL in <attr>), it checks with asserts that the PTE1 is not already mapped and that both the VPN and the PPN are aligned on a 512-page boundary, then writes the PTE1:

    // set the PTE1 value in PT1
    pte1 = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
    hal_remote_s32( pte1_xp , pte1 );
    hal_fence();

For a small page, it requires the PTE1 to be mapped and the PTE2 to be locked, and writes the PPN before the attributes:

    // PTE1 must be mapped because PTE2 must be locked
    assert( (pte1 & TSAR_PTE_MAPPED), "PTE1 must be mapped\n" );

    ...

    // get current value of PTE2.attr and check that the PTE2 is locked
    pte2_attr = hal_remote_l32( pte2_attr_xp );
    assert( (pte2_attr & TSAR_PTE_LOCKED), "PTE2 must be locked\n" );

    // set PTE2 in PT2 (in this order)
    hal_remote_s32( pte2_ppn_xp  , ppn );
    hal_fence();
    hal_remote_s32( pte2_attr_xp , tsar_attr );
    hal_fence();
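With this split between locking and writing, a caller is expected to bracket hal_gpt_set_pte() with the lock / unlock pair. The sketch below illustrates that contract for a small page; the wrapper function is hypothetical (it is not part of this changeset), only the three hal_gpt_* calls and the GPT_* flags come from the code above, and error handling is reduced to the minimum.

    // Hypothetical caller (illustration only): map one small page <vpn> -> <ppn>
    // in a possibly remote GPT, using the new lock / set / unlock protocol.
    static error_t map_one_small_page( xptr_t gpt_xp,
                                       vpn_t  vpn,
                                       ppn_t  ppn )
    {
        uint32_t old_attr;
        ppn_t    old_ppn;

        // take the PTE2 lock; this allocates the PT2 if the PTE1 is still unmapped
        if( hal_gpt_lock_pte( gpt_xp , vpn , &old_attr , &old_ppn ) ) return -1;

        // write the new mapping; hal_gpt_set_pte() asserts that the PTE2 is locked
        hal_gpt_set_pte( gpt_xp , vpn ,
                         GPT_MAPPED | GPT_SMALL | GPT_WRITABLE | GPT_CACHABLE ,
                         ppn );

        // release the PTE2 lock
        hal_gpt_unlock_pte( gpt_xp , vpn );

        return 0;
    }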
hal_gpt_reset_pte() now also takes an extended pointer on the GPT:

    ///////////////////////////////////////
    void hal_gpt_reset_pte( xptr_t gpt_xp,
                            vpn_t  vpn )

It does nothing if the PTE1 is unmapped. For a big page it simply clears PT1[ix1]; for a small page it rebuilds the PT2 base address from the PTE1 and clears the attributes before the PPN:

    // unmap the PTE2
    hal_remote_s32( pte2_attr_xp , 0 );
    hal_fence();
    hal_remote_s32( pte2_ppn_xp , 0 );
    hal_fence();

Both paths are instrumented with DEBUG_HAL_GPT_RESET_PTE printk() traces.

In hal_gpt_get_pte(), the local variables are now commented, the tests use the TSAR_PTE_* names, and the PTE2 case goes through explicit extended pointers on PT2[ix2].attr and PT2[ix2].ppn:

    // build extended pointer on PT2[ix2].attr and PT2[ix2].ppn
    pte2_attr_xp = XPTR( gpt_cxy , &pt2[2 * ix2] );
    pte2_ppn_xp  = XPTR( gpt_cxy , &pt2[2 * ix2 + 1] );

    // get current value of PTE2.attr & PTE2.ppn
    pte2_attr = hal_remote_l32( pte2_attr_xp );
    pte2_ppn  = hal_remote_l32( pte2_ppn_xp );

    // return PPN & GPT attributes
    *ppn  = pte2_ppn & ((1<<TSAR_MMU_PPN_WIDTH)-1);
    *attr = tsar2gpt( pte2_attr );
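All of these functions derive the ix1 / ix2 indexes with TSAR_MMU_IX1_FROM_VPN() and TSAR_MMU_IX2_FROM_VPN(). Assuming the usual TSAR split with a 9-bit second-level index (512 PTE2 slots per PT2, as used by the loops and alignment checks above), the decomposition works as in this small example.

    // Worked example (illustration, assuming a 9-bit ix2 field):
    //   vpn = 0x12345
    //   ix1 = vpn >> 9    = 0x91     -> selects PT1[0x91]
    //   ix2 = vpn & 0x1FF = 0x145    -> selects pt2[2*0x145] (attr) and pt2[2*0x145 + 1] (ppn)
    // A big page mapped by PT1[0x91] would cover the 512 pages 0x12200 .. 0x123FF.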
The previous hal_gpt_reset_pte( gpt_t * gpt , vpn_t vpn ) version, which worked on a local GPT pointer, is deleted.

hal_gpt_pte_copy(), hal_gpt_set_cow() and hal_gpt_update_pte() are moved up in the file, right after hal_gpt_get_pte(), and rewritten with the TSAR_PTE_* names; their prototypes do not change:

    ///////////////////////////////////////////
    error_t hal_gpt_pte_copy( gpt_t  * dst_gpt,
                              vpn_t    dst_vpn,
                              xptr_t   src_gpt_xp,
                              vpn_t    src_vpn,
                              bool_t   cow,
                              ppn_t  * ppn,
                              bool_t * mapped )

hal_gpt_pte_copy() computes the source and destination ix1 / ix2 indexes, reads the source PTE1, and does nothing unless it is both MAPPED and SMALL. If the destination PTE1 is unmapped, it allocates a zeroed page for a new PT2 with a KMEM_PAGE request (returning -1 on failure) and registers the new PTD1 in the destination PT1:

    // build the new dst_pte1
    dst_pte1 = TSAR_PTE_MAPPED | TSAR_PTE_SMALL | dst_pt2_ppn;

    // register it in DST_GPT
    dst_pt1[dst_ix1] = dst_pte1;
It then reads the source PTE2 and, if it is mapped, copies the PPN and the attributes to the destination PTE2, applying the copy-on-write transformation when requested:

    // set PPN in DST PTE2
    dst_pt2[2 * dst_ix2 + 1] = src_pte2_ppn;

    // set attributes in DST PTE2
    if( cow && (src_pte2_attr & TSAR_PTE_WRITABLE) )
    {
        dst_pt2[2 * dst_ix2] = (src_pte2_attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
    }
    else
    {
        dst_pt2[2 * dst_ix2] = src_pte2_attr;
    }

and returns *mapped = true with the source PPN. If the source PTE1 is not a mapped small page, or the source PTE2 is unmapped, it returns *mapped = false and *ppn = 0. The function itself returns 0 in all cases except a PT2 allocation failure.

hal_gpt_set_cow() scans the [vpn_base , vpn_base + vpn_size[ range and only modifies entries whose PTE1 is MAPPED and SMALL. For each of them it rebuilds the PT2 base address (asserting that the PT2 lives in the same cluster as the PT1), then sets the COW flag and removes the WRITABLE flag in every mapped PTE2:

    // only MAPPED PTEs are modified
    if( attr & TSAR_PTE_MAPPED )
    {
        attr = (attr | TSAR_PTE_COW) & (~TSAR_PTE_WRITABLE);
        hal_remote_s32( XPTR( gpt_cxy , &pt2[2*ix2] ) , attr );
    }
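As a concrete illustration of that transformation, using the attribute values defined at the top of the file (the numeric example itself is not part of the changeset):

    // Worked example: for a mapped, cachable, writable small page
    //   attr = TSAR_PTE_MAPPED | TSAR_PTE_CACHABLE | TSAR_PTE_WRITABLE   // 0x8C000000
    // the copy-on-write transformation produces
    //   (attr | TSAR_PTE_COW) & ~TSAR_PTE_WRITABLE                       // 0x88000001
    // The page stays mapped and readable, but any write now faults, and the COW bit
    // lets the page-fault handler know it must duplicate the physical page.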
hal_gpt_update_pte() keeps its prototype ( xptr_t gpt_xp , vpn_t vpn , uint32_t attr , ppn_t ppn ) and its behaviour: it returns silently unless <attr> is MAPPED and SMALL and the target PTE1 is a mapped PTD1, and it writes the PPN before the attributes:

    // set PTE2 in this order
    hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2 + 1] ) , ppn );
    hal_fence();
    hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2] ) , tsar_attr );
    hal_fence();

The TSAR_MMU_MAPPED / TSAR_MMU_SMALL tests inside the commented-out page-scanning block that follows are renamed to TSAR_PTE_* as well.

The rest of the file, which held the previous implementations based on a local gpt_t pointer and the TSAR_MMU_* names, is deleted:

- the old hal_gpt_lock_pte( gpt_t * gpt , vpn_t vpn ), which rejected big pages with EINVAL,
missing PT1 entry 686 { 687 // allocate one physical page for PT2 688 kmem_req_t req; 689 req.type = KMEM_PAGE; 690 req.size = 0; // 1 small page 691 req.flags = AF_KERNEL | AF_ZERO; 692 page = (page_t *)kmem_alloc( &req ); 693 694 if( page == NULL ) 695 { 696 printk("\n[ERROR] in %s : try to set a small page but cannot allocate PT2\n", 697 __FUNCTION__ ); 698 return ENOMEM; 699 } 700 701 page_xp = XPTR( local_cxy , page ); 702 pt2_ppn = ppm_page2ppn( page_xp ); 703 pt2 = GET_PTR( ppm_page2base( page_xp ) ); 704 705 // try to set the PT1 entry 706 do 707 { 708 atomic = hal_atomic_cas( (void*)pte1_ptr , 0 , 709 TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn ); 710 } 711 while( (atomic == false) && (*pte1_ptr == 0) ); 712 713 if( atomic == false ) // missing PT2 has been allocate by another core 714 { 715 // release the allocated page 716 ppm_free_pages( page ); 717 718 // read again the PTE1 719 pte1 = *pte1_ptr; 720 721 // get the PT2 base address 722 pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 ); 723 pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 724 } 725 } 726 else 727 { 728 // This valid entry must be a PTD1 729 if( (pte1 & TSAR_MMU_SMALL) == 0 ) 730 { 731 printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n", 732 __FUNCTION__ , ix1 , pte1 ); 733 return EINVAL; 734 } 735 736 // compute PPN of PT2 base 737 pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 ); 738 739 // compute pointer on PT2 base 740 pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 741 } 742 743 // from here we have the PT2 pointer 744 745 // compute pointer on PTE2 746 pte2_ptr = &pt2[2 * ix2]; 747 748 // try to atomically lock the PTE2 until success 749 do 750 { 751 // busy waiting until TSAR_MMU_LOCK == 0 752 do 753 { 754 attr = *pte2_ptr; 755 hal_rdbar(); 756 } 757 while( (attr & TSAR_MMU_LOCKED) != 0 ); 758 759 atomic = hal_atomic_cas( (void*)pte2_ptr, attr , (attr | TSAR_MMU_LOCKED) ); 760 } 761 while( atomic == 0 ); 762 763 return 0; 764 765 } // end hal_gpt_lock_pte() 766 767 //////////////////////////////////////// 768 error_t hal_gpt_unlock_pte( gpt_t * gpt, 769 vpn_t vpn ) 770 { 771 uint32_t * pt1; // PT1 base address 772 uint32_t pte1; // value of PT1 entry 773 774 uint32_t * pt2; // PT2 base address 775 ppn_t pt2_ppn; // PPN of PT2 page if missing PT2 776 uint32_t * pte2_ptr; // address of PT2 entry 777 778 uint32_t attr; // PTE2 attribute 779 780 // compute indexes in P1 and PT2 781 uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn ); // index in PT1 782 uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn ); // index in PT2 783 784 // get pointer on PT1 base 785 pt1 = (uint32_t*)gpt->ptr; 786 787 // get PTE1 788 pte1 = pt1[ix1]; 789 790 // check PTE1 present and small page 791 if( ((pte1 & TSAR_MMU_MAPPED) == 0) || ((pte1 & TSAR_MMU_SMALL) == 0) ) 792 { 793 printk("\n[ERROR] in %s : try to unlock a big or undefined page / PT1[%d] = %x\n", 794 __FUNCTION__ , ix1 , pte1 ); 795 return EINVAL; 796 } 797 798 // get pointer on PT2 base 799 pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 ); 800 pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) ); 801 802 // get pointer on PTE2 803 pte2_ptr = &pt2[2 * ix2]; 804 805 // get PTE2_ATTR 806 attr = *pte2_ptr; 807 808 // check PTE2 present and locked 809 if( ((attr & TSAR_MMU_MAPPED) == 0) || ((attr & TSAR_MMU_LOCKED) == 0) ) 810 { 811 printk("\n[ERROR] in %s : unlock an unlocked/unmapped page / PT1[%d] = %x\n", 812 __FUNCTION__ , ix1 , pte1 ); 813 return EINVAL; 814 } 815 816 // reset GPT_LOCK 817 *pte2_ptr = attr & ~TSAR_MMU_LOCKED; 818 819 return 0; 820 821 } // end hal_gpt_unlock_pte() 822 823 
- the old hal_gpt_pte_copy(), identical in structure to the version re-emitted above but using the TSAR_MMU_* names;
- hal_gpt_pte_is_mapped() and hal_gpt_pte_is_cow(), which tested the MAPPED and COW bits of a PTE2 through a local GPT pointer;
- the old hal_gpt_set_cow()
  and the old hal_gpt_update_pte(), both superseded by the versions re-emitted earlier in the file.
trunk/hal/tsar_mips32/core/hal_vmm.c
Diff from r625 to r629.

Since hal_gpt_set_pte() no longer returns an error code, its two call sites stop checking one. When the kernel big page is registered in slot[0] of the kernel GPT,

    // set PT1[0]
    hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn );

replaces the previous error = hal_gpt_set_pte( ... ) followed by a [PANIC] printk() and hal_core_sleep(). Likewise, when this kernel PTE1 is copied into slot[0] of a user GPT,

    // update user GPT : set PTE1 in slot[0]
    hal_gpt_set_pte( u_gpt_xp , 0 , attr , ppn );

replaces the error-checked version that returned -1 on failure.

In the VMM display function, the extended pointer on the GPT lock is no longer built, and that lock is no longer taken: only the TXT0 lock and the VSL lock are acquired before the scan of the vsegs list and released after it.
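For reference, the effect of the first call can be read off the hal_gpt.c code above; the 2 MB figure assumes 4 KB pages and the 512-entry PT2 used throughout this changeset.

    // Illustration (not part of the changeset): in the kernel GPT initialisation,
    //   ppn = cxy << 20
    // and hal_gpt_set_pte( XPTR( cxy , gpt ) , 0 , attr , ppn ) takes the big-page path:
    //   PT1[0] = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
    // so the 512 virtual pages 0x000 .. 0x1FF (the first 2 MB of the kernel address space)
    // are mapped onto the first 2 MB of the physical memory of cluster <cxy>.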