Changeset 406 for trunk/hal/tsar_mips32/core/hal_gpt.c
Timestamp: Aug 29, 2017, 12:03:37 PM
Files: 1 edited
Legend:
  (unmarked)  Unmodified
  +           Added
  -           Removed
trunk/hal/tsar_mips32/core/hal_gpt.c
Differences from r401 to r406:

@@ hal_gpt_create() @@
      page_t * page;
      xptr_t   page_xp;
+     vpn_t    vpn;
+     error_t  error;
+     uint32_t attr;
+
+     gpt_dmsg("\n[DMSG] %s : core[%x,%d] enter\n",
+              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );

      // check page size
-     if( CONFIG_PPM_PAGE_SIZE != 4096 )
-     {
-         printk("\n[PANIC] in %s : For TSAR, the page must be 4 Kbytes\n", __FUNCTION__ );
-         hal_core_sleep();
-     }
+     assert( (CONFIG_PPM_PAGE_SIZE == 4096) , __FUNCTION__ ,
+             "for TSAR, the page must be 4 Kbytes\n" );

      // allocates 2 physical pages for PT1
      …
      page = (page_t *)kmem_alloc( &req );

-     if( page == NULL )
-     {
-         printk("\n[ERROR] in %s : cannot allocate physicalmemory for PT1\n", __FUNCTION__ );
+     if( page == NULL )
+     {
+         printk("\n[ERROR] in %s : cannot allocate memory for PT1\n", __FUNCTION__ );
          return ENOMEM;
      }

      // initialize generic page table descriptor
      …
      gpt->page = GET_PTR( page_xp );

+     // identity map the kentry_vseg (must exist for all processes)
+     attr = GPT_MAPPED | GPT_SMALL | GPT_EXECUTABLE | GPT_CACHABLE | GPT_GLOBAL;
+     for( vpn = CONFIG_VMM_KENTRY_BASE;
+          vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE); vpn++ )
+     {
+         gpt_dmsg("\n[DMSG] %s : identity map vpn %d\n", __FUNCTION__ , vpn );
+
+         error = hal_gpt_set_pte( gpt,
+                                  vpn,
+                                  (local_cxy<<20) | (vpn & 0xFFFFF),
+                                  attr );
+
+         if( error )
+         {
+             printk("\n[ERROR] in %s : cannot identity map kentry vseg\n", __FUNCTION__ );
+             return ENOMEM;
+         }
+     }
+
+     gpt_dmsg("\n[DMSG] %s : core[%x,%d] exit\n",
+              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
+
      return 0;
+
  } // end hal_gpt_create()
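Note: the identity mapping added above relies on how TSAR builds physical page numbers: the cluster identifier occupies the PPN bits above bit 19, while the 20 low bits keep the in-cluster virtual page number unchanged, which is exactly what `(local_cxy<<20) | (vpn & 0xFFFFF)` computes. A minimal user-space sketch of that arithmetic follows; KENTRY_BASE and KENTRY_SIZE are made-up stand-ins for the CONFIG_VMM_KENTRY_BASE / CONFIG_VMM_KENTRY_SIZE parameters, not values from the ALMOS-MKH headers.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kentry vseg configuration parameters. */
    #define KENTRY_BASE 0x0
    #define KENTRY_SIZE 0x4

    /* Same formula as the diff: cluster bits above bit 19, in-cluster VPN below. */
    static uint32_t identity_ppn( uint32_t cxy , uint32_t vpn )
    {
        return (cxy << 20) | (vpn & 0xFFFFF);
    }

    int main( void )
    {
        uint32_t cxy;
        uint32_t vpn;
        for( cxy = 0; cxy < 2; cxy++ )
            for( vpn = KENTRY_BASE; vpn < KENTRY_BASE + KENTRY_SIZE; vpn++ )
                printf("cluster %x : vpn %05x -> ppn %07x\n",
                       cxy , vpn , identity_ppn( cxy , vpn ));
        return 0;
    }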
@@ hal_gpt_print() @@
  } // end hal_gpt_destroy()

- /////////////////////////////////
- void hal_gpt_print( gpt_t * gpt )
+ ////////////////////////////////
+ void hal_gpt_print( gpt_t * gpt,
+                     pid_t   pid )
  {
      uint32_t ix1;
      …
      uint32_t pte2_attr;
      ppn_t    pte2_ppn;
-
-     printk("*** Page Table for process %x in cluster %x ***\n",
-            CURRENT_THREAD->process->pid , local_cxy );
+     vpn_t    vpn;
+

      pt1 = (uint32_t *)gpt->ptr;
+
+     printk("\n***** Generic Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
+            pid , gpt , pt1 );

      // scan the PT1
      …
      if( (pte1 & TSAR_MMU_SMALL) == 0 )  // BIG page
      {
-         printk(" - BIG : pt1[%d] = %x\n", ix1 , pte1 );
+         vpn = ix1 << 9;
+         printk(" - BIG : vpn = %x / pt1[%d] = %X\n", vpn , ix1 , pte1 );
      }
      else  // SMALL pages
      …
          pte2_attr = TSAR_MMU_ATTR_FROM_PTE2( pt2[2 * ix2] );
          pte2_ppn  = TSAR_MMU_PPN_FROM_PTE2( pt2[2 * ix2 + 1] );
+
          if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )
          {
-             printk(" - SMALL : pt1[%d] = %x / pt2[%d] / pt2[%d]\n",
-                    ix1 , pt1[ix1] , 2*ix2 , pte2_attr , 2*ix2+1 , pte2_ppn );
+             vpn = (ix1 << 9) | ix2;
+             printk(" - SMALL : vpn = %x / PT2[%d] = %x / pt2[%d] = %x\n",
+                    vpn , 2*ix2 , pte2_attr , 2*ix2+1 , pte2_ppn );
          }
      }
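Note: the vpn values printed above are rebuilt from the two table indexes. The shift by 9 implies a PT2 holds 512 entries, so ix2 is the 9 low bits of the VPN and ix1 the bits above them, and a big page at ix1 starts at vpn = ix1 << 9. A small self-contained check of that round trip, with illustrative stand-ins for the TSAR_MMU_IX1_FROM_VPN / TSAR_MMU_IX2_FROM_VPN macros:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins for the TSAR index macros, assuming the layout the diff
     * implies: 9 VPN bits index the PT2, the remaining bits index the PT1. */
    static uint32_t ix1_from_vpn( uint32_t vpn ) { return vpn >> 9;    }
    static uint32_t ix2_from_vpn( uint32_t vpn ) { return vpn & 0x1FF; }

    int main( void )
    {
        uint32_t vpn;
        for( vpn = 0; vpn < 0x100000; vpn += 0x333 )
        {
            uint32_t ix1 = ix1_from_vpn( vpn );
            uint32_t ix2 = ix2_from_vpn( vpn );
            /* the reconstruction hal_gpt_print() performs */
            assert( ((ix1 << 9) | ix2) == vpn );
        }
        return 0;
    }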
@@ hal_gpt_set_pte() @@
      …
      uint32_t    attr )     // generic GPT attributes
  {
-     uint32_t  * pt1;       // virtual base addres of PT1
-     volatile uint32_t * pte1_ptr;  // pointer on PT1 entry
+     uint32_t  * pt1;       // PT1 base addres
+     uint32_t  * pte1_ptr;  // pointer on PT1 entry
      uint32_t    pte1;      // PT1 entry value

      ppn_t       pt2_ppn;   // PPN of PT2
-     uint32_t  * pt2;       // virtual base address of PT2
+     uint32_t  * pt2;       // PT2 base address

      uint32_t    small;     // requested PTE is for a small page
-     bool_t      atomic;
+     bool_t      success;   // exit condition for while loop below

      page_t    * page;      // pointer on new physical page descriptor
      …
      uint32_t    tsar_attr; // PTE attributes for TSAR MMU

+     gpt_dmsg("\n[DMSG] %s : core[%x,%d] enter for vpn = %x / ppn = %x / gpt_attr = %x\n",
+              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , ppn , attr );
+
      // compute indexes in PT1 and PT2
      ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
      …
      tsar_attr = gpt2tsar( attr );

-     // get PT1 entry value
+     gpt_dmsg("\n[DMSG] %s : core[%x,%d] / vpn = %x / &pt1 = %x / tsar_attr = %x\n",
+              __FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pt1 , tsar_attr );
+
+     // get pointer on PT1[ix1]
      pte1_ptr = &pt1[ix1];
-     pte1 = *pte1_ptr;
-
-     // Big pages (PTE1) are only set for the kernel vsegs, in the kernel init phase.
+
+     // PTE1 (big page) are only set for the kernel vsegs, in the kernel init phase.
      // There is no risk of concurrent access.
      if( small == 0 )
      {
-         if( pte1 != 0 )
-         {
-             panic("\n[PANIC] in %s : set a big page in a mapped PT1 entry / PT1[%d] = %x\n",
-                   __FUNCTION__ , ix1 , pte1 );
-         }
+         // get current pte1 value
+         pte1 = *pte1_ptr;
+
+         assert( (pte1 == 0) , __FUNCTION__ ,
+                 "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );

          // set the PTE1
          …
      // From this point, the requested PTE is a PTE2 (small page)

-     if( (pte1 & TSAR_MMU_MAPPED) == 0 ) // the PT1 entry is not valid
-     {
-         // allocate one physical page for the PT2
-         kmem_req_t req;
-         req.type  = KMEM_PAGE;
-         req.size  = 0;  // 1 small page
-         req.flags = AF_KERNEL | AF_ZERO;
-         page = (page_t *)kmem_alloc( &req );
-         if( page == NULL )
-         {
-             printk("\n[ERROR] in %s : try to set a small page but cannot allocate PT2\n",
-                    __FUNCTION__ );
-             return ENOMEM;
-         }
-
-         page_xp = XPTR( local_cxy , page );
-         pt2_ppn = ppm_page2ppn( page_xp );
-         pt2     = (uint32_t *)GET_PTR( ppm_page2base( page_xp ) );
-
-         // try to atomicaly set a PTD1 in the PT1 entry
-         do
-         {
-             atomic = hal_atomic_cas( (void*)pte1, 0 ,
-                                      TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn );
-         }
-         while( (atomic == false) && (*pte1_ptr == 0) );
-
-         if( atomic == false ) // the mapping has been done by another thread !!!
-         {
-             // release the allocated page
-             ppm_free_pages( page );
-
-             // read PT1 entry again
-             pte1 = *pte1_ptr;
-
-             // compute PPN of PT2 base
-             pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-
-             // compute pointer on PT2 base
-             pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-         }
-     }
-     else // The PT1 entry is valid
-     {
-         // This valid entry must be a PTD1
-         if( (pte1 & TSAR_MMU_SMALL) == 0 )
-         {
-             printk("\n[ERROR] in %s : set a small page in a big PT1 entry / PT1[%d] = %x\n",
-                    __FUNCTION__ , ix1 , pte1 );
-             return EINVAL;
-         }
-
-         // compute PPN of PT2 base
-         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-
-         // compute pointer on PT2 base
-         pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-     }
+     // loop to access PTE1 and get pointer on PT2
+     success = false;
+     do
+     {
+         // get current pte1 value
+         pte1 = *pte1_ptr;
+
+         gpt_dmsg("\n[DMSG] %s : core[%x,%d] / vpn = %x / current_pte1 = %x\n",
+                  __FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pte1 );
+
+         // allocate a PT2 if PT1 entry not valid
+         if( (pte1 & TSAR_MMU_MAPPED) == 0 )  // PT1 entry not valid
+         {
+             // allocate one physical page for the PT2
+             kmem_req_t req;
+             req.type  = KMEM_PAGE;
+             req.size  = 0;  // 1 small page
+             req.flags = AF_KERNEL | AF_ZERO;
+             page = (page_t *)kmem_alloc( &req );
+             if( page == NULL )
+             {
+                 printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
+                 return ENOMEM;
+             }
+
+             // get the PT2 PPN
+             page_xp = XPTR( local_cxy , page );
+             pt2_ppn = ppm_page2ppn( page_xp );
+
+             // try to atomicaly set the PT1 entry
+             pte1    = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn;
+             success = hal_atomic_cas( pte1_ptr , 0 , pte1 );
+
+             // release allocated PT2 if PT1 entry modified by another thread
+             if( success == false ) ppm_free_pages( page );
+         }
+         else  // PT1 entry is valid
+         {
+             // This valid entry must be a PTD1
+             assert( (pte1 & TSAR_MMU_SMALL) , __FUNCTION__ ,
+                     "try to set a small page in a big PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
+
+             success = true;
+         }
+
+         // get PT2 base from pte1
+         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
+         pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+         gpt_dmsg("\n[DMSG] %s : core[%x,%d] / vpn = %x / pte1 = %x / &pt2 = %x\n",
+                  __FUNCTION__, local_cxy , CURRENT_THREAD->core->lid , vpn , pte1 , pt2 );
+
+     }
+     while (success == false);

      // set PTE2 in this order
      …
      hal_fence();

+     gpt_dmsg("\n[DMSG] %s : core[%x,%d] exit / vpn = %x / pte2_attr = %x / pte2_ppn = %x\n",
+              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn ,
+              pt2[2 * ix2] , pt2[2 * ix2 + 1] );
+
      return 0;

  } // end of hal_gpt_set_pte()
+

  /////////////////////////////////////
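Note: the rewritten loop replaces the old CAS sequence, which apparently passed the entry's value, `(void*)pte1`, to hal_atomic_cas() where the entry's address was needed. The new structure is the classic lock-free lazy allocation pattern: re-read the entry on each iteration, allocate a PT2 if the entry is unmapped, publish it with a single compare-and-swap, and release the PT2 again when another thread wins the race. Below is a minimal user-space sketch of that pattern using C11 atomics, with a pointer-valued slot standing in for the PTE1 word; the type and helper names are illustrative, not the kernel's.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* One PT1 slot: NULL plays the role of an unmapped PTE1, a non-NULL
     * pointer the role of a PTD1 pointing to a PT2. */
    typedef _Atomic(uint32_t *) pt1_slot_t;

    static uint32_t * get_or_alloc_pt2( pt1_slot_t * slot )
    {
        for( ;; )
        {
            /* re-read the current entry on every iteration */
            uint32_t * pt2 = atomic_load( slot );
            if( pt2 != NULL ) return pt2;           /* mapped by someone else */

            /* speculatively allocate a zeroed PT2 (512 attr/ppn pairs) */
            uint32_t * fresh = calloc( 1024 , sizeof(uint32_t) );
            if( fresh == NULL ) return NULL;

            /* try to publish it atomically, as hal_atomic_cas() does */
            uint32_t * expected = NULL;
            if( atomic_compare_exchange_strong( slot , &expected , fresh ) )
                return fresh;                       /* we won the race */

            free( fresh );                          /* lost: discard and retry */
        }
    }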