Changeset 587
- Timestamp: Nov 1, 2018, 12:39:27 PM
- Location: trunk/hal
- Files: 7 edited
trunk/hal/generic/hal_atomic.h
r505 → r587

 *****************************************************************************************
 * @ ptr    : pointer on the shared variable (signed or unsigned)
-* @ val    : signed value to add
+* @ val    : value to add
 * @ return shared variable value before add
 ****************************************************************************************/
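The fixed comment clarifies that <val> need not be signed: the primitive has fetch-and-add semantics, returning the value observed before the addition. A minimal usage sketch (the counter/ticket names are illustrative, not part of the changeset; the pointer type is assumed compatible with uint32_t*):

    uint32_t counter = 0;                               // shared variable
    uint32_t ticket  = hal_atomic_add( &counter , 1 );  // ticket gets 0 / counter becomes 1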
trunk/hal/generic/hal_exception.h
r480 → r587

 // => The hal_do_exception() function calls the generic vmm_handle_page_fault(),
 //    or the fpu_handle_exception() function, and the calling thread resumes execution
-//    when the exception has been handled.
+//    when the exception can be successfully handled.
 //
 // - USER_ERROR : exceptions such as "illegal vaddr" or "illegal write access" are fatal.
… …

 /*****************************************************************************************
+ * This enum defines the global exception types after analysis by the exception handler.
+ ****************************************************************************************/
+
+typedef enum
+{
+    EXCP_NON_FATAL,
+    EXCP_USER_ERROR,
+    EXCP_KERNEL_PANIC,
+}
+exception_handling_type_t;
+
+
+/*****************************************************************************************
 * This function is called by the hal_kentry() function when an exception is detected by
 * the hardware for a given thread running on a given core.
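Moving this enum from hal_exception.c into the generic header lets the architecture-specific handlers and their generic callers share the three-way verdict. A sketch of the intended dispatch (illustrative only: the handler name and the two recovery helpers are assumptions, not code from this changeset):

    exception_handling_type_t type = hal_mmu_exception( this );  // assumed HAL analysis routine

    if     ( type == EXCP_NON_FATAL )  return;                   // calling thread simply resumes
    else if( type == EXCP_USER_ERROR ) process_kill( process );  // hypothetical: kill the faulty user process
    else                               core_panic();             // hypothetical: unrecoverable kernel error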
trunk/hal/generic/hal_gpt.h
r457 → r587

 /****************************************************************************************
- * This function maps a local GPT entry identified by its VPN, from values defined
- * by the ppn and attr arguments. It allocates physical memory for the local generic
- * page table itself if required.
+ * This function maps a - local or remote - GPT entry identified by its VPN, from values
+ * defined by the <ppn> and <attr> arguments. It allocates physical memory in the remote
+ * cluster for the GPT PT2, using a RPC_PMEM_GET_PAGES, if required.
 ****************************************************************************************
 * @ gpt     : [in] pointer on the page table
… …
 * @ returns 0 if success / returns ENOMEM if error
 ***************************************************************************************/
-error_t hal_gpt_set_pte( gpt_t    * gpt,
+error_t hal_gpt_set_pte( xptr_t     gpt_xp,
                          vpn_t      vpn,
                          uint32_t   attr,
… …

 /****************************************************************************************
  * This function unmaps a page table entry identified by the <vpn> argument in the
- * local page table identified by the <gpt> argument.
+ * local GPT identified by the <gpt> argument.
  * It does NOT release the physical memory allocated for the unmapped page.
 ****************************************************************************************
… …

 /****************************************************************************************
- * This function returns in the <attr> and <ppn> arguments the current values
- * stored in a GPT entry, identified by the <gpt> and <vpn> arguments.
+ * This function returns in the <attr> and <ppn> arguments the current values stored
+ * in a - local or remote - GPT entry, identified by the <gpt> and <vpn> arguments.
 ****************************************************************************************
- * @ gpt_xp  : [in] pointer on the page table
+ * @ gpt_xp  : [in] extended pointer on the page table
 * @ vpn     : [in] virtual page number
 * @ attr    : [out] generic attributes
 * @ ppn     : [out] physical page number
 ***************************************************************************************/
-void hal_gpt_get_pte( gpt_t    * gpt,
+void hal_gpt_get_pte( xptr_t     gpt_xp,
                       vpn_t      vpn,
                       uint32_t * attr,
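Because the first parameter is now an extended pointer, the same call maps an entry in a local or a remote GPT. A usage sketch (the owner_cxy and process variables and the GPT_MAPPED flag are assumptions for illustration; GPT_SMALL appears in the implementation below):

    // build an extended pointer on the GPT of the owner cluster (possibly remote)
    xptr_t gpt_xp = XPTR( owner_cxy , &process->vmm.gpt );

    // map one small page : a PT2 is allocated in owner_cxy (via RPC) if needed
    error_t error = hal_gpt_set_pte( gpt_xp , vpn , GPT_MAPPED | GPT_SMALL , ppn );
    if( error ) printk("\n[ERROR] in %s : cannot map vpn %x\n", __FUNCTION__, vpn );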
trunk/hal/tsar_mips32/core/hal_exception.c
r570 → r587

 //////////////////////////////////////////////////////////////////////////////////////////
-// This enum defines the global exception types after analysis by the exception handler.
-//////////////////////////////////////////////////////////////////////////////////////////
-
-typedef enum
-{
-    EXCP_NON_FATAL,
-    EXCP_USER_ERROR,
-    EXCP_KERNEL_PANIC,
-}
-exception_handling_type_t;
-
-//////////////////////////////////////////////////////////////////////////////////////////
 // This enum defines the mask values for an MMU exception code reported by the mips32.
 //////////////////////////////////////////////////////////////////////////////////////////
… …
     uint32_t cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_HAL_EXCEPTIONS < cycle )
-    printk("\n[DBG] %s : thread %x in process %x enter / is_ins %d / %s / vaddr %x / cycle %d\n",
-    __FUNCTION__, this->trdid, process->pid,
+    printk("\n[DBG] %s : thread[%x,%x] enter / is_ins %d / %s / vaddr %x / cycle %d\n",
+    __FUNCTION__, process->pid, this->trdid,
     is_ins, hal_mmu_exception_str(excp_code), bad_vaddr, cycle);
 #endif
… …
             // try to map the unmapped PTE
             error = vmm_handle_page_fault( process,
-                                           bad_vaddr >> CONFIG_PPM_PAGE_SHIFT,  // vpn
-                                           false );                             // not a COW
-            if( error )
-            {
-                printk("\n[USER ERROR] in %s for thread %x in process %x\n"
-                "  cannot map vaddr = %x / is_ins %d / epc %x\n",
-                __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
-
-                return EXCP_USER_ERROR;
-            }
-            else                                     // page fault successful
+                                           bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
+
+            if( error == EXCP_NON_FATAL )            // page-fault successfully handled
             {

 #if DEBUG_HAL_EXCEPTIONS
                 cycle = (uint32_t)hal_get_cycles();
                 if( DEBUG_HAL_EXCEPTIONS < cycle )
-                printk("\n[DBG] %s : thread %x in process %x exit / page-fault handled for vaddr = %x\n",
-                __FUNCTION__, this->trdid, process->pid, bad_vaddr );
+                printk("\n[DBG] %s : thread[%x,%x] exit / page-fault handled for vaddr = %x\n",
+                __FUNCTION__, process->pid, this->trdid, bad_vaddr );
 #endif

                 return EXCP_NON_FATAL;
             }
+            else if( error == EXCP_USER_ERROR )      // illegal vaddr
+            {
+                printk("\n[USER ERROR] in %s for thread %x in process %x\n"
+                "  illegal vaddr = %x / is_ins %d / epc %x\n",
+                __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
+
+                return EXCP_USER_ERROR;
+            }
+            else                                     // error == EXCP_KERNEL_PANIC
+            {
+                printk("\n[KERNEL ERROR] in %s for thread %x in process %x\n"
+                "  no memory to map vaddr = %x / is_ins %d / epc %x\n",
+                __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
+
+                return EXCP_KERNEL_PANIC;
+            }
         }
         case MMU_WRITE_PRIVILEGE_VIOLATION:          // illegal access user error
         case MMU_READ_PRIVILEGE_VIOLATION:
         {
-            printk("\n[USER ERROR] in %s for thread %x in process %x\n"
+            printk("\n[USER ERROR] in %s : thread %x in process %x\n"
             "  illegal user access to vaddr = %x / is_ins %d / epc %x\n",
             __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
… …
             {
                 // try to allocate and copy the page
-                error = vmm_handle_page_fault( process,
-                                               bad_vaddr >> CONFIG_PPM_PAGE_SHIFT,  // vpn
-                                               true );                              // COW
-                if( error )
-                {
-                    printk("\n[USER ERROR] in %s for thread %x in process %x\n"
-                    "  cannot cow vaddr = %x / is_ins %d / epc %x\n",
-                    __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
-
-                    return EXCP_USER_ERROR;
-                }
-                else                                 // copy-on-write successful
+                error = vmm_handle_cow( process,
+                                        bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );
+
+                if( error == EXCP_NON_FATAL )        // copy-on-write successfully handled
                 {

 #if DEBUG_HAL_EXCEPTIONS
                     cycle = (uint32_t)hal_get_cycles();
                     if( DEBUG_HAL_EXCEPTIONS < cycle )
-                    printk("\n[DBG] %s : thread %x in process %x exit / copy-on-write handled for vaddr = %x\n",
-                    __FUNCTION__, this->trdid, process->pid, bad_vaddr );
+                    printk("\n[DBG] %s : thread[%x,%x] exit / copy-on-write handled for vaddr = %x\n",
+                    __FUNCTION__, process->pid, this->trdid, bad_vaddr );
 #endif

                     return EXCP_NON_FATAL;
                 }
+                else if( error == EXCP_USER_ERROR )  // illegal user access
+                {
+                    printk("\n[USER ERROR] in %s : thread %x in process %x\n"
+                    "  cannot cow vaddr = %x / is_ins %d / epc %x\n",
+                    __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
+
+                    return EXCP_USER_ERROR;
+                }
+                else                                 // error == EXCP_KERNEL_PANIC
+                {
+                    printk("\n[KERNEL ERROR] in %s : thread %x in process %x\n"
+                    "  no memory to cow vaddr = %x / is_ins %d / epc %x\n",
+                    __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
+
+                    return EXCP_KERNEL_PANIC;
+                }
             }
             else                                     // non writable user error
             {
-                printk("\n[USER ERROR] in %s for thread %x in process %x\n"
+                printk("\n[USER ERROR] in %s : thread %x in process %x\n"
                 "  non-writable vaddr = %x / is_ins %d / epc %x\n",
                 __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
… …
         case MMU_READ_EXEC_VIOLATION:                // user error
         {
-            printk("\n[USER_ERROR] in %s for thread %x in process %x\n"
+            printk("\n[USER_ERROR] in %s : thread %x in process %x\n"
             "  non-executable vaddr = %x / is_ins %d / epc %x\n",
             __FUNCTION__, this->trdid, this->process->pid, bad_vaddr, is_ins, excPC );
… …
         default:                                     // this is a kernel error
         {
-            printk("\n[KERNEL ERROR] in %s for thread %x in process %x\n"
+            printk("\n[KERNEL ERROR] in %s : thread %x in process %x\n"
             "  epc %x / badvaddr %x / is_ins %d\n",
             __FUNCTION__, this->trdid, this->process->pid, excPC, bad_vaddr, is_ins );
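Both vmm_handle_page_fault() and vmm_handle_cow() now receive a virtual page number rather than the raw faulting address. A worked example of the conversion used above (assuming 4-Kbyte pages, i.e. CONFIG_PPM_PAGE_SHIFT == 12):

    uint32_t bad_vaddr = 0x00403a24;                          // faulting virtual address
    vpn_t    vpn       = bad_vaddr >> CONFIG_PPM_PAGE_SHIFT;  // vpn == 0x403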
trunk/hal/tsar_mips32/core/hal_gpt.c
r570 → r587

 xptr_t page_xp;

+    thread_t * this = CURRENT_THREAD;
+
 #if DEBUG_HAL_GPT_CREATE
-    uint32_t cycle = (uint32_t)hal_get_cycles;
+    uint32_t cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_HAL_GPT_CREATE < cycle )
-    printk("\n[DBG] %s : thread %x enter / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, cycle );
+    printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
… …
     if( page == NULL )
     {
-        printk("\n[ERROR] in %s : cannot allocate memory for PT1\n", __FUNCTION__ );
+        printk("\n[PANIC] in %s : no memory for PT1 / process %x / cluster %x\n",
+        __FUNCTION__, this->process->pid, local_cxy );
         return ENOMEM;
     }
… …

 #if DEBUG_HAL_GPT_CREATE
-    cycle = (uint32_t)hal_get_cycles;
+    cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_HAL_GPT_CREATE < cycle )
-    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, cycle );
+    printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
… …

 #if DEBUG_HAL_GPT_DESTROY
-    uint32_t cycle = (uint32_t)hal_get_cycles;
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    thread_t * this = CURRENT_THREAD;
     if( DEBUG_HAL_GPT_DESTROY < cycle )
-    printk("\n[DBG] %s : thread %x enter / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, cycle );
+    printk("\n[DBG] %s : thread[%x,%x] enter / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
… …
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
         xptr_t base_xp = ppm_ppn2base( pt2_ppn );
-        pt2 = (uint32_t *)GET_PTR( base_xp );
+        pt2 = GET_PTR( base_xp );

         // scan the PT2 to release all entries VALID and USER if reference cluster
… …

 #if DEBUG_HAL_GPT_DESTROY
-    cycle = (uint32_t)hal_get_cycles;
+    cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_HAL_GPT_DESTROY < cycle )
-    printk("\n[DBG] %s : thread %x exit / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, cycle );
+    printk("\n[DBG] %s : thread[%x,%x] exit / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, cycle );
 #endif
… …
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
         xptr_t base_xp = ppm_ppn2base ( pt2_ppn );
-        pt2 = (uint32_t *)GET_PTR( base_xp );
+        pt2 = GET_PTR( base_xp );

         // scan the PT2
… …

-///////////////////////////////////////
-error_t hal_gpt_set_pte( gpt_t * gpt,
+//////////////////////////////////////////
+error_t hal_gpt_set_pte( xptr_t    gpt_xp,
                          vpn_t     vpn,
-                         uint32_t  attr,   // generic GPT attributes
+                         uint32_t  attr,   // GPT attributes
                          ppn_t     ppn )
 {
-    uint32_t  * pt1;       // PT1 base address
-    uint32_t  * pte1_ptr;  // pointer on PT1 entry
-    uint32_t    pte1;      // PT1 entry value
+    cxy_t       gpt_cxy;   // target GPT cluster
+    gpt_t     * gpt_ptr;   // target GPT local pointer
+    uint32_t  * pt1_ptr;   // local pointer on PT1
+    xptr_t      pte1_xp;   // extended pointer on PT1 entry
+    uint32_t    pte1;      // PT1 entry value if PTE1

     ppn_t       pt2_ppn;   // PPN of PT2
-    uint32_t  * pt2;       // PT2 base address
+    uint32_t  * pt2_ptr;   // PT2 base address

     uint32_t    small;     // requested PTE is for a small page
-    bool_t      success;   // exit condition for while loop below

     page_t    * page;      // pointer on new physical page descriptor
… …
     uint32_t    tsar_attr; // PTE attributes for TSAR MMU

-#if DEBUG_HAL_GPT_ACCESS
-    uint32_t cycle = (uint32_t)hal_get_cycles;
-    if( DEBUG_HAL_GPT_ACCESS < cycle )
-    printk("\n[DBG] %s : thread %x enter / vpn %x / attr %x / ppn %x / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, vpn, attr, ppn, cycle );
+    thread_t * this = CURRENT_THREAD;
+
+    // get cluster and local pointer on GPT
+    gpt_cxy = GET_CXY( gpt_xp );
+    gpt_ptr = GET_PTR( gpt_xp );
+
+#if DEBUG_HAL_GPT_SET_PTE
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_HAL_GPT_SET_PTE < cycle )
+    printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / attr %x / ppn %x / cluster %x / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, vpn, attr, ppn, gpt_cxy, cycle );
 #endif
… …
     ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

-    pt1   = gpt->ptr;
-    small = attr & GPT_SMALL;
+    pt1_ptr = hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+    small   = attr & GPT_SMALL;

     // compute tsar attributes from generic attributes
     tsar_attr = gpt2tsar( attr );

-#if (DEBUG_HAL_GPT_ACCESS & 1)
-    if( DEBUG_HAL_GPT_ACCESS < cycle )
-    printk("\n[DBG] %s : thread %x / vpn %x / &pt1 %x / tsar_attr %x\n",
-    __FUNCTION__, CURRENT_THREAD, vpn, pt1, tsar_attr );
-#endif
-
-    // get pointer on PT1[ix1]
-    pte1_ptr = &pt1[ix1];
-
-    // PTE1 (big page) are only set for the kernel vsegs, in the kernel init phase.
-    // There is no risk of concurrent access.
-    if( small == 0 )
-    {
-        // get current pte1 value
-        pte1 = *pte1_ptr;
-
+    // build extended pointer on PTE1 = PT1[ix1]
+    pte1_xp = XPTR( gpt_cxy , &pt1_ptr[ix1] );
+
+    // get current pte1 value
+    pte1 = hal_remote_l32( pte1_xp );
+
+    if( small == 0 )     // map a big page in PT1
+    {
         assert( (pte1 == 0) ,
         "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );

-        // set the PTE1
-        *pte1_ptr = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) |
-                    ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
+        // set the PTE1 value in PT1
+        pte1 = (tsar_attr & TSAR_MMU_PTE1_ATTR_MASK) | ((ppn >> 9) & TSAR_MMU_PTE1_PPN_MASK);
+        hal_remote_s32( pte1_xp , pte1 );
         hal_fence();
+
+#if DEBUG_HAL_GPT_SET_PTE
+        if( DEBUG_HAL_GPT_SET_PTE < cycle )
+        printk("\n[DBG] %s : thread[%x,%x] map PTE1 / cxy %x / ix1 %x / pt1 %x / pte1 %x\n",
+        __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
+#endif
+
         return 0;
     }
-
-    // From this point, the requested PTE is a PTE2 (small page)
-
-    // loop to access PTE1 and get pointer on PT2
-    success = false;
-    do
-    {
-        // get current pte1 value
-        pte1 = *pte1_ptr;
-
-#if (DEBUG_HAL_GPT_ACCESS & 1)
-        if( DEBUG_HAL_GPT_ACCESS < cycle )
-        printk("\n[DBG] %s : thread %x / vpn %x / current_pte1 %x\n",
-        __FUNCTION__, CURRENT_THREAD, vpn, pte1 );
-#endif
-
-        // allocate a PT2 if PT1 entry not valid
-        if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not valid
-        {
-            // allocate one physical page for the PT2
-            kmem_req_t req;
-            req.type  = KMEM_PAGE;
-            req.size  = 0;                    // 1 small page
-            req.flags = AF_KERNEL | AF_ZERO;
-            page = (page_t *)kmem_alloc( &req );
+    else                 // map a small page in PT1 & PT2
+    {
+        if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry unmapped => map it
+        {
+            // allocate one physical page for PT2
+            if( gpt_cxy == local_cxy )
+            {
+                kmem_req_t req;
+                req.type  = KMEM_PAGE;
+                req.size  = 0;                // 1 small page
+                req.flags = AF_KERNEL | AF_ZERO;
+                page = (page_t *)kmem_alloc( &req );
+            }
+            else
+            {
+                rpc_pmem_get_pages_client( gpt_cxy , 0 , &page );
+            }
+
             if( page == NULL )
             {
-                printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
+                printk("\n[PANIC] in %s : no memory for GPT PT2 / process %x / cluster %x\n",
+                __FUNCTION__, this->process->pid, gpt_cxy );
                 return ENOMEM;
             }

             // get the PT2 PPN
-            page_xp = XPTR( local_cxy , page );
+            page_xp = XPTR( gpt_cxy , page );
             pt2_ppn = ppm_page2ppn( page_xp );

-            // try to atomically set the PT1 entry
+            // build PTD1 value
             pte1 = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn;
-            success = hal_atomic_cas( pte1_ptr , 0 , pte1 );
-
-            // release allocated PT2 if PT1 entry modified by another thread
-            if( success == false ) ppm_free_pages( page );
+
+            // set the PTD1 value in PT1
+            hal_remote_s32( pte1_xp , pte1 );
+
+#if DEBUG_HAL_GPT_SET_PTE
+            if( DEBUG_HAL_GPT_SET_PTE < cycle )
+            printk("\n[DBG] %s : thread[%x,%x] map PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
+            __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
+#endif
         }
-        else                                  // PT1 entry is valid
+        else                                  // pt1 entry mapped => use it
         {
-            // This valid entry must be a PTD1
-            assert( (pte1 & TSAR_MMU_SMALL) ,
-            "try to set a small page in a big PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
-
-            success = true;
+
+#if DEBUG_HAL_GPT_SET_PTE
+            if( DEBUG_HAL_GPT_SET_PTE < cycle )
+            printk("\n[DBG] %s : thread[%x,%x] get PTD1 / cxy %x / ix1 %d / pt1 %x / ptd1 %x\n",
+            __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix1, pt1_ptr, pte1 );
+#endif
         }

         // get PT2 base from pte1
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-#if (DEBUG_HAL_GPT_ACCESS & 1)
-        if( DEBUG_HAL_GPT_ACCESS < cycle )
-        printk("\n[DBG] %s : thread %x / vpn %x / pte1 %x / &pt2 %x\n",
-        __FUNCTION__, CURRENT_THREAD, vpn, pte1, pt2 );
-#endif
-
-    }
-    while (success == false);
-
-    // set PTE2 in this order
-    pt2[2 * ix2 + 1] = ppn;
-    hal_fence();
-    pt2[2 * ix2] = tsar_attr;
-    hal_fence();
-
-#if DEBUG_HAL_GPT_ACCESS
-    cycle = (uint32_t)hal_get_cycles;
-    if( DEBUG_HAL_GPT_ACCESS < cycle )
-    printk("\n[DBG] %s : thread %x exit / vpn %x / pte2_attr %x / pte2_ppn %x / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, vpn, pt2[2 * ix2], pt2[2 * ix2 + 1], cycle );
-#endif
-
-    return 0;
-
+        pt2_ptr = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+        // set PTE2 in PT2 (in this order)
+        hal_remote_s32( XPTR( gpt_cxy , &pt2_ptr[2 * ix2 + 1] ) , ppn );
+        hal_fence();
+        hal_remote_s32( XPTR( gpt_cxy , &pt2_ptr[2 * ix2] ) , tsar_attr );
+        hal_fence();
+
+#if DEBUG_HAL_GPT_SET_PTE
+        if( DEBUG_HAL_GPT_SET_PTE < cycle )
+        printk("\n[DBG] %s : thread[%x,%x] map PTE2 / cxy %x / ix2 %x / pt2 %x / attr %x / ppn %x\n",
+        __FUNCTION__, this->process->pid, this->trdid, gpt_cxy, ix2, pt2_ptr, tsar_attr, ppn );
+#endif
+
+        return 0;
+    }
 } // end of hal_gpt_set_pte()

-/////////////////////////////////////
-void hal_gpt_get_pte( gpt_t    * gpt,
+////////////////////////////////////////
+void hal_gpt_get_pte( xptr_t     gpt_xp,
                       vpn_t      vpn,
                       uint32_t * attr,
… …
     ppn_t      pt2_ppn;

+    // get cluster and local pointer on GPT
+    cxy_t   gpt_cxy = GET_CXY( gpt_xp );
+    gpt_t * gpt_ptr = GET_PTR( gpt_xp );
+
+    // compute indexes in PT1 and PT2
     uint32_t ix1 = TSAR_MMU_IX1_FROM_VPN( vpn );
     uint32_t ix2 = TSAR_MMU_IX2_FROM_VPN( vpn );

-    // get PTE1 value
-    pt1  = gpt->ptr;
-    pte1 = pt1[ix1];
-
+    // get PT1 base
+    pt1 = (uint32_t *)hal_remote_lpt( XPTR( gpt_cxy , &gpt_ptr->ptr ) );
+
+    // get pte1
+    pte1 = hal_remote_l32( XPTR( gpt_cxy , &pt1[ix1] ) );
+
+    // check PTE1 mapped
     if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not present
     {
         *attr = 0;
         *ppn  = 0;
+        return;
     }

+    // access GPT
     if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
     {
+        // get PPN & ATTR from PT1
         *attr = tsar2gpt( TSAR_MMU_ATTR_FROM_PTE1( pte1 ) );
         *ppn  = TSAR_MMU_PPN_FROM_PTE1( pte1 ) | (vpn & ((1<<TSAR_MMU_IX2_WIDTH)-1));
     }
     else                                  // it's a PTD1
     {
         // compute PT2 base address
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-        *ppn  = pt2[2*ix2+1] & ((1<<TSAR_MMU_PPN_WIDTH)-1);
-        *attr = tsar2gpt( pt2[2*ix2] );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
+
+        // get PPN & ATTR from PT2
+        *ppn  = hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2+1] ) ) & ((1<<TSAR_MMU_PPN_WIDTH)-1);
+        *attr = tsar2gpt( hal_remote_l32( XPTR( gpt_cxy , &pt2[2*ix2] ) ) );
     }
 } // end hal_gpt_get_pte()
… …
     ppn_t       pt2_ppn;   // PPN of PT2
     uint32_t  * pt2;       // PT2 base address
-
-    ppn_t       ppn;       // PPN of page to be released

     // get ix1 & ix2 indexes
… …
     if( (pte1 & TSAR_MMU_SMALL) == 0 )    // it's a PTE1
     {
-        // get PPN
-        ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
-
         // unmap the big page
         pt1[ix1] = 0;
… …
         // compute PT2 base address
         pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

-        // get PPN
-        ppn = TSAR_MMU_PPN_FROM_PTE2( pt2[2*ix2+1] );
-
         // unmap the small page
-        pt2[2*ix2] = 0;              // only attr is reset
+        pt2[2*ix2] = 0;
         hal_fence();
… …
         page_xp = XPTR( local_cxy , page );
         pt2_ppn = ppm_page2ppn( page_xp );
-        pt2 = (uint32_t *)GET_PTR( ppm_page2base( page_xp ) );
+        pt2 = GET_PTR( ppm_page2base( page_xp ) );

         // try to set the PT1 entry
… …
         // get the PT2 base address
         pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
-        pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+        pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
     }
 }
… …
     // compute pointer on PT2 base
-    pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );
… …
     // get pointer on PT2 base
     pt2_ppn = TSAR_MMU_PPN_FROM_PTE1( pte1 );
-    pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

     // get pointer on PTE2
… …
     uint32_t  * dst_pt2;   // local pointer on DST PT2

-    kmem_req_t  req;       // for dynamic PT2 allocation
+    kmem_req_t  req;       // for PT2 allocation
… …
     ppn_t       dst_pt2_ppn;

-#if DEBUG_HAL_GPT_ACCESS
-    uint32_t cycle = (uint32_t)hal_get_cycles;
-    if( DEBUG_HAL_GPT_ACCESS < cycle )
-    printk("\n[DBG] %s : thread %x enter / vpn %x / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, vpn, cycle );
-#endif
-
     // get remote src_gpt cluster and local pointer
     src_cxy = GET_CXY( src_gpt_xp );
-    src_gpt = (gpt_t *)GET_PTR( src_gpt_xp );
+    src_gpt = GET_PTR( src_gpt_xp );
+
+#if DEBUG_HAL_GPT_COPY
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    thread_t * this = CURRENT_THREAD;
+    if( DEBUG_HAL_GPT_COPY < cycle )
+    printk("\n[DBG] %s : thread[%x,%x] enter / vpn %x / src_cxy %x / dst_cxy %x / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, vpn, src_cxy, local_cxy, cycle );
+#endif

     // get remote src_pt1 and local dst_pt1
… …
     // get pointer on src_pt2
     src_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( src_pte1 );
-    src_pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( src_pt2_ppn ) );
+    src_pt2 = GET_PTR( ppm_ppn2base( src_pt2_ppn ) );

     // get pointer on dst_pt2
     dst_pt2_ppn = (ppn_t)TSAR_MMU_PTBA_FROM_PTE1( dst_pte1 );
-    dst_pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );
+    dst_pt2 = GET_PTR( ppm_ppn2base( dst_pt2_ppn ) );

     // get attr and ppn from SRC_PT2
… …
     *ppn = src_pte2_ppn;

-#if DEBUG_HAL_GPT_ACCESS
-    cycle = (uint32_t)hal_get_cycles;
-    if( DEBUG_HAL_GPT_ACCESS < cycle )
-    printk("\n[DBG] %s : thread %x exit / copy done for vpn %x / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, vpn, cycle );
+#if DEBUG_HAL_GPT_COPY
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_HAL_GPT_COPY < cycle )
+    printk("\n[DBG] %s : thread[%x,%x] exit / copy done for vpn %x / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
 #endif
… …
     *ppn = 0;

-#if DEBUG_HAL_GPT_ACCESS
-    cycle = (uint32_t)hal_get_cycles;
-    if( DEBUG_HAL_GPT_ACCESS < cycle )
-    printk("\n[DBG] %s : thread %x exit / nothing done for vpn %x / cycle %d\n",
-    __FUNCTION__, CURRENT_THREAD, vpn, cycle );
+#if DEBUG_HAL_GPT_COPY
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_HAL_GPT_COPY < cycle )
+    printk("\n[DBG] %s : thread[%x,%x] exit / nothing done for vpn %x / cycle %d\n",
+    __FUNCTION__, this->process->pid, this->trdid, vpn, cycle );
 #endif
… …
     // compute PT2 base address
     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-    pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

     // get pte2_attr
… …
     // compute PT2 base address
     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-    pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

     // get pte2_attr
… …
     // get GPT cluster and local pointer
     gpt_cxy = GET_CXY( gpt_xp );
-    gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
+    gpt_ptr = GET_PTR( gpt_xp );

     // get local PT1 pointer
… …
     // compute PT2 base address
     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-    pt2 = (uint32_t*)GET_PTR( ppm_ppn2base( pt2_ppn ) );
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

     assert( (GET_CXY( ppm_ppn2base( pt2_ppn ) ) == gpt_cxy ),
… …
     // get cluster and local pointer on remote GPT
     cxy_t   gpt_cxy = GET_CXY( gpt_xp );
-    gpt_t * gpt_ptr = (gpt_t *)GET_PTR( gpt_xp );
+    gpt_t * gpt_ptr = GET_PTR( gpt_xp );

     // compute indexes in PT1 and PT2
… …
     // get PT2 base from PTE1
     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
-    pt2 = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
-
-    // reset PTE2
-    hal_remote_s32( XPTR( gpt_cxy, &pt2[2 * ix2] ) , 0 );
-    hal_fence();
+    pt2 = GET_PTR( ppm_ppn2base( pt2_ppn ) );

     // set PTE2 in this order
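For reference, the two-level TSAR layout that hal_gpt_set_pte() manipulates: a VPN is split into an ix1 index into PT1 and an ix2 index into PT2, and each PTE2 occupies two consecutive 32-bit words. A sketch of the decomposition (assuming the usual TSAR split of a 20-bit VPN into 11 + 9 bits; the real code uses the TSAR_MMU_IX1_FROM_VPN / TSAR_MMU_IX2_FROM_VPN macros):

    uint32_t ix1 = (vpn >> 9) & 0x7FF;   // PT1 index : one entry per 2-Mbyte region
    uint32_t ix2 = vpn & 0x1FF;          // PT2 index : one entry per 4-Kbyte page

    // a PTE2 spans two words : pt2[2*ix2] holds the attributes and
    // pt2[2*ix2 + 1] holds the PPN, which is why the code above writes the
    // PPN first, then the attributes, separated by hal_fence() : a concurrent
    // reader can never observe a mapped attribute word paired with a stale PPN.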
trunk/hal/tsar_mips32/core/hal_vmm.c
r579 → r587

 /*
- * hal_vmm.c - Generic Virtual Memory Manager Initialisation for TSAR
+ * hal_vmm.c - Virtual Memory Manager Initialisation for TSAR
  *
  * Authors  Alain Greiner (2016,2017)
… …
 // identity mapped. The following function is called by the generic vmm_init() function
 // and identity map all pages of the "kentry" vseg.
+// We don't take the locks protecting the VSL and the GPT, because there are no
+// concurrent accesses to the VMM during VMM initialization.
 //////////////////////////////////////////////////////////////////////////////////////////

… …
 {
     error_t error;
-
-    // get pointer on GPT
-    gpt_t * gpt = &vmm->gpt;

     // map all pages of "kentry" vseg
… …
          vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE); vpn++ )
     {
-        error = hal_gpt_set_pte( gpt,
+        error = hal_gpt_set_pte( XPTR( local_cxy , &vmm->gpt ),
                                  vpn,
                                  attr,
… …
         if( error ) return error;
     }
-
-    // get extended pointer on lock protecting the VSL
-    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-    // get VSL lock
-    remote_rwlock_wr_acquire( lock_xp );

     // scan the VSL to find the "kentry" vseg
… …
     }

-    // release the VSL lock
-    remote_rwlock_wr_release( lock_xp );
-
-    if( found == false ) return error;
+    if( found == false ) return 0XFFFFFFFF;

     return 0;
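Identity mapping here means vpn == ppn for every page of the "kentry" vseg, so the kernel entry code keeps executing at the same addresses the instant the MMU is turned on. A condensed sketch of the loop above (attr stands for the kernel attributes computed earlier in the function):

    vpn_t vpn;
    for( vpn = CONFIG_VMM_KENTRY_BASE ;
         vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE) ; vpn++ )
    {
        // identity mapping : the PPN argument equals the VPN
        error = hal_gpt_set_pte( XPTR( local_cxy , &vmm->gpt ) , vpn , attr , vpn );
        if( error ) return error;
    }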
trunk/hal/tsar_mips32/drivers/soclib_mty.c
r570 → r587

     // get extended pointers on MTY_WRITE & MTY_STATUS registers
     xptr_t write_xp  = XPTR( mty_cxy , mty_ptr + MTY_WRITE );
-    xptr_t status_xp = XPTR( mty_cxy , mty_ptr + MTY_STATUS );

     // loop on characters (two bytes per character)