Changeset 408 for trunk/hal/tsar_mips32
- Timestamp: Dec 5, 2017, 4:20:07 PM (7 years ago)
- Location: trunk/hal/tsar_mips32
- Files: 13 edited
trunk/hal/tsar_mips32/core/hal_context.c (r407 → r408)

The three SR initialisation constants now set all eight interrupt-mask bits
(IM field 0xFF00 instead of 0xFC00), and the comment above them is made more
precise ("Define various SR initialisation values for TSAR-MIPS32"):

    -#define SR_USR_MODE      0x0000FC13
    -#define SR_USR_MODE_FPU  0x2000FC13
    -#define SR_SYS_MODE      0x0000FC00
    +#define SR_USR_MODE      0x0000FF13
    +#define SR_USR_MODE_FPU  0x2000FF13
    +#define SR_SYS_MODE      0x0000FF00

A new hal_cpu_context_fork() function is added after hal_cpu_context_create().
It builds the CPU context of a (possibly remote) child thread from the local
parent context:

    +////////////////////////////////////////////
    +void hal_cpu_context_fork( xptr_t child_xp )
    +{
    +    // allocate a local CPU context in kernel stack.
    +    // It is initialized from the local parent context
    +    // and from child specific values, and is copied in
    +    // the remote child context using a remote_memcpy()
    +    hal_cpu_context_t  context;
    +
    +    // get local parent thread local pointer
    +    thread_t * parent_ptr = CURRENT_THREAD;
    +
    +    // get remote child thread cluster and local pointer
    +    cxy_t      child_cxy = GET_CXY( child_xp );
    +    thread_t * child_ptr = (thread_t *)GET_PTR( child_xp );
    +
    +    // get remote child cpu_context local pointer
    +    char * child_context_ptr = hal_remote_lpt( XPTR(child_cxy , &child_ptr->cpu_context) );
    +
    +    // get local pointer on remote child process
    +    process_t * process = (process_t *)hal_remote_lpt( XPTR(child_cxy , &child_ptr->process) );
    +
    +    // get ppn of remote child process page table
    +    uint32_t pt_ppn = hal_remote_lw( XPTR(child_cxy , &process->vmm.gpt.ppn) );
    +
    +    // save CPU registers in local CPU context
    +    hal_do_cpu_save( &context );
    +
    +    // From this point, both parent and child threads execute the following code.
    +    // They can be distinguished by the CURRENT_THREAD value, and child will only
    +    // execute it when it is unblocked by parent, after return to sys_fork().
    +    // - parent thread copies user stack, and patches sp_29 / c0_th / c0_sr / c2_ptpr
    +    // - child thread does nothing
    +
    +    thread_t * current = CURRENT_THREAD;
    +
    +    if( current == parent_ptr )                // current == parent thread
    +    {
    +        // get parent and child stack pointers
    +        char * parent_sp = (char *)context.sp_29;
    +        char * child_sp  = (char *)((intptr_t)parent_sp +
    +                                    (intptr_t)child_ptr -
    +                                    (intptr_t)parent_ptr );
    +
    +        // patch kernel_stack pointer, current thread, and status slots
    +        context.sp_29   = (uint32_t)child_sp;
    +        context.c0_th   = (uint32_t)child_ptr;
    +        context.c0_sr   = SR_SYS_MODE;
    +        context.c2_ptpr = pt_ppn >> 1;
    +
    +        // copy local context to remote child context
    +        hal_remote_memcpy( XPTR( child_cxy , child_context_ptr ),
    +                           XPTR( local_cxy , &context ) ,
    +                           sizeof( hal_cpu_context_t ) );
    +
    +        // copy kernel stack content from local parent thread to remote child thread
    +        uint32_t size = (uint32_t)parent_ptr + CONFIG_THREAD_DESC_SIZE - (uint32_t)parent_sp;
    +        hal_remote_memcpy( XPTR( child_cxy , child_sp ),
    +                           XPTR( local_cxy , parent_sp ),
    +                           size );
    +    }
    +    else                                       // current == child thread
    +    {
    +        assert( (current == child_ptr) , __FUNCTION__ , "current = %x / child = %x\n");
    +    }
    +
    +}  // end hal_cpu_context_fork()

hal_cpu_context_display() now takes an extended pointer instead of a local
thread pointer:

    -void hal_cpu_context_display( thread_t * thread )
    +void hal_cpu_context_display( xptr_t thread_xp )

It fetches the context pointer and the sp_29, ra_31, c0_sr, c0_epc, c0_th,
c2_ptpr and c2_mode slots with hal_remote_lpt() / hal_remote_lw() before
printing them; the gp_28 slot is no longer displayed.

hal_fpu_context_save() also switches to an extended pointer:

    -void hal_fpu_context_save( thread_t * thread )
    +void hal_fpu_context_save( xptr_t thread_xp )

Its 32 swc1 instructions now store the FPU registers into a context allocated
in the kernel stack, which is then copied to the (possibly remote) thread
descriptor:

    +    // copy local context to remote child context
    +    hal_remote_memcpy( XPTR( thread_cxy , &thread_ptr->fpu_context ),
    +                       XPTR( local_cxy , &context ) ,
    +                       sizeof( hal_fpu_context_t ) );

The closing comment is also fixed from "end hal_cpu_context_save()" to
"end hal_fpu_context_save()".
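The child stack patching above relies on the thread descriptor and its kernel
stack sharing a single block of CONFIG_THREAD_DESC_SIZE bytes, laid out
identically in parent and child. Below is a minimal sketch of that arithmetic;
the helper names and the size value are illustrative, only the logic is taken
from hal_cpu_context_fork():

    #include <stdint.h>
    #include <string.h>

    #define THREAD_DESC_SIZE  0x4000  /* hypothetical CONFIG_THREAD_DESC_SIZE */

    /* translate a parent kernel-stack address into the child block:
       both blocks have the same internal layout, so the offset of any
       stack byte from the descriptor base is identical in both */
    static inline char * mirror_sp( char * parent_sp,
                                    char * parent_thread,
                                    char * child_thread )
    {
        return parent_sp + (child_thread - parent_thread);
    }

    /* copy the live part of the parent kernel stack into the child block:
       the stack grows down from the top of the thread block, so the live
       bytes are [parent_sp , parent_thread + THREAD_DESC_SIZE) */
    static void copy_kernel_stack( char * parent_thread,
                                   char * parent_sp,
                                   char * child_thread )
    {
        uint32_t size = (uint32_t)( parent_thread + THREAD_DESC_SIZE - parent_sp );
        memcpy( mirror_sp( parent_sp, parent_thread, child_thread ),
                parent_sp,
                size );
    }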
trunk/hal/tsar_mips32/core/hal_exception.c (r407 → r408)

A new static helper, hal_mmu_exception_str(), maps each of the eight
MMU_WRITE_* and eight MMU_READ_* exception codes to its name (for example
MMU_WRITE_PT1_UNMAPPED → "WRITE_PT1_UNMAPPED") and returns "undefined" for
any other value; it is used by the debug messages below.

The lazy FPU switch now passes an extended pointer on the owner thread:

    -        hal_fpu_context_save ( core->fpu_owner->fpu_context );
    +        hal_fpu_context_save( XPTR( local_cxy , core->fpu_owner ) );

In the MMU exception handler, the excp_dmsg() trace is replaced by temporary
hardwired debug code (tagged "@@@") that, for one specific thread and after a
given cycle, dumps two page tables and the decoded exception name:

    +    // @@@
    +    thread_t * parent = (thread_t *)0xa4000;
    +    uint32_t   cond   = (this == 0xe0000) && (hal_time_stamp() > 5380000);
    +
    +    if( cond ) hal_gpt_display( this->process );
    +    if( cond ) hal_gpt_display( parent->process );
    +    if( cond ) printk("\n[DBG] %s : core[%x,%d] / is_ins %d / %s / vaddr %x\n",
    +               __FUNCTION__ , local_cxy , this->core->lid , is_ins,
    +               hal_mmu_exception_str(excp_code) , bad_vaddr );

In the MMU_WRITE_ACCESS_VIOLATION case (user error, or Copy-on-Write) the
copy-on-write handler is renamed:

    -            error = vmm_copy_on_write( process,
    -                                       bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );  // vpn
    +            error = vmm_handle_cow( process,
    +                                    bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );

hal_exception_dump() renames its regs_tbl argument to uzone, prints the faulty
thread's trdid and pid in the USER ERROR / KERNEL PANIC banners, and extends
the register dump with the new c0_th and c2_ptpr slots:

    +    nolock_printk("c0_cr   %X  c0_epc  %X  c0_sr %X  c0_th %X\n",
    +                  uzone[UZ_CR], uzone[UZ_EPC], uzone[UZ_SR], uzone[UZ_TH] );
    +
    +    nolock_printk("c2_mode %X  c2_ptpr %X\n",
    +                  uzone[UZ_MODE], uzone[UZ_PTPR] );

hal_do_exception() takes no more arguments: it gets the faulty thread and its
uzone from CURRENT_THREAD. The hal_get_cycle() typo is fixed into
hal_get_cycles(), and for a kernel error the hal_core_sleep() call (together
with the TODO asking to replace it) gives way to the generic panic():

    -        hal_exception_dump( this , regs_tbl , error );
    -        hal_core_sleep();
    +        hal_exception_dump( this , uzone , error );
    +        panic( "KERNEL_PANIC for thread %x in process %x on core [%x,%d]/n",
    +               this->trdid , this->process->pid , local_cxy , this->core->lid );
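For reference, the two Cause-register decodings used by hal_do_exception() can
be written as small helpers. This is a sketch assuming the r408 UZ_CR slot
index; the helper names are not from the changeset:

    #include <stdint.h>

    #define UZ_CR  36   /* c0_cr slot in the r408 uzone layout */

    /* 4-bit exception code: bits 5:2 of the MIPS32 Cause register */
    static inline uint32_t uzone_xcode( uint32_t * uzone )
    {
        return (uzone[UZ_CR] >> 2) & 0xF;
    }

    /* true when a coprocessor-unusable exception targets the FPU (CP1):
       the CE field occupies bits 29:28 of the Cause register */
    static inline int is_fpu_unusable( uint32_t * uzone )
    {
        return ((uzone[UZ_CR] >> 28) & 0x3) == 1;
    }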
trunk/hal/tsar_mips32/core/hal_gpt.c (r407 → r408)

hal_gpt_set_pte() swaps its last two arguments, from (gpt, vpn, ppn, attr) to
(gpt, vpn, attr, ppn), and its caller in this file is updated accordingly; its
gpt_dmsg() traces are re-indented. hal_gpt_display() prints SMALL pages as
vpn / ppn / generic attributes (through tsar2gpt()) instead of raw PT2 words:

    -                printk(" - SMALL : vpn = %x / PT2[%d] = %x / pt2[%d] = %x\n",
    -                       vpn , 2*ix2 , pte2_attr , 2*ix2+1 , pte2_ppn );
    +                printk(" - SMALL : vpn %X / ppn %X / attr %X\n",
    +                       vpn , pte2_ppn , tsar2gpt(pte2_attr) );

The vseg-level hal_gpt_copy( dst_gpt, src_gpt, vpn_base, vpn_size, cow ) is
replaced by the page-level hal_gpt_pte_copy( dst_gpt, src_gpt_xp, vpn, cow,
&ppn, &mapped ), which copies one PTE from a possibly remote source GPT: it
does nothing unless the source PTE1 is MAPPED and SMALL, allocates and
registers a destination PT2 if needed, copies the PTE2, and reports the source
PPN and whether a copy was actually made. The copy-on-write attributes are
computed when the destination PTE2 is written:

    +        // set attributes in DST PTE2
    +        if( cow && (src_pte2_attr & TSAR_MMU_WRITABLE) )
    +        {
    +            dst_pt2[2*ix2] = (src_pte2_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
    +        }
    +        else
    +        {
    +            dst_pt2[2*ix2] = src_pte2_attr;
    +        }

A new predicate, hal_gpt_pte_is_mapped( gpt, vpn ), returns true only when the
PTE1 is MAPPED and SMALL and the PTE2 is MAPPED. hal_gpt_pte_is_cow() is
restructured the same way and gains a temporary hardwired debug trace (thread
0xe0000 after cycle 5380000).

The long commented-out "deprecated : old hal_gpt_copy" block is deleted, and
two new functions are added, as shown in the hunks below:

- hal_gpt_flip_cow( set_cow, gpt_xp, vpn_base, vpn_size ) walks all MAPPED
  SMALL PTE2s of a range in a possibly remote GPT, and either sets COW and
  clears WRITABLE, or restores WRITABLE and clears COW:

    +            if( (set_cow != 0) && (old_attr & TSAR_MMU_WRITABLE) )
    +            {
    +                new_attr = (old_attr | TSAR_MMU_COW) & (~TSAR_MMU_WRITABLE);
    +                hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] ) , new_attr );
    +            }
    +            if( (set_cow == 0) && (old_attr & TSAR_MMU_COW ) )
    +            {
    +                new_attr = (old_attr | TSAR_MMU_WRITABLE) & (~TSAR_MMU_COW);
    +                hal_remote_sw( XPTR( gpt_cxy , &pt2[2*ix2] ) , new_attr );
    +            }

- hal_gpt_update_pte( gpt_xp, vpn, attr, ppn ) remotely rewrites an existing
  MAPPED SMALL PTE2, invalidating it first and fencing between the steps:

    +    // reset PTE2
    +    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2] ) , 0 );
    +    hal_fence();
    +
    +    // set PTE2 in this order
    +    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2 + 1] ) , ppn );
    +    hal_fence();
    +    hal_remote_sw( XPTR( gpt_cxy, &pt2[2 * ix2] ) , tsar_attr );
    +    hal_fence();
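All of these functions share the same TSAR address split. The sketch below
reconstructs the index arithmetic, consistent with the (ix1 << 9) | ix2
recombination in hal_gpt_display() and the 2048 x 512 loop bounds of the
deleted code; the macro bodies here are reconstructions, not the hal header
definitions:

    #include <stdint.h>

    /* a 20-bit VPN indexes a 2048-entry PT1 and a 512-entry PT2 */
    #define TSAR_MMU_IX1_FROM_VPN( vpn )  (((vpn) >> 9) & 0x7FF)
    #define TSAR_MMU_IX2_FROM_VPN( vpn )  ((vpn) & 0x1FF)

    /* read one PTE2 given a local PT2 base pointer: each PT2 entry is a
       pair of 32-bit words, attributes first, then physical page number */
    static inline void read_pte2( uint32_t * pt2,
                                  uint32_t   ix2,
                                  uint32_t * attr,
                                  uint32_t * ppn )
    {
        *attr = pt2[2 * ix2];
        *ppn  = pt2[2 * ix2 + 1];
    }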
trunk/hal/tsar_mips32/core/hal_interrupt.c (r407 → r408)

hal_do_interrupt() no longer receives the thread pointer and register array;
like the other handlers it now takes no argument:

    -void hal_do_interrupt( thread_t * this,
    -                       reg_t    * regs_tbl )
    +void hal_do_interrupt()
trunk/hal/tsar_mips32/core/hal_kentry.S (r407 → r408)

The uzone grows from 35 to 37 words and is renumbered: the callee-saved
registers s0-s7 now precede t8-t9, lo/hi follow, then gp/sp/s8/ra, and the
CP0/CP2 state gains two new slots for c2_ptpr and c0_th:

    +#define UZ_S0   16
    +#define UZ_S1   17
    +#define UZ_S2   18
    +#define UZ_S3   19
    +#define UZ_S4   20
    +#define UZ_S5   21
    +#define UZ_S6   22
    +#define UZ_S7   23
    +#define UZ_T8   24
    +#define UZ_T9   25
    +
    +#define UZ_LO   26
    +#define UZ_HI   27
    +
    +#define UZ_GP   28
    +#define UZ_SP   29
    +#define UZ_S8   30
    +#define UZ_RA   31
    +#define UZ_PTPR 32
    +#define UZ_EPC  33
    +#define UZ_SR   34
    +#define UZ_TH   35
    +#define UZ_CR   36
    +
    +#define UZ_REGS 37

The .extern list drops cluster_core_kernel_enter / cluster_core_kentry_exit
and declares puts, putx and putl, used by a new CONFIG_KENTRY_DEBUG option
that prints the uzone address and the saved SP, RA, TH, EPC, MODE and V0 slots
at both kernel entry and kernel exit; the matching message strings live in a
new .kdata section.

In the user-mode entry path the kernel stack pointer is now derived from the
thread descriptor with the CONFIG_THREAD_DESC_SIZE constant, and the save
sequence records the full CP0/CP2 state in the renumbered slots:

    +    mfc0  $1, $14
    +    sw    $1, (UZ_EPC*4)($29)    # save c0_epc
    +    mfc0  $1, $12
    +    sw    $1, (UZ_SR*4)($29)     # save c0_sr
    +    mfc0  $1, $4, 2
    +    sw    $1, (UZ_TH*4)($29)     # save c0_th
    +    mfc0  $1, $13
    +    sw    $1, (UZ_CR*4)($29)     # save c0_cr
    +    mfc2  $1, $0
    +    sw    $1, (UZ_PTPR*4)($29)   # save c2_ptpr
    +
    +    sw    $26, (UZ_MODE*4)($29)  # save previous c2_mode (user or kernel)

Before dispatching on the XCODE field, the entry code now stores the uzone
base address into the thread descriptor:

    +    mfc0  $4, $4, 2              # $4 <= pointer on thread desc
    +    sw    $29, 8($4)             # update uzone pointer in thread desc

and the three handlers, which no longer take arguments, are called with a
plain jal:

    -    la    $1, hal_do_exception
    -    jalr  $1                     # call exception handler
    -    addiu $29, $29, -8           # hal_do_exception has 2 args
    -    addiu $29, $29, 8
    +    jal   hal_do_exception       # call exception handler
    +    nop

At kernel exit the uzone pointer is taken from $29 instead of $19, gp/sp/s8/ra
are restored from their new slots, and c0_sr is restored entirely from its
saved value instead of merging its 5 LSB into the current SR:

    -    lw    $17, (UZ_SR*4)($27)    # get saved SR value from uzone
    -    andi  $17, $17, 0x1F         # keep only the 5 LSB bits
    -    mfc0  $26, $12               # get current SR value from CP0
    -    or    $26, $26, $17          # merge the two values
    -    mtc0  $26, $12               # setup new SR to CP0
    +    lw    $1, (UZ_SR*4)($27)
    +    mtc0  $1, $12                # restore c0_sr from uzone

hal_kentry_eret keeps the single eret instruction ("jump to EPC, reset EXL
bit"); the leading nop is removed.
trunk/hal/tsar_mips32/core/hal_kentry.h (r407 → r408)

The UZ_* mnemonics are renumbered to match the new hal_kentry.S layout above,
and the structure comment now warns that the layout is replicated in the
assembly file. The hal_kentry_enter() description is rewritten:

    - * When we enter the kernel, we test the status register:
    - * - If the core is in user mode, we desactivate the MMU, and we save
    - *   the core context in the uzone of the calling thread descriptor.
    - * - If the core is already in kernel mode (in case of interrupt),
    - *   we save the context in the kernel stack.
    - * - In both cases, we increment the cores_in_kernel variable,
    - *   and we call the relevant exception/interrupt/syscall handler
    - *
    - * When we exit the kernel after handler execution:
    - * - we restore the core context from the uzone and return to user space,
    - *   calling the hal_kentry_eret()
    + * It can be executed by a core in user mode (in case of exception or syscall),
    + * or by a core already in kernel mode (in case of interrupt).
    + *
    + * In both cases it allocates an "uzone" space in the kernel stack to save the
    + * CPU registers values, desactivates the MMU, and calls the relevant handler
    + * (exception/interrupt/syscall)
    + *
    + * After handler execution, it restores the CPU context from the uzone and jumps
    + * to the address contained in EPC calling hal_kentry_eret()

A typo in the hal_kentry_eret() comment is also fixed: "that and the EXL bit"
becomes "that reset the EXL bit in the c0_sr register".
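With this layout, a C handler sees the trap frame as a flat array of UZ_REGS
words indexed by the UZ_* mnemonics. A minimal sketch follows; the printk()
declaration is assumed and the helper name is illustrative:

    #include <stdint.h>

    #define UZ_V0    2
    #define UZ_EPC  33
    #define UZ_SR   34

    extern void printk( char * format , ... );   /* kernel printf, assumed */

    /* dump a few slots of the trap frame saved by hal_kentry_enter():
       uzone[i] holds the register whose UZ_* mnemonic equals i */
    static void show_trap_frame( uint32_t * uzone )
    {
        printk( "epc %x / sr %x / v0 %x\n",
                uzone[UZ_EPC] , uzone[UZ_SR] , uzone[UZ_V0] );
    }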
trunk/hal/tsar_mips32/core/hal_switch.S (r407 → r408)

hal_do_cpu_switch() is only reorganised: redundant blank lines are removed and
the sync instruction moves from the end of the save sequence to just before
the final jr $31, after the new context has been restored.

hal_do_cpu_save() now makes a single assumption, that $4 points to the target
context, instead of also receiving the thread descriptor pointer in $5 and a
stack-pointer offset in $6. It saves the current sp directly and reads c0_th
itself:

    -    move  $27, $5
    -    sw    $27, 35*4($26)   /* save child thread to slot 35 */
    -
    -    add   $27, $6, $29
    -    sw    $27, 29*4($26)   /* save (sp_29 + offset) to slot 29 */
    +    sw    $29, 29*4($26)   /* save sp to slot 29 */
    ...
    +    mfc0  $27, $4, 2
    +    sw    $27, 35*4($26)   /* save c0_th to slot 35 */
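The caller-side contract of hal_do_cpu_switch() is worth spelling out: each
thread resumes execution just after its own call once it is rescheduled. A
sketch under that assumption, with an illustrative wrapper name:

    typedef struct hal_cpu_context_s hal_cpu_context_t;

    /* assembly routine above: $4 = ctx_old, $5 = ctx_new */
    extern void hal_do_cpu_switch( hal_cpu_context_t * ctx_old,
                                   hal_cpu_context_t * ctx_new );

    static void sched_switch_to( hal_cpu_context_t * old_ctx,
                                 hal_cpu_context_t * new_ctx )
    {
        /* saves all GPRs plus epc/ptpr/mode/sr/th into old_ctx,
           then restores them from new_ctx */
        hal_do_cpu_switch( old_ctx , new_ctx );
        /* returns here only when this thread is scheduled again */
    }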
trunk/hal/tsar_mips32/core/hal_syscall.c (r407 → r408)

hal_do_syscall() also loses its (thread, regs_tbl) arguments. It reads the
service number and the four arguments from the uzone of the entering thread,
calls the architecture-independent do_syscall(), then re-reads CURRENT_THREAD
before writing the results: the thread that leaves the kernel may differ from
the one that entered it (typically after a fork), so the return value and the
EPC increment must go into the uzone of the exiting thread. The errno copy
into UZ_V1 is dropped, and the ad-hoc CONFIG_SYSCALL_DEBUG printk() calls are
replaced by syscall_dmsg() traces showing the service name and uzone address:

    +    // get pointer on exit_thread uzone,
    +    // exit_thread can be different from enter_thread
    +    this       = CURRENT_THREAD;
    +    exit_uzone = (uint32_t *)this->uzone;
    +
    +    // set syscall return value to uzone
    +    exit_uzone[UZ_V0]   = retval;
    +
    +    // update EPC in uzone
    +    exit_uzone[UZ_EPC] += 4;
trunk/hal/tsar_mips32/core/hal_types.h (r407 → r408)

A typo in the extended-pointer comment is fixed:

    - * An extended pointer a 64 bits integer, structured in two fields :
    + * An extended pointer is a 64 bits integer, structured in two fields :
      *  - cxy : cluster identifier.
      *  - ptr : pointer in the virtual space of a single cluster.
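A sketch of the corresponding encoding, assuming the usual ALMOS-MKH
convention of the cluster identifier packed in the upper 32 bits; the real
macros are defined in the hal headers and these bodies are only illustrative:

    #include <stdint.h>

    typedef uint64_t xptr_t;   /* extended pointer: { cxy , ptr } */
    typedef uint32_t cxy_t;    /* cluster identifier */

    #define XPTR( cxy , ptr )  ( (((xptr_t)(cxy)) << 32) | \
                                 (uint32_t)(intptr_t)(ptr) )
    #define GET_CXY( xp )      ( (cxy_t)((xp) >> 32) )
    #define GET_PTR( xp )      ( (void *)(intptr_t)(uint32_t)(xp) )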
trunk/hal/tsar_mips32/drivers/soclib_bdv.c
trunk/hal/tsar_mips32/drivers/soclib_dma.c
trunk/hal/tsar_mips32/drivers/soclib_hba.c
trunk/hal/tsar_mips32/drivers/soclib_tty.c
(r407 → r408)

sched_yield() now takes a cause string. Every place where a server thread
blocks on its device ISR and deschedules is updated the same way:

    -    thread_block( CURRENT_THREAD , THREAD_BLOCKED_DEV_ISR );
    -    sched_yield( );
    +    thread_block( CURRENT_THREAD , THREAD_BLOCKED_DEV_ISR );
    +    sched_yield( "blocked on ISR" );

soclib_hba.c passes the same string in its asynchronous-access retry path,
which calls sched_yield() without blocking first.
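The pattern these drivers share is a synchronous command that parks the server
thread until the device ISR unblocks it. A sketch with an assumed flag value
and signatures inferred from the calls above:

    #include <stdint.h>

    typedef struct thread_s thread_t;

    #define THREAD_BLOCKED_DEV_ISR 0x10   /* hypothetical flag value */

    extern void sched_yield( char * cause );
    extern void thread_block( thread_t * thread , uint32_t cause );

    /* block the calling server thread until the device ISR unblocks it */
    static void dev_wait_for_isr( thread_t * this )
    {
        thread_block( this , THREAD_BLOCKED_DEV_ISR );
        sched_yield( "blocked on ISR" );   /* resumes after the ISR unblocks us */
    }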