- Timestamp: Aug 29, 2017, 12:03:37 PM
- Location: trunk/hal
- Files: 2 deleted, 13 edited
trunk/hal/generic/hal_gpt.h
r402 -> r406

   ****************************************************************************************
    * @ gpt   : pointer on generic page table descriptor.
 +  * @ pid   : process identifier.
    ***************************************************************************************/
 - void hal_gpt_print( gpt_t * gpt );
 + void hal_gpt_print( gpt_t * gpt,
 +                     pid_t   pid );
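The pid argument removes hal_gpt_print()'s implicit dependency on the calling
thread, so a page table can now be dumped on behalf of any process. A minimal
usage sketch (the process_t field names are assumptions inferred from call
sites visible elsewhere in this changeset, not part of the commit):

    // dump the generic page table of an arbitrary process (sketch)
    void vmm_gpt_dump( process_t * process )
    {
        hal_gpt_print( &process->vmm.gpt , process->pid );
    }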
trunk/hal/tsar_mips32/core/hal_context.c
r317 -> r406

   #include <cluster.h>
   #include <hal_context.h>
 + #include <hal_kentry.h>

 - #define SR_USR_MODE       0xFC11
 - #define SR_USR_MODE_FPU   0x2000FC11
 - #define SR_SYS_MODE       0xFC00
 + #define SR_USR_MODE       0x0000FC13
 + #define SR_USR_MODE_FPU   0x2000FC13
 + #define SR_SYS_MODE       0x0000FC00

 - // These structures define the cpu_context and fpu_context for TSAR MIPS32.
 + // This structure defines the cpu_context for TSAR MIPS32.
   // These registers are saved/restored at each context switch.
 - // WARNING : update the hal_***_context_save() and hal_***_context_restore()
 - //           functions when modifying this structure, and check the two
 - //           CONFIG_CPU_CTX_SIZE & CONFIG_FPU_CTX_SIZE configuration parameters.
 + // WARNING : check the two CONFIG_CPU_CTX_SIZE & CONFIG_FPU_CTX_SIZE configuration
 + //           parameters when modifying this structure.

       uint32_t t8_24;     // slot 24
 -     uint32_t t8_25;     // slot 25
 +     uint32_t t9_25;     // slot 25
       uint32_t hi_26;     // slot 26
       uint32_t lo_27;     // slot 27

 - // Seven registers are initialised by this function:
 - // GPR : sp_29 / fp_30 / ra_31
 - // CP0 : c0_sr / c0_th
 + // This function allocates and initializes the cpu_context structure in the thread
 + // descriptor. The following context slots are initialised by this function:
 + // GPR : a0_04 / sp_29 / fp_30 / ra_31
 + // CP0 : c0_sr / c0_th / c0_epc
   // CP2 : c2_ptpr / c2_mode

In hal_cpu_context_create():

       kmem_req_t  req;

 +     assert( (sizeof(hal_cpu_context_t) <= CONFIG_CPU_CTX_SIZE) , __FUNCTION__ ,
 +             "inconsistent CPU context size" );
 +
 -     context_dmsg("\n[INFO] %s : enters for thread %x in process %x\n",
 +     context_dmsg("\n[DMSG] %s : enters for thread %x in process %x\n",
                    __FUNCTION__ , thread->trdid , thread->process->pid );

       // allocate memory for cpu_context
       req.type  = KMEM_CPU_CTX;
 -     req.size  = sizeof(hal_cpu_context_t);
       req.flags = AF_KERNEL | AF_ZERO;

       // initialise context
 +     context->a0_04   = (uint32_t)thread->entry_args;
       context->sp_29   = sp_29;
       context->fp_30   = sp_29;                         // TODO check this [AG]
 -     context->ra_31   = (uint32_t)thread->entry_func;
 +     context->ra_31   = (uint32_t)&hal_kentry_eret;
 +     context->c0_epc  = (uint32_t)thread->entry_func;
       context->c0_sr   = c0_sr;
       context->c0_th   = (uint32_t)thread;
       context->c2_ptpr = c2_ptpr;
       context->c2_mode = c2_mode;

 -     context_dmsg("\n[INFO] %s : exit for thread %x in process %x / ra = %x\n",
 -                  __FUNCTION__ , thread->trdid , thread->process->pid , context->ra_31 );
 +     context_dmsg("\n[DMSG] %s : exit for thread %x in process %x\n"
 +                  " - a0 = %x\n" " - sp = %x\n" " - fp = %x\n" " - ra = %x\n"
 +                  " - sr = %x\n" " - th = %x\n" " - epc = %x\n"
 +                  " - ptpr = %x\n" " - mode = %x\n",
 +                  __FUNCTION__ , thread->trdid , thread->process->pid,
 +                  context->a0_04, context->sp_29, context->fp_30, context->ra_31,
 +                  context->c0_sr, context->c0_th, context->c0_epc,
 +                  context->c2_ptpr, context->c2_mode );
       return 0;
   } // end hal_cpu_context_create()

In hal_cpu_context_display():

 -     printk("\n***** cpu_context for thread %x in cluster %x / ctx = %x\n"
 +     printk("\n***** CPU context for thread %x in process %x / cycle %d\n"
              "  gp_28 = %X   sp_29 = %X    ra_31 = %X\n"
              "  c0_sr = %X   c0_epc = %X   c0_th = %X\n"
              "  c2_ptpr = %X c2_mode = %X\n",
 -            thread->trdid, local_cxy, ctx,
 +            thread->trdid, thread->process->pid, hal_time_stamp(),
              ctx->gp_28 , ctx->sp_29 , ctx->ra_31,
              ctx->c0_sr , ctx->c0_epc , ctx->c0_th, ...
   } // end hal_context_display()

The commented-out static hal_do_switch() function (about 120 lines of inline
assembly saving and restoring the GPRs, HI/LO, CP0 and CP2 registers, already
superseded by the hal_switch.S implementation) is deleted.

In hal_cpu_context_switch():

   // These registers are saved/restored to/from CPU context defined by <ctx> argument.
   // - GPR : all, but (zero, k0, k1), plus (hi, lo)
 - // - CP0 : c0_th , c0_sr
 - // - CP2 : c2_ptpr , C2_mode, C2_epc
 + // - CP0 : c0_th , c0_sr , C0_epc
 + // - CP2 : c2_ptpr , C2_mode
 + // old_thread : pointer on current thread descriptor
 + // new_thread : pointer on new thread descriptor
 - void hal_cpu_context_switch( thread_t * old,
 -                              thread_t * new )
 - {
 -     hal_cpu_context_t * ctx_old = old->cpu_context;
 -     hal_cpu_context_t * ctx_new = new->cpu_context;
 + void hal_cpu_context_switch( thread_t * old_thread,
 +                              thread_t * new_thread )
 + {
 +     hal_cpu_context_t * ctx_old = old_thread->cpu_context;
 +     hal_cpu_context_t * ctx_new = new_thread->cpu_context;

   #if CONFIG_CONTEXT_DEBUG
 -     hal_cpu_context_display( old );
 -     hal_cpu_context_display( new );
 +     hal_cpu_context_display( old_thread );
 +     hal_cpu_context_display( new_thread );
   #endif
 +
 +     // reset loadable field in new thread descriptor
 +     new_thread->flags &= ~THREAD_FLAG_LOADABLE;

       hal_do_switch( ctx_old , ctx_new );

In hal_fpu_context_create() (and, for the req.size removal, hal_fpu_context_copy()):

       kmem_req_t  req;

 +     assert( (sizeof(hal_fpu_context_t) <= CONFIG_FPU_CTX_SIZE) , __FUNCTION__ ,
 +             "inconsistent FPU context size" );
 +
       req.type  = KMEM_FPU_CTX;
 -     req.size  = sizeof(hal_fpu_context_t);
       req.flags = AF_KERNEL | AF_ZERO;

Finally, the hal_cpu_context_load() function (about 45 lines, which loaded the
context registers with inline assembly, pushed entry_args on the user stack,
and jumped to user code with eret) is deleted: a new thread now starts through
hal_kentry_eret, using the a0_04 and c0_epc slots initialised by
hal_cpu_context_create().
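The combined effect of the ra_31 / c0_epc changes is that a brand-new thread no
longer needs the deleted hal_cpu_context_load(): the first hal_do_switch() to
such a thread returns into hal_kentry_eret, which erets to the entry function
in user mode. A sketch of the scheduler-side usage (the sched-level helper and
the lazy-creation policy are assumptions; only the hal_* calls come from this
changeset):

    #include <hal_context.h>   // hal_cpu_context_create() / hal_cpu_context_switch()

    // hypothetical helper showing how the context API fits together
    void sched_switch_sketch( thread_t * current , thread_t * next )
    {
        // a brand-new thread gets its CPU context on first dispatch
        if( next->cpu_context == NULL )
        {
            if( hal_cpu_context_create( next ) ) return;   // allocation failed
        }

        // save <current>, restore <next>; for a new thread the restored ra_31
        // points to hal_kentry_eret, so execution resumes at next->entry_func
        // in user mode, with next->entry_args already loaded in a0
        hal_cpu_context_switch( current , next );
    }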
trunk/hal/tsar_mips32/core/hal_exception.c
r401 -> r406

   #include <hal_types.h>
   #include <hal_irqmask.h>
 + #include <hal_special.h>
   #include <hal_exception.h>
   ...
   #include <remote_spinlock.h>
 - #include <mips32_uzone.h>
 + #include <hal_kentry.h>

 - // This enum defines the relevant subtypes for an MMU exception reported by the mips32.
 + // This enum defines the mask values for an MMU exception code reported by the mips32.

   typedef enum
   {
 -     MMU_EXCP_PAGE_UNMAPPED,
 -     MMU_EXCP_USER_PRIVILEGE,
 -     MMU_EXCP_USER_EXEC,
 -     MMU_EXCP_USER_WRITE,
 +     MMU_EXCP_PAGE_UNMAPPED  = 0x0003,
 +     MMU_EXCP_USER_PRIVILEGE = 0x0004,
 +     MMU_EXCP_USER_WRITE     = 0x0008,
 +     MMU_EXCP_USER_EXEC      = 0x1010,
   }
   mmu_exception_subtype_t;

hal_fpu_exception() loses its static qualifier:

 - // This static function is called when a FPU Coprocessor Unavailable exception has been
 + // This function is called when a FPU Coprocessor Unavailable exception has been
   // detected for the calling thread.
   ...
 - static error_t hal_fpu_exception( thread_t * this )
 + error_t hal_fpu_exception( thread_t * this )

hal_mmu_exception() also becomes non-static, takes the IBE/DBE distinction as
an explicit argument instead of guessing from the exception codes, and uses
uint32_t variables:

   // @ this   : pointer on faulty thread descriptor.
 + // @ is_ins : IBE if true / DBE if false.
   // @ return EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC
 - static error_t hal_mmu_exception( thread_t * this )
 + error_t hal_mmu_exception( thread_t * this,
 +                            bool_t     is_ins )
   {
 -     reg_t    mmu_ins_excp_code;  (the four reg_t variables and the intptr_t
 +     uint32_t mmu_ins_excp_code;   bad_vaddr all become uint32_t)
       ...
       process = this->process;
 +
 +     excp_dmsg("\n[DMSG] %s : enter for thread %x in process %x / is_ins = %d\n",
 +               __FUNCTION__ , this->trdid , process->pid , is_ins );

       // get relevant values from MMU
       ...
 +     excp_dmsg("\n[DMSG] %s : icode = %x / ivaddr = %x / dcode = %x / dvaddr = %x\n",
 +               __FUNCTION__ , mmu_ins_excp_code , mmu_ins_bad_vaddr ,
 +               mmu_dat_excp_code , mmu_dat_bad_vaddr );

 -     // get exception code and faulty vaddr
 -     if( mmu_ins_excp_code )
 +     // get exception code and faulty vaddr, depending on IBE/DBE
 +     if( is_ins )
       {
           excp_code = mmu_ins_excp_code;
           bad_vaddr = mmu_ins_bad_vaddr;
       }
 -     else if( mmu_dat_excp_code )
 +     else
       {
           excp_code = mmu_dat_excp_code;
           bad_vaddr = mmu_dat_bad_vaddr;
       }
 -     else
 -     {
 -         return EXCP_NON_FATAL;
 -     }
 -
 -     vmm_dmsg("\n[INFO] %s : enters for thread %x / process %x"
 -              " / bad_vaddr = %x / excep_code = %x\n",
 -              __FUNCTION__, this->trdid , process->pid , bad_vaddr , excp_code );
 -
 -     // on TSAR, a kernel thread should not rise an MMU exception
 -     assert( (this->type != THREAD_USER) , __FUNCTION__ ,
 -             "thread %x is a kernel thread / vaddr = %x\n", this->trdid , bad_vaddr );
 +     excp_dmsg("\n[DMSG] %s : excp_code = %x / bad_vaddr = %x\n",
 +               __FUNCTION__ , excp_code , bad_vaddr );

       // analyse exception code
       if( excp_code & MMU_EXCP_PAGE_UNMAPPED )
       {
 +         excp_dmsg("\n[DMSG] %s : type PAGE_UNMAPPED\n", __FUNCTION__ );
 +
           // enable IRQs before handling page fault
 -         hal_enable_irq( NULL );
 +         // hal_enable_irq( NULL );

           // try to map the unmapped PTE
           error = vmm_handle_page_fault( process,
                                          bad_vaddr >> CONFIG_PPM_PAGE_SHIFT );  // vpn
           // disable IRQs
 -         hal_disable_irq( NULL );
 +         // hal_disable_irq( NULL );

           if( error )   // not enough memory
           {
 -             printk("\n[ERROR] in %s for thread %x : cannot map legal vaddr = %x\n",
 +             printk("\n[ERROR] in %s for thread %x : cannot map vaddr = %x\n",
                      __FUNCTION__ , this->trdid , bad_vaddr );
           }
           else          // page fault successfully handled
           {
 -             vmm_dmsg("\n[INFO] %s : page fault handled for vaddr = %x in thread %x\n",
 -                      __FUNCTION__ , bad_vaddr , this->trdid );
 +             excp_dmsg("\n[DMSG] %s : page fault handled / bad_vaddr = %x / excp_code = %x\n",
 +                       __FUNCTION__ , bad_vaddr , excp_code );

               return EXCP_NON_FATAL;

In hal_do_exception(), the DBE and IBE cases are dispatched separately, and
enter/exit debug messages are added:

       excCode = (regs_tbl[UZ_CR] >> 2) & 0xF;

 +     excp_dmsg("\n[DMSG] %s : enter for thread %x in process %x / xcode = %x / cycle %d\n",
 +               __FUNCTION__ , this->trdid , this->process->pid , excCode , hal_time_stamp() );
 +
       switch(excCode)
       {
           case XCODE_DBE:   // can be non fatal
 +         {
 +             error = hal_mmu_exception( this , false );   // data MMU exception
 +             break;
 +         }
           case XCODE_IBE:   // can be non fatal
           {
 -             error = hal_mmu_exception( this );
 -         }
 -         break;
 +             error = hal_mmu_exception( this , true );    // ins MMU exception
 +             break;
 +         }
           case XCODE_CPU:   // can be non fatal
           {
               ... (unchanged body; the break moves inside the braces) ...
 +             break;
 +         }
           case XCODE_OVR:   // user fatal error
           case XCODE_RI:    // user fatal error
           ...
           {
               error = EXCP_USER_ERROR;
 +             break;
 +         }
           default:
           ...
       }

 +     excp_dmsg("\n[DMSG] %s : exit for thread %x in process %x / cycle %d\n",
 +               __FUNCTION__ , this->trdid , this->process->pid , hal_time_stamp() );
   } // end hal_do_exception()
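Since the enum members are now bit masks rather than ordinals, classification
must test bits, not compare values. A self-contained test harness showing the
decoding order used by hal_mmu_exception() (printf stands in for printk; this
is an illustration, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define MMU_EXCP_PAGE_UNMAPPED   0x0003
    #define MMU_EXCP_USER_PRIVILEGE  0x0004
    #define MMU_EXCP_USER_WRITE      0x0008
    #define MMU_EXCP_USER_EXEC       0x1010

    static const char * mmu_excp_name( uint32_t excp_code )
    {
        // order matters: the non-fatal "unmapped" case is tested first,
        // as in hal_mmu_exception()
        if( excp_code & MMU_EXCP_PAGE_UNMAPPED )  return "PAGE_UNMAPPED";
        if( excp_code & MMU_EXCP_USER_PRIVILEGE ) return "USER_PRIVILEGE";
        if( excp_code & MMU_EXCP_USER_WRITE )     return "USER_WRITE";
        if( excp_code & MMU_EXCP_USER_EXEC )      return "USER_EXEC";
        return "UNKNOWN";
    }

    int main( void )
    {
        printf( "%s\n" , mmu_excp_name( 0x0008 ) );   // prints USER_WRITE
        return 0;
    }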
trunk/hal/tsar_mips32/core/hal_gpt.c
r401 -> r406

In hal_gpt_create(), the page-size check becomes an assert, and the kentry
vseg is now identity-mapped in every new GPT:

       page_t * page;
       xptr_t   page_xp;
 +     vpn_t    vpn;
 +     error_t  error;
 +     uint32_t attr;
 +
 +     gpt_dmsg("\n[DMSG] %s : core[%x,%d] enter\n",
 +              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );

       // check page size
 -     if( CONFIG_PPM_PAGE_SIZE != 4096 )
 -     {
 -         printk("\n[PANIC] in %s : For TSAR, the page must be 4 Kbytes\n", __FUNCTION__ );
 -         hal_core_sleep();
 -     }
 +     assert( (CONFIG_PPM_PAGE_SIZE == 4096) , __FUNCTION__ ,
 +             "for TSAR, the page must be 4 Kbytes\n" );

       // allocates 2 physical pages for PT1
       ...
       if( page == NULL )
       {
 -         printk("\n[ERROR] in %s : cannot allocate physical memory for PT1\n", __FUNCTION__ );
 +         printk("\n[ERROR] in %s : cannot allocate memory for PT1\n", __FUNCTION__ );
           return ENOMEM;
       }

       // initialize generic page table descriptor
       ...
       gpt->page = GET_PTR( page_xp );

 +     // identity map the kentry_vseg (must exist for all processes)
 +     attr = GPT_MAPPED | GPT_SMALL | GPT_EXECUTABLE | GPT_CACHABLE | GPT_GLOBAL;
 +     for( vpn = CONFIG_VMM_KENTRY_BASE;
 +          vpn < (CONFIG_VMM_KENTRY_BASE + CONFIG_VMM_KENTRY_SIZE); vpn++ )
 +     {
 +         gpt_dmsg("\n[DMSG] %s : identity map vpn %d\n", __FUNCTION__ , vpn );
 +
 +         error = hal_gpt_set_pte( gpt,
 +                                  vpn,
 +                                  (local_cxy<<20) | (vpn & 0xFFFFF),
 +                                  attr );
 +
 +         if( error )
 +         {
 +             printk("\n[ERROR] in %s : cannot identity map kentry vseg\n", __FUNCTION__ );
 +             return ENOMEM;
 +         }
 +     }
 +
 +     gpt_dmsg("\n[DMSG] %s : core[%x,%d] exit\n",
 +              __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
 +
       return 0;
   } // end hal_gpt_create()

In hal_gpt_print(), the pid becomes an argument and the vpn is displayed:

 - void hal_gpt_print( gpt_t * gpt )
 + void hal_gpt_print( gpt_t * gpt,
 +                     pid_t   pid )
   {
       ...
       ppn_t    pte2_ppn;
 +     vpn_t    vpn;

 -     printk("*** Page Table for process %x in cluster %x ***\n",
 -            CURRENT_THREAD->process->pid , local_cxy );
       pt1 = (uint32_t *)gpt->ptr;
 +
 +     printk("\n***** Generic Page Table for process %x : &gpt = %x / &pt1 = %x\n\n",
 +            pid , gpt , pt1 );

       // scan the PT1
       ...
       if( (pte1 & TSAR_MMU_SMALL) == 0 )   // BIG page
       {
 -         printk(" - BIG : pt1[%d] = %x\n", ix1 , pte1 );
 +         vpn = ix1 << 9;
 +         printk(" - BIG : vpn = %x / pt1[%d] = %X\n", vpn , ix1 , pte1 );
       }
       else   // SMALL pages
       ...
           if( (pte2_attr & TSAR_MMU_MAPPED) != 0 )
           {
 -             printk(" - SMALL : pt1[%d] = %x / pt2[%d] / pt2[%d]\n",
 -                    ix1 , pt1[ix1] , 2*ix2 , pte2_attr , 2*ix2+1 , pte2_ppn );
 +             vpn = (ix1 << 9) | ix2;
 +             printk(" - SMALL : vpn = %x / PT2[%d] = %x / pt2[%d] = %x\n",
 +                    vpn , 2*ix2 , pte2_attr , 2*ix2+1 , pte2_ppn );
           }

In hal_gpt_set_pte(), the allocation of a missing PT2 is restructured as a
retry loop around an atomic compare-and-swap on the PT1 entry:

 - uint32_t          * pt1;        // virtual base addres of PT1
 - volatile uint32_t * pte1_ptr;   // pointer on PT1 entry
 + uint32_t * pt1;                 // PT1 base addres
 + uint32_t * pte1_ptr;            // pointer on PT1 entry
   uint32_t   pte1;                // PT1 entry value
   ppn_t      pt2_ppn;             // PPN of PT2
 - uint32_t * pt2;                 // virtual base address of PT2
 + uint32_t * pt2;                 // PT2 base address
   uint32_t   small;               // requested PTE is for a small page
 - bool_t     atomic;
 + bool_t     success;             // exit condition for while loop below
   ...
 - // get PT1 entry value
 + // get pointer on PT1[ix1]
   pte1_ptr = &pt1[ix1];
 - pte1     = *pte1_ptr;

 - // Big pages (PTE1) are only set for the kernel vsegs, in the kernel init phase.
 + // PTE1 (big page) are only set for the kernel vsegs, in the kernel init phase.
   // There is no risk of concurrent access.
   if( small == 0 )
   {
 -     if( pte1 != 0 )
 -     {
 -         panic("\n[PANIC] in %s : set a big page in a mapped PT1 entry / PT1[%d] = %x\n",
 -               __FUNCTION__ , ix1 , pte1 );
 -     }
 +     // get current pte1 value
 +     pte1 = *pte1_ptr;
 +
 +     assert( (pte1 == 0) , __FUNCTION__ ,
 +             "try to set a big page in a mapped PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );

       // set the PTE1
       ...

   // From this point, the requested PTE is a PTE2 (small page)

 + // loop to access PTE1 and get pointer on PT2
 + success = false;
 + do
 + {
 +     // get current pte1 value
 +     pte1 = *pte1_ptr;
 +
 +     // allocate a PT2 if PT1 entry not valid
 +     if( (pte1 & TSAR_MMU_MAPPED) == 0 )   // PT1 entry not valid
 +     {
 +         // allocate one physical page for the PT2
 +         kmem_req_t req;
 +         req.type  = KMEM_PAGE;
 +         req.size  = 0;                    // 1 small page
 +         req.flags = AF_KERNEL | AF_ZERO;
 +         page = (page_t *)kmem_alloc( &req );
 +         if( page == NULL )
 +         {
 +             printk("\n[ERROR] in %s : cannot allocate PT2\n", __FUNCTION__ );
 +             return ENOMEM;
 +         }
 +
 +         // get the PT2 PPN
 +         page_xp = XPTR( local_cxy , page );
 +         pt2_ppn = ppm_page2ppn( page_xp );
 +
 +         // try to atomicaly set the PT1 entry
 +         pte1    = TSAR_MMU_MAPPED | TSAR_MMU_SMALL | pt2_ppn;
 +         success = hal_atomic_cas( pte1_ptr , 0 , pte1 );
 +
 +         // release allocated PT2 if PT1 entry modified by another thread
 +         if( success == false ) ppm_free_pages( page );
 +     }
 +     else                                  // PT1 entry is valid
 +     {
 +         // This valid entry must be a PTD1
 +         assert( (pte1 & TSAR_MMU_SMALL) , __FUNCTION__ ,
 +                 "try to set a small page in a big PT1 entry / PT1[%d] = %x\n", ix1 , pte1 );
 +
 +         success = true;
 +     }
 +
 +     // get PT2 base from pte1
 +     pt2_ppn = TSAR_MMU_PTBA_FROM_PTE1( pte1 );
 +     pt2     = (uint32_t *)GET_PTR( ppm_ppn2base( pt2_ppn ) );
 + }
 + while (success == false);

(The previous code, which allocated the PT2 before testing the PT1 entry,
retried the CAS in a local do/while, and returned EINVAL on a big PT1 entry,
is removed. Enter/exit gpt_dmsg() traces are also added around the function.)

   // set PTE2 in this order
   ...
   hal_fence();

   return 0;
 } // end of hal_gpt_set_pte()
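The new loop follows the classic lock-free "allocate, publish with CAS, free
on failure" discipline. A self-contained sketch of the same idea using C11
atomics (the kernel uses its own hal_atomic_cas() and physical-page allocator
instead; calloc/free are stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    static _Atomic(uint32_t *) pt1_entry;   // stands in for the shared PT1 slot

    // Allocate a candidate PT2, try to publish it with a single CAS, and
    // release it if another thread won the race.
    uint32_t * install_pt2_sketch( void )
    {
        uint32_t * expected  = NULL;
        uint32_t * candidate = calloc( 1024 , sizeof(uint32_t) );

        if( atomic_compare_exchange_strong( &pt1_entry , &expected , candidate ) )
            return candidate;    // we won: our PT2 is now the shared one

        free( candidate );       // we lost: release ours,
        return expected;         // and use the winner's PT2 (loaded by the CAS)
    }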
trunk/hal/tsar_mips32/core/hal_interrupt.c
r337 -> r406

   #include <thread.h>
   #include <printk.h>
 - #include <do_interrupt.h>
 - #include <hal_interrupt.h>
   #include <soclib_pic.h>

In hal_do_interrupt(), the user/kernel time accounting is removed:

 -     irq_dmsg("\n[INFO] %s : enter / core[%x,%d] / cycle %d\n",
 +     irq_dmsg("\n[DMSG] %s : enter / core[%x,%d] / cycle %d\n",
                __FUNCTION__ , local_cxy , this->core->lid , hal_time_stamp() );
 -
 -     // update user time
 -     thread_user_time_update( this );

       // access local ICU to call the relevant ISR
       soclib_pic_irq_handler();

 -     // update kernel time
 -     thread_kernel_time_update( this );
 -
 -     irq_dmsg("\n[INFO] %s : exit / core[%x,%d] / cycle %d\n",
 +     irq_dmsg("\n[DMSG] %s : exit / core[%x,%d] / cycle %d\n",
                __FUNCTION__ , local_cxy , this->core->lid , hal_time_stamp() );
trunk/hal/tsar_mips32/core/hal_kentry.S
r296 -> r406

The mips32_uzone.h include and the old entry-point description are replaced by
local UZ_* slot definitions (the same 35 indexes now also published in
hal_kentry.h, below) and kernel_config.h:

 - #include <mips32_uzone.h>
 -
 - # This code is the unique kernel entry point in case of exception, interrupt,
 - # or syscall for the TSAR_MIPS32 architecture.
 - # (entry/exit protocol description, moved to hal_kentry.h)
 + #define UZ_MODE   0
 + #define UZ_AT     1
 +   (one define per uzone slot, UZ_V0 ... UZ_LO, as listed in hal_kentry.h)
 + #define UZ_HI    34
 +
 + #define UZ_REGS  35
 +
 + #include <kernel_config.h>

   .section .kentry, "ax", @progbits
   ...
   .extern cluster_core_kernel_enter
 - .extern cluster_core_kernel_exit
 + .extern cluster_core_kentry_exit

   .org 0x180
 - .ent    kernel_enter
 - .global kernel_enter
 +
 + .global hal_kentry_enter
 + .global hal_kentry_eret

   .set noat
   .set noreorder

 - #define SAVE_SIZE       CPU_REGS_NR*4
 - #define LID_WIDTH       2
 - #define CXY_WIDTH       8
 - #define CXY_MASK        0xFF
 - #define MMU_MODE_MASK   0xF
 - #define GID_MASK        0x3FF
 - #define LID_MASK        0x3
 -
   #---------------------------------------------------------------------------------
   # Kernel Entry point for Interrupt / Exception / Syscall
 + # The c2_dext and c2_iext CP2 registers must have been previously set
 + # to "local_cxy", because the kernel runs with the MMU deactivated.
   #---------------------------------------------------------------------------------

 - kernel_enter:
 -     mfc0    $26,   $12             # read SR to test user/kernel mode
 -     andi    $26,   $26,    0x10    # User Mode bitmask
 -     beq     $26,   $0,     kernel_mode
 -     ori     $26,   $0,     0x3     # $26 <= MMU OFF value
 + hal_kentry_enter:
 +     mfc0    $26,   $12             # get c0_sr
 +     andi    $26,   $26,    0x10    # test User Mode bit
 +     beq     $26,   $0,     kernel_mode   # jump if core already in kernel
 +     ori     $27,   $0,     0x3     # $27 <= code for MMU OFF

   # This code is executed when the core is in user mode:
 + #   - save current c2_mode in $26 / set MMU OFF
 + #   - save user stack pointer in $27 / set kernel stack pointer in $29

   user_mode:
 -     mtc2    $26,   $1                  # set MMU OFF
 -     nop
 -     mfc0    $26,   $4,     2           # $26 <= thread pointer
 -     lw      $26,   0($26)              # $26 <= uzone pointer
 -     sw      $29,   (UZ_SP*4)($26)      # save user stack to uzone
 -     lw      $29,   (UZ_KSP*4)($26)     # load kernel stack from uzone
 -     ori     $27,   $0,     0xF         # MMU old value : assumed ON
 -     sw      $27,   (UZ_MODE*4)($26)    # save MMU MODE to uzone
 -     j       unified_mode
 -     or      $27,   $0,     $26         # $27 <= uzone
 +     mfc2    $26,   $1                  # $26 <= c2_mode
 +     mtc2    $27,   $1                  # set MMU OFF
 +     move    $27,   $29                 # $27 <= user stack pointer
 +     mfc0    $29,   $4,     2           # get pointer on thread descriptor from c0_th
 +     addi    $29,   $29,    CONFIG_THREAD_DESC_SIZE
 +     addi    $29,   $29,    -8          # $29 <= kernel stack pointer
 +     j       unified_mode
 +     nop

   # This code is executed when the core is already in kernel mode:
 + #   - save current c2_mode in $26 / set MMU OFF
 + #   - save current kernel stack pointer in $27

   kernel_mode:
 -     (16 instructions computing local_cxy from the core gid, setting CP2_DEXT,
 -      packing CP2_MODE and CP2_DEXT into $26, allocating a SAVE_SIZE uzone in
 -      the kernel stack with $27 as KSP, and saving SP / CP2_MODE / CP2_DEXT)
 +     mfc2    $26,   $1                  # $26 <= c2_mode
 +     mtc2    $27,   $1                  # set MMU OFF
 +     move    $27,   $29                 # $27 <= current kernel stack pointer

   # This code is executed in both modes (user or kernel):
 + # The assumptions are:
 + #   - c2_mode contains the MMU OFF value.
 + #   - $26 contains the previous c2_mode value.
 + #   - $27 contains the previous sp value (can be usp or ksp).
 + #   - $29 contains the kernel stack pointer.
 + # We execute the following actions:
 + #   - allocate an uzone in kernel stack, incrementing $29.
 + #   - save relevant registers to uzone.
 + #   - set the SR in kernel mode : IRQ disabled, clear exl.
 + #   - signal the kernel entry.

   unified_mode:
 +     addiu   $29,   $29,    -(UZ_REGS*4)    # allocate uzone in kernel stack
 +
       (the sw $1 ... sw $25 sequence saving AT, v0/v1, a0..a3 and t0..t9 to the
        UZ_* slots is unchanged, except that the base register is now $29
        instead of $27)
 +
 +     sw      $26,   (UZ_MODE*4)($29)        # save c2_mode
 +     sw      $27,   (UZ_SP*4)($29)          # save sp
 +
       sw      $28,   (UZ_GP*4)($29)
       sw      $30,   (UZ_S8*4)($29)
       sw      $31,   (UZ_RA*4)($29)

       mfc0    $16,   $14
 -     sw      $16,   (UZ_EPC*4)($27)         # save EPC
 +     sw      $16,   (UZ_EPC*4)($29)         # save c0_epc
       mflo    $14
 -     sw      $14,   (UZ_LO*4)($27)          # save LO
 +     sw      $14,   (UZ_LO*4)($29)          # save lo
       mfhi    $15
 -     sw      $15,   (UZ_HI*4)($27)          # save HI
 +     sw      $15,   (UZ_HI*4)($29)          # save hi
       mfc0    $18,   $12
 -     sw      $18,   (UZ_SR*4)($27)          # save SR
 +     sw      $18,   (UZ_SR*4)($29)          # save c0_sr
       mfc0    $17,   $13
 -     sw      $17,   (UZ_CR*4)($27)          # save CR
 +     sw      $17,   (UZ_CR*4)($29)          # save c0_cr
 +     mfc2    $26,   $1

       srl     $3,    $18,    5
       sll     $3,    $3,     5
 -     mtc0    $3,    $12                     # Set new SR
 +     mtc0    $3,    $12                     # set new sr

       # signal that core enters kernel
       la      $1,    cluster_core_kernel_enter
 -     jal
 +     jalr    $1
       nop

The handler dispatch now takes the uzone base from $29, and the exit label is
renamed:

       mfc0    $4,    $4,   2     # $4 <= thread pointer (first arg)
 -     or      $5,    $0,   $27   # $5 <= uzone pointer (second arg)
 -     or      $19,   $0,   $27   # $19 <= &uzone (for kernel_exit)
 +     or      $5,    $0,   $29   # $5 <= uzone pointer (second arg)
 +     or      $19,   $0,   $29   # $19 <= &uzone (for kentry_exit)
       ...
 -     j       kernel_exit        # jump to kernel_exit
 +     j       kentry_exit        # jump to kentry_exit

 - kernel_exit:
 + kentry_exit:

       # signal that core exit kernel
       ...
 - # restore context from uzone
 + # restore registers from uzone
       or      $27,   $0,   $19   # $27 <= &uzone
       ... (lw sequence unchanged) ...
       lw      $31,   (UZ_RA*4)($27)

 -     lw      $26,   (UZ_DEXT*4)($27)
 -     mtc2    $26,   $24         # restore CP2_DEXT from uzone
 -
       lw      $26,   (UZ_MODE*4)($27)
       mtc2    $26,   $1          # restore CP2_MODE from uzone

 + # -----------------------------------------------------------------------------------
 + # eret function
 + # -----------------------------------------------------------------------------------
 +
 + hal_kentry_eret:
       nop
       eret

 - .end kernel_enter
   .set reorder
   .set at
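With the uzone now a plain array of 32-bit words at the base of the kernel
stack, a handler can read and patch the saved state by indexing with the UZ_*
mnemonics. A hedged sketch of what a syscall handler can do with the two
arguments loaded above (the sys_demo helper is hypothetical; the real
dispatcher is hal_do_syscall):

    #include <stdint.h>
    #include "hal_kentry.h"          // UZ_* slot indexes

    typedef uint32_t reg_t;          // matches the kernel's reg_t on MIPS32

    // hypothetical syscall implementation, for illustration only
    static reg_t sys_demo( reg_t arg ) { return arg + 1; }

    void hal_do_syscall_sketch( void * thread , reg_t * uzone )
    {
        (void)thread;
        reg_t epc  = uzone[UZ_EPC];        // user PC saved by hal_kentry_enter
        reg_t arg0 = uzone[UZ_A0];         // first user argument

        uzone[UZ_V0]  = sys_demo( arg0 );  // return value goes back through v0
        uzone[UZ_EPC] = epc + 4;           // skip the syscall instruction on eret
    }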
trunk/hal/tsar_mips32/core/hal_kentry.h
r279 -> r406

The single CPU_IN_KERNEL flag is replaced by the uzone slot mnemonics and the
two kentry entry points:

 - #define CPU_IN_KERNEL 1
 + //////////////////////////////////////////////////////////////////////////////////////////
 + // This file defines the MIPS32 specific mnemonics to access the "uzone", that is
 + // a fixed size array of 32 bits integers, used by the kentry function to save/restore
 + // the MIPS32 CPU registers, at each exception / interruption / syscall.
 + // It also defines several initial values for the SR register.
 + //
 + // This file is included in the hal_kentry.S, hal_syscall.c, hal_exception.c,
 + // and hal_context.c files.
 + //////////////////////////////////////////////////////////////////////////////////////////
 +
 + #define UZ_MODE   0    /* c2_mode */
 + #define UZ_AT     1
 + #define UZ_V0     2
 + #define UZ_V1     3
 + #define UZ_A0     4
 + #define UZ_A1     5
 + #define UZ_A2     6
 + #define UZ_A3     7
 + #define UZ_T0     8
 + #define UZ_T1     9
 + #define UZ_T2    10
 + #define UZ_T3    11
 + #define UZ_T4    12
 + #define UZ_T5    13
 + #define UZ_T6    14
 + #define UZ_T7    15
 + #define UZ_T8    16
 + #define UZ_T9    17
 + #define UZ_S0    18
 + #define UZ_S1    19
 + #define UZ_S2    20
 + #define UZ_S3    21
 + #define UZ_S4    22
 + #define UZ_S5    23
 + #define UZ_S6    24
 + #define UZ_S7    25
 + #define UZ_S8    26
 + #define UZ_GP    27
 + #define UZ_RA    28
 + #define UZ_EPC   29    /* c0_epc */
 + #define UZ_CR    30    /* c0_cr */
 + #define UZ_SP    31
 + #define UZ_SR    32    /* c0_sr */
 + #define UZ_LO    33
 + #define UZ_HI    34
 +
 + #define UZ_REGS  35
 +
 + /*************************************************************************************
 +  * The hal_kentry_enter() function is the unique kernel entry point in case of
 +  * exception, interrupt, or syscall for the TSAR_MIPS32 architecture.
 +  *
 +  * When we enter the kernel, we test the status register:
 +  * - If the core is in user mode, we deactivate the MMU, and we save
 +  *   the core context in the uzone of the calling thread descriptor.
 +  * - If the core is already in kernel mode (in case of interrupt),
 +  *   we save the context in the kernel stack.
 +  * - In both cases, we increment the cores_in_kernel variable,
 +  *   and we call the relevant exception/interrupt/syscall handler.
 +  *
 +  * When we exit the kernel after handler execution, we restore the core
 +  * context from the uzone and return to user space, calling hal_kentry_eret().
 +  ************************************************************************************/
 + void hal_kentry_enter();
 +
 + /*************************************************************************************
 +  * The hal_kentry_eret() function contains only the assembly "eret" instruction,
 +  * which resets the EXL bit in the c0_sr register and jumps to the address
 +  * contained in the c0_epc register.
 +  ************************************************************************************/
 + void hal_kentry_eret();

   #endif /* _HAL_KENTRY_H_ */
trunk/hal/tsar_mips32/core/hal_special.c
r296 -> r406

The inline assembly that reads the four MMU exception registers now writes
through its pointer arguments; the previous output constraints clobbered the
local pointer variables themselves instead of the caller's variables:

           "mfc2    %2,   $12    \n"
           "mfc2    %3,   $14    \n"
 -         : "=&r"(mmu_ins_excp_code),
 -           "=&r"(mmu_ins_bad_vaddr),
 -           "=&r"(mmu_dat_excp_code),
 -           "=&r"(mmu_dat_bad_vaddr) );
 - }
 +         : "=&r"(*mmu_ins_excp_code),
 +           "=&r"(*mmu_ins_bad_vaddr),
 +           "=&r"(*mmu_dat_excp_code),
 +           "=&r"(*mmu_dat_bad_vaddr) );
 + }
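The bug class is worth a minimal illustration: an "=&r" output constraint must
name the object being written, so when the outputs arrive as pointer
parameters the operands must be dereferenced. A MIPS GNU C sketch (the
function and its behavior are hypothetical, not from this file):

    #include <stdint.h>

    static inline void read_zero_twice( uint32_t * a , uint32_t * b )
    {
        // "=&r"(a) would overwrite the local pointer variable itself;
        // "=&r"(*a) makes the asm result land in the caller's variable
        asm volatile( "move  %0, $0 \n"
                      "move  %1, $0 \n"
                      : "=&r"(*a) , "=&r"(*b) );
    }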
trunk/hal/tsar_mips32/core/hal_switch.S
r367 -> r406

hal_do_switch() moves to a dedicated section, and the CP0/CP2 save/restore
order is fixed:

 - .section .text, "ax", @progbits
 + .section .switch, "ax", @progbits

 - .ent   hal_do_switch
 - .globl hal_do_switch
 + .ent    hal_do_switch
 + .global hal_do_switch

In the save sequence, c0_sr and c0_th are now saved before the CP2 registers:

       sw    $31,  31*4($26)

 +     mfc0  $27,  $12
 +     sw    $27,  34*4($26)    /* save c0_sr to slot 34 */
 +     mfc0  $27,  $4,  2
 +     sw    $27,  35*4($26)    /* save c0_th to slot 35 */
 +
       mfc2  $27,  $0
       sw    $27,  32*4($26)    /* save c2_ptpr to slot 32 */
       mfc2  $27,  $1
       sw    $27,  33*4($26)    /* save c2_mode to slot 33 */
 -
 -     mfc0  $27,  $12
 -     sw    $27,  34*4($26)    /* save c0_sr to slot 34 */
 -     mfc0  $27,  $4,  2
 -     sw    $27,  35*4($26)    /* save c0_th to slot 35 */

       sync

In the restore sequence, both c2_mode and c0_sr are loaded from the context
before the mtc2 that rewrites c2_mode, so no memory access is needed once the
MMU mode has changed:

       lw    $31,  31*4($26)

 -     lw    $27,  32*4($26)
 +     lw    $27,  32*4($26)    /* $27 <= c2_ptpr */
       mtc2  $27,  $0           /* restore c2_ptpr from slot 32 */
 -     lw    $27,  33*4($26)
 +
 +     lw    $27,  35*4($26)    /* $27 <= c0_th */
 +     mtc0  $27,  $4,  2       /* restore c0_th from slot 35 */
 +
 +     lw    $27,  33*4($26)    /* $27 <= c2_mode */
 +     lw    $26,  34*4($26)    /* $26 <= c0_sr */
 +
       mtc2  $27,  $1           /* restore c2_mode from slot 33 */
 -
 -     lw    $27,  34*4($26)
 -     mtc0  $27,  $12          /* restore c0_sr from slot 34 */
 -     lw    $27,  35*4($26)
 -     mtc0  $27,  $4,  2       /* restore c0_th from slot 35 */
 +     mtc0  $26,  $12          /* restore c0_sr from slot 34 */

       jr    $31                /* return to caller */
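The assembly addresses the context by fixed slot numbers, so it must stay in
sync with the hal_cpu_context_t layout in hal_context.c. A compile-time check
is a cheap guard; a sketch using C11 _Static_assert (ctx_check_t is a
hypothetical abridged mirror of the real structure, not kernel code):

    #include <stddef.h>
    #include <stdint.h>

    // abridged mirror of hal_cpu_context_t: slots 0..31 hold c0_epc, the
    // GPRs and hi/lo; slots 32..35 hold the CP2/CP0 registers
    typedef struct ctx_check_s
    {
        uint32_t slots[32];   // slots 0..31
        uint32_t c2_ptpr;     // slot 32
        uint32_t c2_mode;     // slot 33
        uint32_t c0_sr;       // slot 34
        uint32_t c0_th;       // slot 35
    } ctx_check_t;

    // if someone reorders the structure, the build breaks instead of the kernel
    _Static_assert( offsetof( ctx_check_t , c0_th ) == 35 * 4 ,
                    "slot 35 used by hal_switch.S no longer matches c0_th" );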
trunk/hal/tsar_mips32/core/hal_syscall.c
r62 -> r406

   #include <do_syscall.h>
   #include <thread.h>
 - #include <mips32_uzone.h>
 + #include <hal_kentry.h>
trunk/hal/tsar_mips32/drivers/soclib_pic.c
r380 -> r406

In soclib_pic_irq_handler(), the XCU base pointer is fetched once, and the
acknowledge reads are reworked:

   chdev_t  * src_chdev;     // pointer on source chdev descriptor
   uint32_t   index;         // WTI / HWI / PTI index
 - uint32_t   ack;           // XCU acknowledge requires a read...
 -
 - core_t   * core = CURRENT_THREAD->core;
 +
 + uint32_t * xcu_base = soclib_pic_xcu_base();
 +
 + core_t   * core = CURRENT_THREAD->core;

   // get XCU status
   ...
 - irq_dmsg("\n[INFO] %s : enter for core[%x,%d] / WTI = %x / HWI = %x / WTI = %x\n",
 + irq_dmsg("\n[DMSG] %s : enter for core[%x,%d] / WTI = %x / HWI = %x / WTI = %x\n",
            __FUNCTION__ , local_cxy , core->lid , wti_status , hwi_status , pti_status );

For an IPI:

   assert( (index == core->lid) , __FUNCTION__ , "illegal IPI index" );

 - // read mailbox content to acknowledge WTI,
 - uint32_t * base = soclib_pic_xcu_base();
 - ack = base[(XCU_WTI_REG << 5) | core->lid];
 + irq_dmsg("\n[DMSG] %s : core[%x,%d] received an IPI / cycle %d\n",
 +          __FUNCTION__ , local_cxy , core->lid , hal_time_stamp() );
 +
 + // acknowledge WTI (this require an XCU read)
 + uint32_t ack = xcu_base[(XCU_WTI_REG << 5) | core->lid];

   // check RPC FIFO, and activate or create a RPC thread
   ...

For external WTI, HWI and PTI, the [INFO] messages become [DMSG] messages with
a cycle number, and the XCU accesses go through the single xcu_base pointer:

 - // disable HWI in local XCU controller
 - uint32_t * base = soclib_pic_xcu_base();
 - base[(XCU_MSK_HWI_DISABLE << 5) | core->lid] = 1 << core->lid;
 + // disable HWI in local XCU controller
 + xcu_base[(XCU_MSK_HWI_DISABLE << 5) | core->lid] = 1 << core->lid;

For the PTI (TICK) event:

 - // acknowledge PTI
 - uint32_t * base = soclib_pic_xcu_base();
 - ack = base[(XCU_PTI_ACK << 5) | core->lid];
 + // acknowledge PTI (this require a read access to XCU)
 + uint32_t ack = xcu_base[(XCU_PTI_ACK << 5) | core->lid];

   // execute all actions related to TICK event
 - core_clock( core );
 + // condition is always true, but we must use the ack value
 + if( ack + 1 ) core_clock( core );
 } // end soclib_pic_irq_handler()
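The "if( ack + 1 ) core_clock( core );" idiom exists only to consume the value
returned by the acknowledging load, so the compiler cannot discard the read as
dead code. The conventional way to get the same guarantee is to make the
access itself volatile; a minimal sketch (the register index is a placeholder,
not the real XCU map):

    #include <stdint.h>

    #define XCU_PTI_ACK  9   /* hypothetical register index, for this sketch only */

    // a volatile load is performed even when its result is discarded,
    // so no artificial use of the value is needed
    static inline void xcu_ack_pti( volatile uint32_t * xcu_base , uint32_t lid )
    {
        (void)xcu_base[(XCU_PTI_ACK << 5) | lid];
    }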
trunk/hal/tsar_mips32/kernel.ld
r296 -> r406

 - /* Define the kernel code base address */
 + /* Define the kernel code base addresses */

   seg_kcode_base = 0x00008000;
   ...
   {
       *(.kentry)
 +     *(.switch)
   }
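The new *(.switch) input section only collects code that is explicitly placed
there: hal_switch.S does it with the ".section .switch" directive, and C code
can do the same with a section attribute. A GNU C sketch (the function name is
hypothetical):

    // link this routine into the .switch section, next to hal_do_switch,
    // so it ends up inside the seg_kentry output section
    void __attribute__((section(".switch"))) my_switch_helper( void )
    {
        // body intentionally empty: only the placement matters here
    }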
trunk/hal/x86_64/core/hal_gpt.c
r405 -> r406

 - void hal_gpt_print( gpt_t * gpt )
 + void hal_gpt_print( gpt_t * gpt , pid_t pid )
   {
       x86_panic((char *)__func__);