Changeset 238 for soft/giet_vm/sys
- Timestamp: May 29, 2013, 1:24:09 AM
- Location: soft/giet_vm/sys
- Files: 15 edited
soft/giet_vm/sys/common.c
r232 r238 20 20 /////////////////////////////////////////////////////////////////////////////////// 21 21 22 // current context cache TODO23 24 22 // SR save (used by _it_mask() / it_restore() 25 23 unsigned int _status_register_save[NB_CLUSTERS*NB_PROCS_MAX]; … … 27 25 /////////////////////////////////////////////////////////////////////////////////// 28 26 // _get_sched() 29 // Access CP0 and returns scheduler physical address. 30 /////////////////////////////////////////////////////////////////////////////////// 31 inline unsigned int _get_sched() { 32 unsigned int ret; 33 asm volatile( 34 "mfc0 %0, $22" 35 : "=r"(ret)); 36 return ret; 37 } 38 27 // Access CP0 and returns a pointer (virtual address) on the calling 28 // processor scheduler (taking into account the processor local index). 29 /////////////////////////////////////////////////////////////////////////////////// 30 static_scheduler_t* _get_sched() 31 { 32 static_scheduler_t* psched; 33 unsigned int vaddr; 34 unsigned int lpid = _procid() % NB_PROCS_MAX; 35 36 asm volatile( 37 "mfc0 %0, $22 \n" 38 : "=r"(vaddr) ); 39 40 psched = (static_scheduler_t*)vaddr; 41 return (psched + lpid); 42 } 39 43 40 44 /////////////////////////////////////////////////////////////////////////////////// … … 42 46 // Access CP2 and returns PTPR register. 43 47 /////////////////////////////////////////////////////////////////////////////////// 44 inline unsigned int _get_ptpr() { 48 inline unsigned int _get_ptpr() 49 { 45 50 unsigned int ret; 46 51 asm volatile( … … 50 55 } 51 56 52 53 57 /////////////////////////////////////////////////////////////////////////////////// 54 58 // _get_epc() 55 59 // Access CP0 and returns EPC register. 56 60 /////////////////////////////////////////////////////////////////////////////////// 57 inline unsigned int _get_epc() { 61 inline unsigned int _get_epc() 62 { 58 63 unsigned int ret; 59 64 asm volatile("mfc0 %0, $14" … … 62 67 } 63 68 64 65 69 /////////////////////////////////////////////////////////////////////////////////// 66 70 // _get_bar() 67 71 // Access CP0 and returns BAR register. 68 72 /////////////////////////////////////////////////////////////////////////////////// 69 inline unsigned int _get_bvar() { 73 inline unsigned int _get_bvar() 74 { 70 75 unsigned int ret; 71 76 asm volatile( … … 75 80 } 76 81 77 78 82 /////////////////////////////////////////////////////////////////////////////////// 79 83 // _get_cr() 80 84 // Access CP0 and returns CR register. 81 85 /////////////////////////////////////////////////////////////////////////////////// 82 inline unsigned int _get_cause() { 86 inline unsigned int _get_cause() 87 { 83 88 unsigned int ret; 84 89 asm volatile("mfc0 %0, $13" … … 87 92 } 88 93 89 90 94 /////////////////////////////////////////////////////////////////////////////////// 91 95 // _get_sr() 92 96 // Access CP0 and returns SR register. 
93 97 /////////////////////////////////////////////////////////////////////////////////// 94 inline unsigned int _get_sr() { 98 inline unsigned int _get_sr() 99 { 95 100 unsigned int ret; 96 101 asm volatile( … … 106 111 // This function is NOT USED and NOT TESTED 107 112 /////////////////////////////////////////////////////////////////////////////////// 108 inline void _it_mask() { 113 inline void _it_mask() 114 { 109 115 unsigned int sr_value; 110 116 unsigned int proc_id; … … 121 127 } 122 128 123 124 129 /////////////////////////////////////////////////////////////////////////////////// 125 130 // _it_restore() … … 128 133 // This function is NOT USED and NOT TESTED 129 134 /////////////////////////////////////////////////////////////////////////////////// 130 inline void _it_restore() { 135 inline void _it_restore() 136 { 131 137 unsigned int proc_id; 132 138 // get the processor id to index the _status_register_save table … … 140 146 // Access CP0 and disables IRQs 141 147 /////////////////////////////////////////////////////////////////////////////////// 142 inline void _it_disable() { 148 inline void _it_disable() 149 { 143 150 asm volatile( 144 151 "li $3, 0xFFFFFFFE \n" … … 153 160 // Access CP0 and enables IRQs 154 161 /////////////////////////////////////////////////////////////////////////////////// 155 inline void _it_enable() { 162 inline void _it_enable() 163 { 156 164 asm volatile( 157 165 "li $3, 0x00000001 \n" … … 162 170 } 163 171 164 165 172 //////////////////////////////////////////////////////////////////////////// 166 173 // _get_lock() … … 169 176 // (delay average value = 100 cycles) 170 177 //////////////////////////////////////////////////////////////////////////// 171 inline void _get_lock(unsigned int * plock) { 178 inline void _get_lock(unsigned int * plock) 179 { 172 180 register unsigned int delay = ( _proctime() ^ _procid() << 4) & 0xFF; 173 181 … … 191 199 } 192 200 193 194 201 //////////////////////////////////////////////////////////////////////////// 195 202 // _release_lock() 196 203 //////////////////////////////////////////////////////////////////////////// 197 inline void _release_lock(unsigned int * plock) { 204 inline void _release_lock(unsigned int * plock) 205 { 198 206 asm volatile ( 199 207 "sync\n" /* necessary because of the consistency model in tsar */ … … 202 210 } 203 211 204 205 212 //////////////////////////////////////////////////////////////////////////// 206 213 // _puts() 207 214 // display a string on TTY0 / used for system code debug and log 208 215 //////////////////////////////////////////////////////////////////////////// 209 void _puts(char * buffer) { 216 void _puts(char * buffer) 217 { 210 218 unsigned int * tty_address = (unsigned int *) &seg_tty_base; 211 219 unsigned int n; 212 220 213 for (n = 0; n < 100; n++) { 214 if (buffer[n] == 0) { 215 break; 216 } 221 for (n = 0; n < 100; n++) 222 { 223 if (buffer[n] == 0) break; 217 224 tty_address[TTY_WRITE] = (unsigned int) buffer[n]; 218 225 } 219 226 } 220 227 221 222 228 //////////////////////////////////////////////////////////////////////////// 223 229 // _putx() 224 // display an int (hexa) on TTY0 / used for system code debug and log 225 //////////////////////////////////////////////////////////////////////////// 226 void _putx(unsigned int val) { 230 // display a 32 bits unsigned int as an hexadecimal string on TTY0 231 //////////////////////////////////////////////////////////////////////////// 232 void _putx(unsigned int val) 233 { 227 234 static const char HexaTab[] 
= "0123456789ABCDEF"; 228 235 char buf[11]; … … 233 240 buf[10] = 0; 234 241 235 for (c = 0; c < 8; c++) { 242 for (c = 0; c < 8; c++) 243 { 236 244 buf[9 - c] = HexaTab[val & 0xF]; 237 245 val = val >> 4; … … 240 248 } 241 249 250 //////////////////////////////////////////////////////////////////////////// 251 // _putl() 252 // display a 64 bits unsigned long as an hexadecimal string on TTY0 253 //////////////////////////////////////////////////////////////////////////// 254 void _putl(paddr_t val) 255 { 256 static const char HexaTab[] = "0123456789ABCDEF"; 257 char buf[19]; 258 unsigned int c; 259 260 buf[0] = '0'; 261 buf[1] = 'x'; 262 buf[18] = 0; 263 264 for (c = 0; c < 16; c++) 265 { 266 buf[17 - c] = HexaTab[(unsigned int)val & 0xF]; 267 val = val >> 4; 268 } 269 _puts(buf); 270 } 242 271 243 272 //////////////////////////////////////////////////////////////////////////// … … 266 295 } 267 296 268 269 297 //////////////////////////////////////////////////////////////////////////// 270 298 // _strncmp() … … 284 312 } 285 313 286 287 314 //////////////////////////////////////////////////////////////////////////// 288 315 // _dcache_buf_invalidate() … … 310 337 } 311 338 312 313 //////////////////////////////////////////////////////////////////////////// 314 // _physical_read_access() 315 // This function makes a physical read access to a 32 bits word in memory, 316 // after a temporary DTLB desactivation. 317 //////////////////////////////////////////////////////////////////////////// 318 unsigned int _physical_read_access(unsigned int * paddr) { 319 unsigned int value; 320 321 asm volatile( 322 "li $3, 0xFFFFFFFE \n" 323 "mfc0 $2, $12 \n" /* $2 <= SR */ 324 "and $3, $3, $2 \n" 325 "mtc0 $3, $12 \n" /* interrupt masked */ 326 "li $3, 0xB \n" 327 "mtc2 $3, $1 \n" /* DTLB off */ 328 329 "lw %0, 0(%1) \n" /* entry <= *pslot */ 330 331 "li $3, 0xF \n" 332 "mtc2 $3, $1 \n" /* DTLB on */ 333 "mtc0 $2, $12 \n" /* restore SR */ 334 : "=r" (value) 335 : "r" (paddr) 336 : "$2", "$3"); 337 return value; 338 } 339 340 341 //////////////////////////////////////////////////////////////////////////// 342 // _physical_write_access() 343 // This function makes a physical write access to a 32 bits word in memory, 344 // after a temporary DTLB desactivation. 345 //////////////////////////////////////////////////////////////////////////// 346 void _physical_write_access(unsigned int * paddr, unsigned int value) { 347 asm volatile( 348 "li $3, 0xFFFFFFFE \n" 349 "mfc0 $2, $12 \n" /* $26 <= SR */ 350 "and $3, $3, $2 \n" 351 "mtc0 $3, $12 \n" /* interrupt masked */ 352 "li $3, 0xB \n" 353 "mtc2 $3, $1 \n" /* DTLB off */ 354 355 "sw %0, 0(%1) \n" /* entry <= *pslot */ 356 357 "li $3, 0xF \n" 358 "mtc2 $3, $1 \n" /* DTLB on */ 359 "mtc0 $2, $12 \n" /* restore SR */ 360 : 361 : "r" (value), "r" (paddr) 362 : "$2", "$3"); 363 } 364 365 366 //////////////////////////////////////////////////////////////////////////// 367 // _get_tasks_number() 368 // This function returns the number of tasks allocated to processor. 369 //////////////////////////////////////////////////////////////////////////// 370 unsigned int _get_tasks_number() { 371 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 372 return _physical_read_access(&(psched->tasks)); 373 } 374 375 376 //////////////////////////////////////////////////////////////////////////// 377 // _get_proc_task_id() 378 // This function returns the index of the currently running task. 
379 //////////////////////////////////////////////////////////////////////////// 380 unsigned int _get_proc_task_id() { 381 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 382 return _physical_read_access(&(psched->current)); 383 } 384 385 386 //////////////////////////////////////////////////////////////////////////// 387 // _set_proc_task_id() 388 // This function returns the index of the currently running task. 389 //////////////////////////////////////////////////////////////////////////// 390 void _set_proc_task_id(unsigned int value) { 391 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 392 _physical_write_access(&(psched->current), value); 393 } 394 395 396 //////////////////////////////////////////////////////////////////////////// 397 // _get_global_task_id() 398 // This function returns the global index of the running task. 399 //////////////////////////////////////////////////////////////////////////// 400 unsigned int _get_global_task_id() { 401 return _get_context_slot(_get_proc_task_id(), CTX_GTID_ID); 402 } 403 404 405 /////////////////////////////////////////////////////////////////////////////// 406 // _get_context_slot() 407 // This function returns a slot content for the task defined by task_id. 408 /////////////////////////////////////////////////////////////////////////////// 409 unsigned int _get_context_slot(unsigned int task_id, unsigned int slot_id) { 410 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 411 return _physical_read_access(&(psched->context[task_id][slot_id])); 412 } 413 414 415 /////////////////////////////////////////////////////////////////////////////// 416 // _set_context_slot() 417 // This function returns a slot content for the task defined by task_id. 418 /////////////////////////////////////////////////////////////////////////////// 419 void _set_context_slot( unsigned int task_id, 420 unsigned int slot_id, 421 unsigned int value) { 422 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 423 _physical_write_access(&(psched->context[task_id][slot_id]), value); 424 } 425 426 427 //////////////////////////////////////////////////////////////////////////////// 428 // _get_interrupt_vector_entry() 429 // This function returns the interrupt_vector entry defined by argument index. 430 //////////////////////////////////////////////////////////////////////////////// 431 unsigned int _get_interrupt_vector_entry(unsigned int index) { 432 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 433 return _physical_read_access( &(psched->interrupt_vector[index])); 434 } 435 339 ///////////////////////////////////////////////////////////////////////////// 340 // _get_task_slot() 341 // This function returns the content of a context slot 342 // for the task identified by the ltid argument (local index). 343 ///////////////////////////////////////////////////////////////////////////// 344 unsigned int _get_task_slot( unsigned int ltid, 345 unsigned int slot ) 346 { 347 static_scheduler_t* psched = _get_sched(); 348 return psched->context[ltid][slot]; 349 } 350 351 ///////////////////////////////////////////////////////////////////////////// 352 // _set_task_slot() 353 // This function updates the content of a context slot 354 // for the task identified by the ltid argument (local index). 
355 ///////////////////////////////////////////////////////////////////////////// 356 void _set_task_slot( unsigned int ltid, 357 unsigned int slot, 358 unsigned int value ) 359 { 360 static_scheduler_t* psched = _get_sched(); 361 psched->context[ltid][slot] = value; 362 } 363 364 ///////////////////////////////////////////////////////////////////////////// 365 // _get_context_slot() 366 // This function returns the content of a context slot 367 // for the running task (defined by the scheduler current field). 368 ///////////////////////////////////////////////////////////////////////////// 369 unsigned int _get_context_slot( unsigned int slot ) 370 { 371 static_scheduler_t* psched = _get_sched(); 372 unsigned int task_id = psched->current; 373 return psched->context[task_id][slot]; 374 } 375 376 ///////////////////////////////////////////////////////////////////////////// 377 // _set_context_slot() 378 // This function updates the content of a context slot for the running task. 379 ///////////////////////////////////////////////////////////////////////////// 380 void _set_context_slot( unsigned int slot, 381 unsigned int value ) 382 { 383 static_scheduler_t* psched = _get_sched(); 384 unsigned int task_id = psched->current; 385 psched->context[task_id][slot] = value; 386 } 436 387 437 388 ///////////////////////////////////////////////////////////////////////////// 438 389 // access functions to mapping_info data structure 439 390 ///////////////////////////////////////////////////////////////////////////// 440 mapping_cluster_t * _get_cluster_base(mapping_header_t * header) { 391 mapping_cluster_t * _get_cluster_base(mapping_header_t * header) 392 { 441 393 return (mapping_cluster_t *) ((char *) header + 442 394 MAPPING_HEADER_SIZE); 443 395 } 444 445 446 ///////////////////////////////////////////////////////////////////////////// 447 mapping_pseg_t * _get_pseg_base(mapping_header_t * header) { 396 ///////////////////////////////////////////////////////////////////////////// 397 mapping_pseg_t * _get_pseg_base(mapping_header_t * header) 398 { 448 399 return (mapping_pseg_t *) ((char *) header + 449 400 MAPPING_HEADER_SIZE + … … 451 402 } 452 403 ///////////////////////////////////////////////////////////////////////////// 453 mapping_vspace_t * _get_vspace_base(mapping_header_t * header) { 404 mapping_vspace_t * _get_vspace_base(mapping_header_t * header) 405 { 454 406 return (mapping_vspace_t *) ((char *) header + 455 407 MAPPING_HEADER_SIZE + … … 471 423 472 424 ///////////////////////////////////////////////////////////////////////////// 473 mapping_vobj_t * _get_vobj_base(mapping_header_t * header) { 425 mapping_vobj_t * _get_vobj_base(mapping_header_t * header) 426 { 474 427 return (mapping_vobj_t *) ((char *) header + 475 428 MAPPING_HEADER_SIZE + … … 482 435 483 436 ///////////////////////////////////////////////////////////////////////////// 484 mapping_task_t * _get_task_base(mapping_header_t * header) { 437 mapping_task_t * _get_task_base(mapping_header_t * header) 438 { 485 439 return (mapping_task_t *) ((char *) header + 486 440 MAPPING_HEADER_SIZE + -
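Illustrative note (not part of the changeset): the common.c hunks above introduce _putl() for printing a 64-bit paddr_t and keep the _get_lock()/_release_lock() pair protecting TTY[0]. A minimal usage sketch follows, assuming common.h brings in paddr_t and the prototypes, and that _tty_put_lock is the kernel lock defined in drivers.c; the helper name example_debug_paddr is hypothetical.

#include "common.h"      /* _puts(), _putl(), _get_lock(), _release_lock() */

extern unsigned int _tty_put_lock;   /* kernel lock protecting TTY[0], defined in drivers.c */

/* Hypothetical debug helper: print a 64-bit physical address on TTY0,
 * following the lock / print / unlock pattern used by the GIET debug code. */
void example_debug_paddr(paddr_t paddr)
{
    _get_lock(&_tty_put_lock);       /* take exclusive access to TTY[0] */
    _puts("[DEBUG] paddr = ");
    _putl(paddr);                    /* printed as a 16-digit hexadecimal string */
    _puts("\n");
    _release_lock(&_tty_put_lock);
}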
soft/giet_vm/sys/common.h
r232 r238 11 11 #include <mapping_info.h> 12 12 #include <giet_config.h> 13 #include <ctx_handler.h> 13 14 14 15 /////////////////////////////////////////////////////////////////////////////////// … … 34 35 /////////////////////////////////////////////////////////////////////////////////// 35 36 36 void _puts(char *string); 37 void _putx(unsigned int val); 38 void _putd(unsigned int val); 37 void _puts(char *string); 38 void _putx(unsigned int val); 39 void _putd(unsigned int val); 40 void _putl(paddr_t val); 39 41 40 unsigned int _strncmp(const char * s1, const char * s2, unsigned int n); 41 void _dcache_buf_invalidate(const void * buffer, unsigned int size); 42 unsigned int _strncmp(const char * s1, 43 const char * s2, 44 unsigned int n); 42 45 43 void _dtlb_off(void);44 void _dtlb_on(void);46 void _dcache_buf_invalidate(const void * buffer, 47 unsigned int size); 45 48 46 void _it_mask(void); 47 void _it_restore(void); 48 void _it_disable(void); 49 void _it_enable(void); 49 void _dtlb_off(void); 50 void _dtlb_on(void); 50 51 51 unsigned int _get_epc(void); 52 unsigned int _get_ptpr(void); 53 unsigned int _get_bvar(void); 54 unsigned int _get_cr(void); 55 unsigned int _get_sched(void); 52 void _it_mask(void); 53 void _it_restore(void); 54 void _it_disable(void); 55 void _it_enable(void); 56 56 57 unsigned int _get_context_slot(unsigned int task_id, unsigned int slot_id); 58 void _set_context_slot(unsigned int task_id, unsigned int slot_id, unsigned int value); 57 unsigned int _get_epc(void); 58 unsigned int _get_ptpr(void); 59 unsigned int _get_bvar(void); 60 unsigned int _get_cr(void); 59 61 60 unsigned int _get_interrupt_vector_entry(unsigned int index);62 static_scheduler_t* _get_sched(void); 61 63 62 unsigned int _get_proc_task_id(void); 63 void _set_proc_task_id(unsigned int value); 64 unsigned int _get_context_slot( unsigned int slot ); 64 65 65 unsigned int _get_global_task_id(void); 66 void _set_context_slot( unsigned int slot, 67 unsigned int value ); 66 68 69 unsigned int _get_task_slot( unsigned int ltid, 70 unsigned int slot ); 67 71 68 unsigned int _get_tasks_number(void); 72 void _set_task_slot( unsigned int ltid, 73 unsigned int slot, 74 unsigned int value ); 69 75 70 void _get_lock(unsigned int * lock);71 void _release_lock(unsigned int * lock);76 void _get_lock(unsigned int * lock); 77 void _release_lock(unsigned int * lock); 72 78 73 mapping_cluster_t * _get_cluster_base(mapping_header_t* header);74 mapping_pseg_t * _get_pseg_base(mapping_header_t* header);75 mapping_vspace_t * _get_vspace_base(mapping_header_t* header);76 mapping_vseg_t * _get_vseg_base(mapping_header_t* header);77 mapping_vobj_t * _get_vobj_base(mapping_header_t* header);78 mapping_task_t * _get_task_base(mapping_header_t* header);79 mapping_cluster_t * _get_cluster_base(mapping_header_t* header); 80 mapping_pseg_t * _get_pseg_base(mapping_header_t* header); 81 mapping_vspace_t * _get_vspace_base(mapping_header_t* header); 82 mapping_vseg_t * _get_vseg_base(mapping_header_t* header); 83 mapping_vobj_t * _get_vobj_base(mapping_header_t* header); 84 mapping_task_t * _get_task_base(mapping_header_t* header); 79 85 80 86 -
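For context, the common.h hunk replaces the old task_id-indexed accessors with two flavors: _get_context_slot()/_set_context_slot() for the running task, and _get_task_slot()/_set_task_slot() for an arbitrary task identified by its local index. A minimal sketch of how the two are used, assuming the headers above; the example_* function names are hypothetical.

#include "common.h"        /* _get_context_slot(), _set_task_slot() */
#include "ctx_handler.h"   /* CTX_TTY_ID, CTX_RUN_ID slot indexes */

/* The running task reads one of its own context slots
 * without passing any task index ... */
unsigned int example_my_tty_channel(void)
{
    return _get_context_slot(CTX_TTY_ID);
}

/* ... while kernel code acting on another task of the same scheduler
 * (identified by its local index ltid) uses the _task_slot variant. */
void example_make_task_runable(unsigned int ltid)
{
    _set_task_slot(ltid, CTX_RUN_ID, 1);   /* RUN = 1 => task is runable */
}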
soft/giet_vm/sys/ctx_handler.c
r232 r238 25 25 // A task context is an array of 64 words = 256 bytes. 26 26 // It contains copies of processor registers (when the task is preempted): 27 // - GPR[i], generally stored in slot (i). $0, *26 & $27 are not saved.27 // - GPR[i], generally stored in slot (i). $0, $26 & $27 are not saved. 28 28 // - HI & LO registers 29 29 // - CP0 registers: EPC, SR, CR, BVAR 30 30 // - CP2 registers : PTPR 31 31 // It contains some general informations associated to the task: 32 // - TTY : terminal global index 33 // - FBDMA : DMA channel global index 32 // - TTY : TTY channel global index 34 33 // - NIC : NIC channel global index 35 // - TIMER : Timer global index 34 // - CMA : CMA channel global index 35 // - IOC : IOC channel global index 36 // - DMA : DMA channel local index 37 // - TIM : TIM channel local index 36 38 // - PTAB : page table virtual base address 37 // - LTID 39 // - LTID : Task local index (in scheduler) 38 40 // - VSID : Virtual space index 39 41 // - RUN : Task state (0 => sleeping / 1 => runable ) … … 42 44 // ctx[1]<- $1 |ctx[9] <- $9 |ctx[17]<- $17|ctx[25]<- $25|ctx[33]<- CR |ctx[41]<- DMA 43 45 // ctx[2]<- $2 |ctx[10]<- $10|ctx[18]<- $18|ctx[26]<- LO |ctx[34]<- SR |ctx[42]<- NIC 44 // ctx[3]<- $3 |ctx[11]<- $11|ctx[19]<- $19|ctx[27]<- HI |ctx[35]<- BVAR |ctx[43]<- TIM ER45 // ctx[4]<- $4 |ctx[12]<- $12|ctx[20]<- $20|ctx[28]<- $28|ctx[36]<- *** |ctx[44]<- PTAB46 // ctx[5]<- $5 |ctx[13]<- $13|ctx[21]<- $21|ctx[29]<- SP |ctx[37]<- *** |ctx[45]<- LTID47 // ctx[6]<- $6 |ctx[14]<- $14|ctx[22]<- $22|ctx[30]<- $30|ctx[38]<- *** |ctx[46]<- VSID46 // ctx[3]<- $3 |ctx[11]<- $11|ctx[19]<- $19|ctx[27]<- HI |ctx[35]<- BVAR |ctx[43]<- TIM 47 // ctx[4]<- $4 |ctx[12]<- $12|ctx[20]<- $20|ctx[28]<- $28|ctx[36]<- PTAB |ctx[44]<- IOC 48 // ctx[5]<- $5 |ctx[13]<- $13|ctx[21]<- $21|ctx[29]<- SP |ctx[37]<- LTID |ctx[45]<- CMA 49 // ctx[6]<- $6 |ctx[14]<- $14|ctx[22]<- $22|ctx[30]<- $30|ctx[38]<- VSID |ctx[46]<- GTID 48 50 // ctx[7]<- $7 |ctx[15]<- $15|ctx[23]<- $23|ctx[31]<- RA |ctx[39]<- PTPR |ctx[47]<- RUN 49 51 ////////////////////////////////////////////////////////////////////////////////////////// … … 60 62 // If there is no runable task, the scheduler switch to the default "idle" task. 61 63 // 62 // Implementation notes: 63 // - As we only have the scheduler physical address (in CP0_SCHED register), 64 // this function must use specific assess functions to access the scheduler. 65 // - All the context switch procedure is executed with interrupts masked. 66 // - The return address contained in $31 is saved in the current task context 67 // (in the ctx[31] slot), and the function actually returns to the address 68 // contained in the ctx[31] slot of the next task context. 64 // Implementation note 65 // The return address contained in $31 is saved in the current task context 66 // (in the ctx[31] slot), and the function actually returns to the address 67 // contained in the ctx[31] slot of the next task context. 
69 68 ///////////////////////////////////////////////////////////////////////////////// 70 void _ctx_switch() { 71 // get scheduler physical address 72 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 69 void _ctx_switch() 70 { 71 // get scheduler address 72 static_scheduler_t* psched = _get_sched(); 73 73 74 74 // get number of tasks allocated to scheduler 75 unsigned int tasks = _get_tasks_number();75 unsigned int tasks = psched->tasks; 76 76 77 77 // get current task index 78 unsigned int curr_task_id = _get_proc_task_id();78 unsigned int curr_task_id = psched->current; 79 79 80 80 // select the next task using a round-robin policy … … 83 83 unsigned int found = 0; 84 84 85 for (tid = curr_task_id + 1; tid < curr_task_id + 1 + tasks; tid++) { 85 for (tid = curr_task_id + 1; tid < curr_task_id + 1 + tasks; tid++) 86 { 86 87 next_task_id = tid % tasks; 87 88 88 // test if the task is runable 89 if (_get_context_slot(next_task_id, CTX_RUN_ID)) { 89 if ( psched->context[next_task_id][CTX_RUN_ID] ) 90 { 90 91 found = 1; 91 92 break; … … 94 95 95 96 // launch "idle" task if no runable task 96 if (found == 0) { 97 if (found == 0) 98 { 97 99 next_task_id = IDLE_TASK_INDEX; 98 100 } 99 101 100 102 // no switch if no change 101 if (curr_task_id != next_task_id) { 102 unsigned int * curr_ctx_paddr = &(psched->context[curr_task_id][0]); 103 unsigned int * next_ctx_paddr = &(psched->context[next_task_id][0]); 103 if (curr_task_id != next_task_id) 104 { 105 unsigned int* curr_ctx_vaddr = &(psched->context[curr_task_id][0]); 106 unsigned int* next_ctx_vaddr = &(psched->context[next_task_id][0]); 104 107 105 _set_proc_task_id(next_task_id); 106 //_timer_reset_irq_cpt(cluster_id, local_id); // commented until not properly supported in soclib 108 // set current task index 109 psched->current = next_task_id; 110 111 //_timer_reset_irq_cpt(cluster_id, local_id); 112 // commented until not properly supported in soclib 107 113 // (the function is not yet present in drivers.c) 108 _task_switch(curr_ctx_paddr, next_ctx_paddr); 114 115 _task_switch(curr_ctx_vaddr, next_ctx_vaddr); 109 116 110 117 #if GIET_DEBUG_SWITCH 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 118 _get_lock(&_tty_put_lock); 119 _puts("\n[GIET DEBUG] Context switch for processor "); 120 _putd(_procid()); 121 _puts(" at cycle "); 122 _putd(_proctime()); 123 _puts("\n"); 124 _puts(" - tasks = "); 125 _putd(tasks); 126 _puts("\n"); 127 _puts(" - curr_task_id = "); 128 _putd( curr_task_id ); 129 _puts("\n"); 130 _puts(" - next_task_id = "); 131 _putd(next_task_id); 132 _puts("\n"); 133 _release_lock( &_tty_put_lock); 127 134 #endif 128 135 … … 133 140 // This function is executed as the"idle" task when no other task can be executed 134 141 ///////////////////////////////////////////////////////////////////////////////////// 135 void _ctx_idle() { 142 void _ctx_idle() 143 { 136 144 unsigned int delay = 1000000; 137 145 … … 164 172 // in the "idle" task context. 165 173 ///////////////////////////////////////////////////////////////////////////////// 166 void _ctx_eret() { 174 void _ctx_eret() 175 { 167 176 asm volatile("eret"); 168 177 } -
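The heart of _ctx_switch() above is the round-robin selection of the next runable task. The hardware-free sketch below isolates just that policy: the run[] array stands in for the CTX_RUN_ID column of the scheduler's context array, and the IDLE_TASK_INDEX value shown is a placeholder assumption (the real constant comes from the GIET headers).

#define IDLE_TASK_INDEX 15   /* assumption: placeholder for the GIET constant */

/* Scan the tasks that follow 'current' in circular order and return the
 * first runable one; fall back to the idle task when none is runable.
 * This mirrors the r238 loop: for (tid = curr+1; tid < curr+1+tasks; tid++) */
unsigned int select_next_task(unsigned int run[], unsigned int tasks,
                              unsigned int current)
{
    unsigned int tid;
    for (tid = current + 1; tid < current + 1 + tasks; tid++)
    {
        unsigned int next = tid % tasks;
        if (run[next]) return next;
    }
    return IDLE_TASK_INDEX;
}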
soft/giet_vm/sys/ctx_handler.h
r232 r238 8 8 ///////////////////////////////////////////////////////////////////////////////// 9 9 10 typedef struct static_scheduler_s { 10 typedef struct static_scheduler_s 11 { 11 12 unsigned int context[15][64]; // at most 15 task contexts 12 13 unsigned int tasks; // actual number of tasks … … 29 30 #define CTX_RA_ID 31 30 31 31 #define CTX_EPC_ID 32 32 #define CTX_CR_ID 33 33 #define CTX_SR_ID 34 34 #define CTX_BVAR_ID 35 32 #define CTX_EPC_ID 32 // Exception Program Counter (CP0) 33 #define CTX_CR_ID 33 // Cause Register (CP0) 34 #define CTX_SR_ID 34 // Status Register (CP0) 35 #define CTX_BVAR_ID 35 // Bad Virtual Address Register (CP0) 35 36 36 #define CTX_PTPR_ID 39 37 #define CTX_PTAB_ID 36 // Page Table Virtual address 38 #define CTX_LTID_ID 37 // Local Task Index (in scheduler) 39 #define CTX_VSID_ID 38 // Vspace Index 40 #define CTX_PTPR_ID 39 // Page Table Pointer Register (PADDR>>13) 37 41 38 #define CTX_TTY_ID 40 // Integer : global TTY terminal index 39 #define CTX_DMA_ID 41 // Integer : global DMA channel index 40 #define CTX_NIC_ID 42 // Integer : global NIC channel index 41 #define CTX_TIMER_ID 43 // Integer : user level timer index / UNUSED 42 #define CTX_PTAB_ID 44 // Pointer : page table virtual base adress 43 #define CTX_LTID_ID 45 // Integer : local task index (in scheduler) / UNUSED 44 #define CTX_VSID_ID 46 // Integer : vspace index 45 #define CTX_RUN_ID 47 // Boolean : task runable 46 #define CTX_GTID_ID 48 // Integer : Global task id 42 #define CTX_TTY_ID 40 // global TTY terminal 43 #define CTX_DMA_ID 41 // local DMA channel 44 #define CTX_NIC_ID 42 // global NIC channel 45 #define CTX_TIM_ID 43 // local TIMER channel 46 #define CTX_IOC_ID 44 // global IOC channel 47 #define CTX_CMA_ID 45 // global CMA channel index (in scheduler) / UNUSED 48 #define CTX_GTID_ID 46 // Global Task Index 49 #define CTX_RUN_ID 47 // Boolean: task runable 47 50 48 51 ////////////////////////////////////////////////////////////////////////////////// -
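With the static_scheduler_s layout declared above (unsigned int context[15][64]), each task context occupies a contiguous block of 64 words (256 bytes). A small sketch of the resulting addressing; the helper name is hypothetical.

/* Byte offset of context slot 'slot' of local task 'ltid' from the
 * scheduler base address, for the context[15][64] layout above. */
unsigned int context_slot_byte_offset(unsigned int ltid, unsigned int slot)
{
    return (ltid * 64 + slot) * sizeof(unsigned int);   /* 64 words = 256 bytes per task */
}

For example, slot CTX_RUN_ID (47) of local task 2 sits at byte offset 2 * 256 + 47 * 4 = 700 from the scheduler base.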
soft/giet_vm/sys/drivers.c
r237 r238 1 1 /////////////////////////////////////////////////////////////////////////////////// 2 2 // File : drivers.c 3 // Date : 01/04/20123 // Date : 23/05/2013 4 4 // Author : alain greiner 5 5 // Copyright (c) UPMC-LIP6 … … 16 16 // - vci_block_device 17 17 // 18 // The following global parameters must be defined in the giet_config.h file: 19 // - CLUSTER_SIZE 18 // For the peripherals replicated in each cluster (ICU, TIMER, DMA), 19 // the corresponding (virtual) base addresses must be completed by an offset 20 // depending on the cluster index. 21 // 22 // The following global parameter must be defined in the giet_config.h file: 23 // - GIET_CLUSTER_INCREMENT 24 // 25 // The following global parameters must be defined in the hard_config.h file: 20 26 // - NB_CLUSTERS 21 27 // - NB_PROCS_MAX 22 // - NB_TIM ERS_MAX23 // - NB_DMA S_MAX24 // - NB_TTY S28 // - NB_TIM_CHANNELS 29 // - NB_DMA_CHANNELS 30 // - NB_TTY_CHANNELS_MAX 25 31 // 26 32 // The following virtual base addresses must be defined in the giet_vsegs.ld file: 27 33 // - seg_icu_base 28 34 // - seg_tim_base 35 // - seg_dma_base 29 36 // - seg_tty_base 30 37 // - seg_gcd_base 31 // - seg_dma_base32 38 // - seg_fbf_base 33 39 // - seg_ioc_base 34 40 // - seg_nic_base 35 // As some peripherals can be replicated in the clusters (ICU, TIMER, DMA)36 // These addresses must be completed by an offset depending on the cluster index37 // full_base_address = seg_***_base + cluster_id * CLUSTER_SIZE41 // - seg_cma_base 42 // - seg_iob_base 43 // 38 44 /////////////////////////////////////////////////////////////////////////////////// 39 45 … … 48 54 49 55 #if !defined(NB_CLUSTERS) 50 # error: You must define NB_CLUSTERS in the configsfile56 # error: You must define NB_CLUSTERS in the hard_config.h file 51 57 #endif 52 58 53 59 #if !defined(NB_PROCS_MAX) 54 # error: You must define NB_PROCS_MAX in the configsfile60 # error: You must define NB_PROCS_MAX in the hard_config.h file 55 61 #endif 56 62 … … 59 65 #endif 60 66 61 #if !defined(CLUSTER_SIZE) 62 # error: You must define CLUSTER_SIZE in the configs file 63 #endif 64 65 #if !defined(NB_TTYS) 66 # error: You must define NB_TTYS in the configs file 67 #endif 68 69 #if (NB_TTYS < 1) 70 # error: NB_TTYS cannot be smaller than 1! 71 #endif 72 73 #if !defined(NB_DMAS_MAX) 74 #define NB_DMAS_MAX 0 75 #endif 76 77 #if !defined(NB_TIMERS_MAX) 78 #define NB_TIMERS_MAX 0 79 #endif 80 81 #if ( (NB_TIMERS_MAX) > 32 ) 82 # error: NB_TIMERS_MAX + NB_PROCS_MAX cannot be larger than 32 83 #endif 84 85 #if !defined(NB_IOCS) 86 # error: You must define NB_IOCS in the configs file 87 #endif 88 89 #if ( NB_IOCS > 1 ) 90 # error: NB_IOCS cannot be larger than 1 67 #if !defined(GIET_CLUSTER_INCREMENT) 68 # error: You must define GIET_CLUSTER_INCREMENT in the giet_config.h file 69 #endif 70 71 #if !defined(NB_TTY_CHANNELS) 72 # error: You must define NB_TTY_CHANNELS in the hard_config.h file 73 #endif 74 75 #if (NB_TTY_CHANNELS < 1) 76 # error: NB_TTY_CHANNELS cannot be smaller than 1! 77 #endif 78 79 #if !defined(NB_DMA_CHANNELS) 80 # error: You must define NB_DMA_CHANNELS in the hard_config.h file 81 #endif 82 83 #if (NB_DMA_CHANNELS > 8) 84 # error: NB_DMA_CHANNELS cannot be smaller than 8! 
85 #endif 86 87 #if !defined(NB_TIM_CHANNELS) 88 #define NB_TIM_CHANNELS 0 89 #endif 90 91 #if ( (NB_TIM_CHANNELS + NB_PROC_MAX) > 32 ) 92 # error: NB_TIM_CHANNELS + NB_PROCS_MAX cannot be larger than 32 93 #endif 94 95 #if !defined(NB_IOC_CHANNELS) 96 # error: You must define NB_IOC_CHANNELS in the hard_config.h file 97 #endif 98 99 #if ( NB_IOC_CHANNELS > 8 ) 100 # error: NB_IOC_CHANNELS cannot be larger than 8 101 #endif 102 103 #if !defined(NB_NIC_CHANNELS) 104 # error: You must define NB_NIC_CHANNELS in the hard_config.h file 105 #endif 106 107 #if ( NB_NIC_CHANNELS > 8 ) 108 # error: NB_NIC_CHANNELS cannot be larger than 8 109 #endif 110 111 #if !defined(NB_CMA_CHANNELS) 112 # error: You must define NB_CMA_CHANNELS in the hard_config.h file 113 #endif 114 115 #if ( NB_CMA_CHANNELS > 8 ) 116 # error: NB_CMA_CHANNELS cannot be larger than 8 91 117 #endif 92 118 93 119 #if !defined( USE_XICU ) 94 # error: You must define USE_XICU in the configsfile120 # error: You must define USE_XICU in the hard_config.h file 95 121 #endif 96 122 97 123 #if !defined( IOMMU_ACTIVE ) 98 # error: You must define IOMMU_ACTIVE in the configsfile124 # error: You must define IOMMU_ACTIVE in the hard_config.h file 99 125 #endif 100 126 … … 105 131 // Timers driver 106 132 ////////////////////////////////////////////////////////////////////////////// 133 // This peripheral is replicated in all clusters. 107 134 // The timers can be implemented in a vci_timer component or in a vci_xicu 108 135 // component (depending on the USE_XICU parameter). … … 113 140 // - "user" timers : requested by the task in the mapping_info data structure. 114 141 // For each user timer, the timer_id is stored in the context of the task. 115 // The global index is cluster_id * (NB_PROCS_MAX+NB_TIM ERS_MAX) + local_id142 // The global index is cluster_id * (NB_PROCS_MAX+NB_TIM_CHANNELS) + local_id 116 143 ////////////////////////////////////////////////////////////////////////////// 144 // The (virtual) base address of the associated segment is: 145 // 146 // timer_address = seg_icu_base + cluster_id * GIET_CLUSTER_INCREMENT 147 // 148 // - cluster id is an explicit argument of all access functions 149 // - seg_icu_base must be defined in the giet_vsegs.ld file 150 // - GIET_CLUSTER_INCREMENT must be defined in the giet_config.h file 151 //////////////////////////////////////////////////////////////////////////////// 117 152 118 153 // User Timer signaling variables 119 154 120 #if (NB_TIM ERS_MAX> 0)121 in_unckdata volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM ERS_MAX]122 = { [0 ... ((NB_CLUSTERS * NB_TIM ERS_MAX) - 1)] = 0 };155 #if (NB_TIM_CHANNELS > 0) 156 in_unckdata volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM_CHANNELS] 157 = { [0 ... ((NB_CLUSTERS * NB_TIM_CHANNELS) - 1)] = 0 }; 123 158 #endif 124 159 … … 131 166 // Returns 0 if success, > 0 if error. 
132 167 ////////////////////////////////////////////////////////////////////////////// 133 unsigned int _timer_start(unsigned int cluster_id, unsigned int local_id, unsigned int period) { 168 unsigned int _timer_start( unsigned int cluster_id, 169 unsigned int local_id, 170 unsigned int period) 171 { 134 172 // parameters checking 135 if (cluster_id >= NB_CLUSTERS) { 136 return 1; 137 } 138 if (local_id >= NB_TIMERS_MAX) { 139 return 2; 140 } 173 if (cluster_id >= NB_CLUSTERS) return 1; 174 if (local_id >= NB_TIM_CHANNELS) return 2; 141 175 142 176 #if USE_XICU 143 unsigned int * timer_address = (unsigned int *) ((char *) &seg_icu_base + (cluster_id * CLUSTER_SIZE)); 177 unsigned int * timer_address = (unsigned int *) ((char *) &seg_icu_base + 178 (cluster_id * GIET_CLUSTER_INCREMENT)); 144 179 145 180 timer_address[XICU_REG(XICU_PTI_PER, local_id)] = period; 146 181 #else 147 unsigned int* timer_address = (unsigned int *) ((char *) &seg_tim_base + (cluster_id * CLUSTER_SIZE)); 182 unsigned int* timer_address = (unsigned int *) ((char *) &seg_tim_base + 183 (cluster_id * GIET_CLUSTER_INCREMENT)); 148 184 149 185 timer_address[local_id * TIMER_SPAN + TIMER_PERIOD] = period; … … 152 188 return 0; 153 189 } 154 155 190 156 191 ////////////////////////////////////////////////////////////////////////////// … … 160 195 // Returns 0 if success, > 0 if error. 161 196 ////////////////////////////////////////////////////////////////////////////// 162 unsigned int _timer_stop(unsigned int cluster_id, unsigned int local_id) { 197 unsigned int _timer_stop( unsigned int cluster_id, 198 unsigned int local_id) 199 { 163 200 // parameters checking 164 if (cluster_id >= NB_CLUSTERS) { 165 return 1; 166 } 167 if (local_id >= NB_TIMERS_MAX) { 168 return 2; 169 } 201 if (cluster_id >= NB_CLUSTERS) return 1; 202 if (local_id >= NB_TIM_CHANNELS) return 2; 170 203 171 204 #if USE_XICU 172 unsigned int * timer_address = (unsigned int *) ((char *) &seg_icu_base + (cluster_id * CLUSTER_SIZE)); 205 unsigned int * timer_address = (unsigned int *) ((char *) &seg_icu_base + 206 (cluster_id * GIET_CLUSTER_INCREMENT)); 173 207 174 208 timer_address[XICU_REG(XICU_PTI_PER, local_id)] = 0; 175 209 #else 176 unsigned int* timer_address = (unsigned int *) ((char *) &seg_tim_base + (cluster_id * CLUSTER_SIZE)); 210 unsigned int* timer_address = (unsigned int *) ((char *) &seg_tim_base + 211 (cluster_id * GIET_CLUSTER_INCREMENT)); 212 177 213 timer_address[local_id * TIMER_SPAN + TIMER_MODE] = 0; 178 214 #endif … … 189 225 // Returns 0 if success, > 0 if error. 
190 226 ////////////////////////////////////////////////////////////////////////////// 191 unsigned int _timer_reset_irq(unsigned int cluster_id, unsigned int local_id) { 227 unsigned int _timer_reset_irq( unsigned int cluster_id, 228 unsigned int local_id ) 229 { 192 230 // parameters checking 193 if (cluster_id >= NB_CLUSTERS) { 194 return 1; 195 } 196 if (local_id >= NB_TIMERS_MAX) { 197 return 2; 198 } 231 if (cluster_id >= NB_CLUSTERS) return 1; 232 if (local_id >= NB_TIM_CHANNELS) return 2; 199 233 200 234 #if USE_XICU 201 235 unsigned int * timer_address = (unsigned int *) ((char *) &seg_icu_base + 202 (cluster_id * (unsigned) CLUSTER_SIZE));236 (cluster_id * GIET_CLUSTER_INCREMENT)); 203 237 204 238 unsigned int bloup = timer_address[XICU_REG(XICU_PTI_ACK, local_id)]; … … 206 240 #else 207 241 unsigned int * timer_address = (unsigned int *) ((char *) &seg_tim_base + 208 (cluster_id * CLUSTER_SIZE));242 (cluster_id * GIET_CLUSTER_INCREMENT)); 209 243 210 244 timer_address[local_id * TIMER_SPAN + TIMER_RESETIRQ] = 0; 211 245 #endif 212 213 246 return 0; 214 247 } … … 223 256 // return 1; 224 257 // } 225 // if (local_id >= NB_TIM ERS_MAX) {258 // if (local_id >= NB_TIM_CHANNELS) { 226 259 // return 2; 227 260 // } … … 230 263 //#error // not implemented 231 264 //#else 232 // unsigned int * timer_address = (unsigned int *) ((char *) &seg_tim_base + (cluster_id * CLUSTER_SIZE));265 // unsigned int * timer_address = (unsigned int *) ((char *) &seg_tim_base + (cluster_id * GIET_CLUSTER_INCREMENT)); 233 266 // unsigned int timer_period = timer_address[local_id * TIMER_SPAN + TIMER_PERIOD]; 234 267 // … … 244 277 ///////////////////////////////////////////////////////////////////////////////// 245 278 // There is only one multi_tty controler in the architecture. 246 // The total number of TTYs is defined by the configuration parameter NB_TTY S.279 // The total number of TTYs is defined by the configuration parameter NB_TTY_CHANNELS. 247 280 // The "system" terminal is TTY[0]. 248 281 // The "user" TTYs are allocated to applications by the GIET in the boot phase, … … 253 286 254 287 // TTY variables 255 in_unckdata volatile unsigned char _tty_get_buf[NB_TTYS]; 256 in_unckdata volatile unsigned char _tty_get_full[NB_TTYS] = { [0 ... NB_TTYS - 1] = 0 }; 288 in_unckdata volatile unsigned char _tty_get_buf[NB_TTY_CHANNELS]; 289 in_unckdata volatile unsigned char _tty_get_full[NB_TTY_CHANNELS] 290 = { [0 ... NB_TTY_CHANNELS - 1] = 0 }; 257 291 in_unckdata unsigned int _tty_put_lock = 0; // protect kernel TTY[0] 258 292 … … 260 294 // _tty_error() 261 295 //////////////////////////////////////////////////////////////////////////////// 262 void _tty_error(unsigned int tty_id, unsigned int task_id) { 296 void _tty_error(unsigned int tty_id, unsigned int task_id) 297 { 263 298 unsigned int proc_id = _procid(); 264 299 … … 287 322 // The function returns the number of characters that have been written. 
288 323 ///////////////////////////////////////////////////////////////////////////////// 289 unsigned int _tty_write(const char * buffer, unsigned int length) { 324 unsigned int _tty_write(const char * buffer, 325 unsigned int length) 326 { 290 327 unsigned int nwritten; 291 unsigned int task_id = _get_proc_task_id(); 292 unsigned int tty_id = _get_context_slot(task_id, CTX_TTY_ID); 293 294 if (tty_id >= NB_TTYS) { 295 _tty_error(tty_id , task_id); 296 return 0; 297 } 298 299 unsigned int * tty_address = (unsigned int *) &seg_tty_base; 300 301 for (nwritten = 0; nwritten < length; nwritten++) { 328 unsigned int tty_id = _get_context_slot(CTX_TTY_ID); 329 unsigned int* tty_address = (unsigned int *) &seg_tty_base; 330 331 for (nwritten = 0; nwritten < length; nwritten++) 332 { 302 333 // check tty's status 303 if ((tty_address[tty_id * TTY_SPAN + TTY_STATUS] & 0x2) == 0x2) { 304 break; 305 } 306 else { 307 // write character 308 tty_address[tty_id * TTY_SPAN + TTY_WRITE] = (unsigned int) buffer[nwritten]; 309 } 334 if ((tty_address[tty_id * TTY_SPAN + TTY_STATUS] & 0x2) == 0x2) break; 335 tty_address[tty_id * TTY_SPAN + TTY_WRITE] = (unsigned int) buffer[nwritten]; 310 336 } 311 337 return nwritten; 312 338 } 313 314 339 315 340 ////////////////////////////////////////////////////////////////////////////// … … 324 349 // Returns 0 if the kernel buffer is empty, 1 if the buffer is full. 325 350 ////////////////////////////////////////////////////////////////////////////// 326 unsigned int _tty_read(char * buffer, unsigned int length) { 327 unsigned int task_id = _get_proc_task_id(); 328 unsigned int tty_id = _get_context_slot(task_id, CTX_TTY_ID); 329 330 if (tty_id >= NB_TTYS) { 331 _tty_error(tty_id, task_id); 351 unsigned int _tty_read(char * buffer, 352 unsigned int length) 353 { 354 unsigned int tty_id = _get_context_slot(CTX_TTY_ID); 355 356 if (_tty_get_full[tty_id] == 0) 357 { 332 358 return 0; 333 359 } 334 335 if (_tty_get_full[tty_id] == 0) { 336 return 0; 337 } 338 else { 360 else 361 { 339 362 *buffer = _tty_get_buf[tty_id]; 340 363 _tty_get_full[tty_id] = 0; … … 342 365 } 343 366 } 344 345 367 346 368 //////////////////////////////////////////////////////////////////////////////// … … 351 373 // Returns 0 if success, 1 if tty_id too large. 352 374 //////////////////////////////////////////////////////////////////////////////// 353 unsigned int _tty_get_char(unsigned int tty_id, unsigned char * buffer) { 375 unsigned int _tty_get_char(unsigned int tty_id, 376 unsigned char * buffer) 377 { 354 378 // checking argument 355 if (tty_id >= NB_TTYS) { 356 return 1; 357 } 379 if (tty_id >= NB_TTY_CHANNELS) { return 1; } 358 380 359 381 // compute terminal base address … … 366 388 367 389 //////////////////////////////////////////////////////////////////////////////// 368 // VciMultiIcu and VciXicu drivers 369 //////////////////////////////////////////////////////////////////////////////// 390 // VciMultiIcu or VciXicu driver 391 //////////////////////////////////////////////////////////////////////////////// 392 // This hardware component is replicated in all clusters. 370 393 // There is one vci_multi_icu (or vci_xicu) component per cluster, 371 394 // and the number of independant ICUs is equal to NB_PROCS_MAX, 372 // because there is one private interrupr controler per processor. 395 // because there is one private interrupt controler per processor. 
396 //////////////////////////////////////////////////////////////////////////////// 397 // The (virtual) base address of the associated segment is: 398 // 399 // icu_address = seg_icu_base + cluster_id * GIET_CLUSTER_INCREMENT 400 // 401 // - cluster id is an explicit argument of all access functions 402 // - seg_icu_base must be defined in the giet_vsegs.ld file 403 // - GIET_CLUSTER_INCREMENT must be defined in the giet_config.h file 373 404 //////////////////////////////////////////////////////////////////////////////// 374 405 … … 380 411 // Returns 0 if success, > 0 if error. 381 412 //////////////////////////////////////////////////////////////////////////////// 382 unsigned int _icu_set_mask( 383 unsigned int cluster_id,384 unsigned int proc_id,385 unsigned int value,386 unsigned int is_timer){413 unsigned int _icu_set_mask( unsigned int cluster_id, 414 unsigned int proc_id, 415 unsigned int value, 416 unsigned int is_timer) 417 { 387 418 // parameters checking 388 if (cluster_id >= NB_CLUSTERS) { 389 return 1; 390 } 391 if (proc_id >= NB_PROCS_MAX) { 392 return 1; 393 } 419 if (cluster_id >= NB_CLUSTERS) return 1; 420 if (proc_id >= NB_PROCS_MAX) return 1; 394 421 395 422 unsigned int * icu_address = (unsigned int *) ((char *) &seg_icu_base + 396 (cluster_id * (unsigned) CLUSTER_SIZE));423 (cluster_id * GIET_CLUSTER_INCREMENT)); 397 424 #if USE_XICU 398 if (is_timer) { 425 if (is_timer) 426 { 399 427 icu_address[XICU_REG(XICU_MSK_PTI_ENABLE, proc_id)] = value; 400 428 } 401 else { 429 else 430 { 402 431 icu_address[XICU_REG(XICU_MSK_HWI_ENABLE, proc_id)] = value; 403 432 } … … 405 434 icu_address[proc_id * ICU_SPAN + ICU_MASK_SET] = value; 406 435 #endif 407 408 436 return 0; 409 437 } … … 417 445 // Returns 0 if success, > 0 if error. 418 446 //////////////////////////////////////////////////////////////////////////////// 419 unsigned int _icu_get_index(unsigned int cluster_id, unsigned int proc_id, unsigned int * buffer) { 447 unsigned int _icu_get_index( unsigned int cluster_id, 448 unsigned int proc_id, 449 unsigned int * buffer) 450 { 420 451 // parameters checking 421 if (cluster_id >= NB_CLUSTERS) { 422 return 1; 423 } 424 if (proc_id >= NB_PROCS_MAX) { 425 return 1; 426 } 452 if (cluster_id >= NB_CLUSTERS) return 1; 453 if (proc_id >= NB_PROCS_MAX) return 1; 427 454 428 455 unsigned int * icu_address = (unsigned int *) ((char *) &seg_icu_base + 429 (cluster_id * (unsigned) CLUSTER_SIZE));456 (cluster_id * GIET_CLUSTER_INCREMENT)); 430 457 #if USE_XICU 431 458 unsigned int prio = icu_address[XICU_REG(XICU_PRIO, proc_id)]; … … 436 463 unsigned int hwi_id = (prio & 0x001F0000) >> 16; 437 464 unsigned int swi_id = (prio & 0x1F000000) >> 24; 438 if (pti_ok) { 439 *buffer = pti_id; 440 } 441 else if (hwi_ok) { 442 *buffer = hwi_id; 443 } 444 else if (swi_ok) { 445 *buffer = swi_id; 446 } 447 else { 448 *buffer = 32; 449 } 465 if (pti_ok) { *buffer = pti_id; } 466 else if (hwi_ok) { *buffer = hwi_id; } 467 else if (swi_ok) { *buffer = swi_id; } 468 else { *buffer = 32; } 450 469 #else 451 470 *buffer = icu_address[proc_id * ICU_SPAN + ICU_IT_VECTOR]; 452 471 #endif 453 454 472 return 0; 455 473 } … … 469 487 // Returns 0 if success, > 0 if error. 
470 488 //////////////////////////////////////////////////////////////////////////////// 471 unsigned int _gcd_write(unsigned int register_index, unsigned int value) { 489 unsigned int _gcd_write( unsigned int register_index, 490 unsigned int value) 491 { 472 492 // parameters checking 473 if (register_index >= GCD_END) { 474 return 1; 475 } 493 if (register_index >= GCD_END) return 1; 476 494 477 495 unsigned int * gcd_address = (unsigned int *) &seg_gcd_base; … … 487 505 // Returns 0 if success, > 0 if error. 488 506 //////////////////////////////////////////////////////////////////////////////// 489 unsigned int _gcd_read(unsigned int register_index, unsigned int * buffer) { 507 unsigned int _gcd_read( unsigned int register_index, 508 unsigned int * buffer ) 509 { 490 510 // parameters checking 491 if (register_index >= GCD_END) { 492 return 1; 493 } 511 if (register_index >= GCD_END) return 1; 494 512 495 513 unsigned int * gcd_address = (unsigned int *) &seg_gcd_base; … … 567 585 // Returns 0 if success, > 0 if error. 568 586 /////////////////////////////////////////////////////////////////////////////// 569 unsigned int _ioc_access( 570 unsigned int to_mem, 571 unsigned int lba, 572 unsigned int user_vaddr, 573 unsigned int count) { 574 unsigned int user_vpn_min; // first virtuel page index in user space 575 unsigned int user_vpn_max; // last virtual page index in user space 576 unsigned int vpn; // current virtual page index in user space 577 unsigned int ppn; // physical page number 578 unsigned int flags; // page protection flags 579 unsigned int ix2; // page index in IOMMU PT1 page table 580 unsigned int addr; // buffer address for IOC peripheral 581 unsigned int ppn_first; // first physical page number for user buffer 582 587 unsigned int _ioc_access( unsigned int to_mem, 588 unsigned int lba, 589 unsigned int user_vaddr, 590 unsigned int count) 591 { 592 unsigned int user_vpn_min; // first virtuel page index in user space 593 unsigned int user_vpn_max; // last virtual page index in user space 594 unsigned int vpn; // current virtual page index in user space 595 unsigned int ppn; // physical page number 596 unsigned int flags; // page protection flags 597 unsigned int ix2; // page index in IOMMU PT1 page table 598 unsigned int ppn_first; // first physical page number for user buffer 599 unsigned int buf_xaddr = 0; // user buffer virtual address in IO space (if IOMMU) 600 paddr_t buf_paddr = 0; // user buffer physical address (if no IOMMU), 601 583 602 // check buffer alignment 584 if ((unsigned int) user_vaddr & 0x3) { 585 return 1; 603 if ((unsigned int) user_vaddr & 0x3) 604 { 605 _get_lock(&_tty_put_lock); 606 _puts("[GIET ERROR] in _ioc_access() : user buffer not word aligned\n"); 607 _release_lock(&_tty_put_lock); 608 return 1; 586 609 } 587 610 … … 592 615 593 616 // get user space page table virtual address 594 unsigned int task_id = _get_proc_task_id(); 595 unsigned int user_pt_vbase = _get_context_slot(task_id, CTX_PTAB_ID); 617 unsigned int user_pt_vbase = _get_context_slot(CTX_PTAB_ID); 596 618 597 619 user_vpn_min = user_vaddr >> 12; 598 620 user_vpn_max = (user_vaddr + length - 1) >> 12; 599 ix2 = 0;600 621 601 622 // loop on all virtual pages covering the user buffer 602 for (vpn = user_vpn_min; vpn <= user_vpn_max; vpn++) { 623 for (vpn = user_vpn_min, ix2 = 0 ; 624 vpn <= user_vpn_max ; 625 vpn++, ix2++ ) 626 { 603 627 // get ppn and flags for each vpn 604 unsigned int ko = _v2p_translate((page_table_t *) user_pt_vbase, vpn, &ppn, &flags); 605 628 unsigned int ko 
= _v2p_translate( (page_table_t*)user_pt_vbase, 629 vpn, 630 &ppn, 631 &flags); 606 632 // check access rights 607 if (ko) { 608 return 2; // unmapped 633 if (ko) 634 { 635 _get_lock(&_tty_put_lock); 636 _puts("[GIET ERROR] in _ioc_access() : user buffer unmapped\n"); 637 _release_lock(&_tty_put_lock); 638 return 1; 609 639 } 610 if ((flags & PTE_U) == 0) { 611 return 3; // not in user space 640 if ((flags & PTE_U) == 0) 641 { 642 _get_lock(&_tty_put_lock); 643 _puts("[GIET ERROR] in _ioc_access() : user buffer not in user space\n"); 644 _release_lock(&_tty_put_lock); 645 return 1; 612 646 } 613 if (((flags & PTE_W) == 0 ) && to_mem) { 614 return 4; // not writable 647 if (((flags & PTE_W) == 0 ) && to_mem) 648 { 649 _get_lock(&_tty_put_lock); 650 _puts("[GIET ERROR] in _ioc_access() : user buffer not writable\n"); 651 _release_lock(&_tty_put_lock); 652 return 1; 615 653 } 616 654 617 655 // save first ppn value 618 if (ix2 == 0) { 619 ppn_first = ppn; 656 if (ix2 == 0) ppn_first = ppn; 657 658 if (IOMMU_ACTIVE) // the user buffer must be remapped in the I/0 space 659 { 660 // check buffer length < 2 Mbytes 661 if (ix2 > 511) 662 { 663 _get_lock(&_tty_put_lock); 664 _puts("[GIET ERROR] in _ioc_access() : user buffer > 2 Mbytes\n"); 665 _release_lock(&_tty_put_lock); 666 return 1; 667 } 668 669 // map the physical page in IOMMU page table 670 _iommu_add_pte2( _ioc_iommu_ix1, // PT1 index 671 ix2, // PT2 index 672 ppn, // Physical page number 673 flags); // Protection flags 674 675 // compute user buffer virtual adress in IO space 676 buf_xaddr = (_ioc_iommu_ix1) << 21 | (user_vaddr & 0xFFF); 620 677 } 621 622 if (IOMMU_ACTIVE) { 623 // the user buffer must be remapped in the I/0 space 624 // check buffer length < 2 Mbytes 625 if (ix2 > 511) { 626 return 2; 678 else // No IOMMU 679 { 680 // check that physical pages are contiguous 681 if ((ppn - ppn_first) != ix2) 682 { 683 _get_lock(&_tty_put_lock); 684 _puts("[GIET ERROR] in _ioc_access() : split physical user buffer\n"); 685 _release_lock(&_tty_put_lock); 686 return 1; 627 687 } 628 688 629 // map the physical page in IOMMU page table 630 _iommu_add_pte2( 631 _ioc_iommu_ix1, // PT1 index 632 ix2, // PT2 index 633 ppn, // Physical page number 634 flags); // Protection flags 689 // compute user buffer physical adress 690 buf_paddr = (((paddr_t)ppn_first) << 12) | (user_vaddr & 0xFFF); 635 691 } 636 else {637 // no IOMMU : check that physical pages are contiguous638 if ((ppn - ppn_first) != ix2) {639 return 5; // split physical buffer640 }641 }642 643 // increment page index644 ix2++;645 692 } // end for vpn 646 693 … … 649 696 650 697 // invalidate data cache in case of memory write 651 if (to_mem) { 652 _dcache_buf_invalidate((void *) user_vaddr, length); 653 } 654 655 // compute buffer base address for IOC depending on IOMMU activation 656 if (IOMMU_ACTIVE) { 657 addr = (_ioc_iommu_ix1) << 21 | (user_vaddr & 0xFFF); 658 } 659 else { 660 addr = (ppn_first << 12) | (user_vaddr & 0xFFF); 661 } 698 if (to_mem) _dcache_buf_invalidate((void *) user_vaddr, length); 699 700 #if GIET_DEBUG_IOC_DRIVER 701 _get_lock(&_tty_put_lock); 702 _puts("\n[GIET DEBUG] IOC_ACCESS at cycle "); 703 _putd( _proctime() ); 704 _puts("\n - proc_id = "); 705 _putd( _procid() ); 706 _puts("\n - ioc_vbase = "); 707 _putx( (unsigned int)ioc_address ); 708 _puts("\n - psched_vbase = "); 709 _putx( (unsigned int)_get_sched() ); 710 _puts("\n - pt_vbase = "); 711 _putx( user_pt_vbase ); 712 _puts("\n - user_buf_vbase = "); 713 _putx( user_vaddr ); 714 _puts("\n - 
user_buf_length = "); 715 _putx( length ); 716 _puts("\n - user_buf_paddr = "); 717 _putl( buf_paddr ); 718 _puts("\n - user_buf_xaddr = "); 719 _putx( buf_xaddr ); 720 _puts("\n"); 721 _release_lock(&_tty_put_lock); 722 #endif 662 723 663 724 // get the lock on ioc device … … 665 726 666 727 // peripheral configuration 667 ioc_address[BLOCK_DEVICE_BUFFER] = addr; 728 if ( IOMMU_ACTIVE ) 729 { 730 ioc_address[BLOCK_DEVICE_BUFFER] = buf_xaddr; 731 } 732 else 733 { 734 ioc_address[BLOCK_DEVICE_BUFFER] = (unsigned int)buf_paddr; 735 ioc_address[BLOCK_DEVICE_BUFFER_EXT] = (unsigned int)(buf_paddr>>32); 736 } 668 737 ioc_address[BLOCK_DEVICE_COUNT] = count; 669 738 ioc_address[BLOCK_DEVICE_LBA] = lba; 670 if (to_mem == 0) { 739 if (to_mem == 0) 740 { 671 741 ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_WRITE; 672 742 } 673 else { 743 else 744 { 674 745 ioc_address[BLOCK_DEVICE_OP] = BLOCK_DEVICE_READ; 675 746 } 676 677 return 0; 678 } 679 747 return 0; 748 } 680 749 681 750 ///////////////////////////////////////////////////////////////////////////////// … … 688 757 // Returns 0 if success, > 0 if error. 689 758 ///////////////////////////////////////////////////////////////////////////////// 690 unsigned int _ioc_completed() { 759 unsigned int _ioc_completed() 760 { 691 761 unsigned int ret; 692 762 unsigned int ix2; 693 763 694 764 // busy waiting 695 while (_ioc_done == 0) { 696 asm volatile("nop"); 697 } 765 while (_ioc_done == 0) { asm volatile("nop"); } 766 767 #if GIET_DEBUG_IOC_DRIVER 768 _get_lock(&_tty_put_lock); 769 _puts("\n[GIET DEBUG] IOC_COMPLETED at cycle "); 770 _putd( _proctime() ); 771 _puts("\n - proc_id = "); 772 _putd( _procid() ); 773 _puts("\n"); 774 _release_lock(&_tty_put_lock); 775 #endif 698 776 699 777 // unmap the buffer from IOMMU page table if IOMMU is activated 700 if (IOMMU_ACTIVE) { 778 if (IOMMU_ACTIVE) 779 { 701 780 unsigned int * iob_address = (unsigned int *) &seg_iob_base; 702 781 703 for (ix2 = 0; ix2 < _ioc_iommu_npages; ix2++) { 782 for (ix2 = 0; ix2 < _ioc_iommu_npages; ix2++) 783 { 704 784 // unmap the page in IOMMU page table 705 785 _iommu_inval_pte2( … … 714 794 // test IOC status 715 795 if ((_ioc_status != BLOCK_DEVICE_READ_SUCCESS) 716 && (_ioc_status != BLOCK_DEVICE_WRITE_SUCCESS)) { 717 ret = 1; // error 718 } 719 else { 720 ret = 0; // success 721 } 796 && (_ioc_status != BLOCK_DEVICE_WRITE_SUCCESS)) ret = 1; // error 797 else ret = 0; // success 722 798 723 799 // reset synchronization variables … … 738 814 // Returns 0 if success, > 0 if error. 739 815 /////////////////////////////////////////////////////////////////////////////// 740 unsigned int _ioc_read(unsigned int lba, void * buffer, unsigned int count) { 816 unsigned int _ioc_read( unsigned int lba, 817 void * buffer, 818 unsigned int count) 819 { 741 820 return _ioc_access( 742 821 1, // read access … … 755 834 // Returns 0 if success, > 0 if error. 756 835 /////////////////////////////////////////////////////////////////////////////// 757 unsigned int _ioc_write(unsigned int lba, const void * buffer, unsigned int count) { 836 unsigned int _ioc_write( unsigned int lba, 837 const void * buffer, 838 unsigned int count) 839 { 758 840 return _ioc_access( 759 841 0, // write access … … 769 851 // Returns 0 if success, > 0 if error. 
770 852 /////////////////////////////////////////////////////////////////////////////// 771 unsigned int _ioc_get_status(unsigned int * status) { 853 unsigned int _ioc_get_status(unsigned int * status) 854 { 772 855 // get IOC base address 773 856 unsigned int * ioc_address = (unsigned int *) &seg_ioc_base; … … 782 865 // This function returns the block_size with which the IOC has been configured. 783 866 /////////////////////////////////////////////////////////////////////////////// 784 unsigned int _ioc_get_block_size() { 867 unsigned int _ioc_get_block_size() 868 { 785 869 // get IOC base address 786 870 unsigned int * ioc_address = (unsigned int *) &seg_ioc_base; … … 794 878 ////////////////////////////////////////////////////////////////////////////////// 795 879 // The DMA controllers are physically distributed in the clusters. 796 // There is (NB_CLUSTERS * NB_DMA S_MAX) channels, indexed by a global index:797 // dma_id = cluster_id * NB_DMA_ MAX+ loc_id798 // 799 // As a DMA channel can be used by several tasks, each DMA channel is protected800 // by a specific lock: _dma_lock[dma_id]880 // There is (NB_CLUSTERS * NB_DMA_CHANNELS) channels, indexed by a global index: 881 // dma_id = cluster_id * NB_DMA_CHANNELS + loc_id 882 // 883 // As a DMA channel is a private ressource allocated to a task, 884 // there is no lock protecting exclusive access to the channel. 801 885 // The signalisation between the OS and the DMA uses the _dma_done[dma_id] 802 886 // synchronisation variables (set by the ISR, and reset by the OS). 803 887 // The transfer status is copied by the ISR in the _dma_status[dma_id] variables. 804 // 805 // These DMA channels can be used by the FB driver, or by the NIC driver. 806 ////////////////////////////////////////////////////////////////////////////////// 807 808 #if NB_DMAS_MAX > 0 809 in_unckdata unsigned int _dma_lock[NB_DMAS_MAX * NB_CLUSTERS] = { 810 [0 ... (NB_DMAS_MAX * NB_CLUSTERS) - 1] = 0 811 }; 812 813 in_unckdata volatile unsigned int _dma_done[NB_DMAS_MAX * NB_CLUSTERS] = { 814 [0 ... (NB_DMAS_MAX * NB_CLUSTERS) - 1] = 0 815 }; 816 817 in_unckdata volatile unsigned int _dma_status[NB_DMAS_MAX * NB_CLUSTERS]; 888 ////////////////////////////////////////////////////////////////////////////////// 889 // The (virtual) base address of the associated segment is: 890 // 891 // dma_address = seg_dma_base + cluster_id * GIET_CLUSTER_INCREMENT 892 // 893 // - seg_dma_base must be defined in the giet_vsegs.ld file 894 // - GIET_CLUSTER_INCREMENT must be defined in the giet_config.h file 895 //////////////////////////////////////////////////////////////////////////////// 896 897 #if NB_DMA_CHANNELS > 0 898 899 // in_unckdata unsigned int _dma_lock[NB_DMA_CHANNELS * NB_CLUSTERS] 900 // = { [0 ... (NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 }; 901 902 in_unckdata volatile unsigned int _dma_done[NB_DMA_CHANNELS * NB_CLUSTERS] 903 = { [0 ... 
(NB_DMA_CHANNELS * NB_CLUSTERS) - 1] = 0 }; 904 in_unckdata volatile unsigned int _dma_status[NB_DMA_CHANNELS * NB_CLUSTERS]; 818 905 in_unckdata unsigned int _dma_iommu_ix1 = 1; 819 in_unckdata unsigned int _dma_iommu_npages[NB_DMA S_MAX* NB_CLUSTERS];906 in_unckdata unsigned int _dma_iommu_npages[NB_DMA_CHANNELS * NB_CLUSTERS]; 820 907 #endif 821 908 … … 823 910 // _dma_reset_irq() 824 911 ////////////////////////////////////////////////////////////////////////////////// 825 unsigned int _dma_reset_irq(unsigned int cluster_id, unsigned int channel_id) { 826 #if NB_DMAS_MAX > 0 912 unsigned int _dma_reset_irq( unsigned int cluster_id, 913 unsigned int channel_id) 914 { 915 #if NB_DMA_CHANNELS > 0 827 916 // parameters checking 828 if (cluster_id >= NB_CLUSTERS) { 829 return 1; 830 } 831 if (channel_id >= NB_DMAS_MAX) { 832 return 1; 833 } 917 if (cluster_id >= NB_CLUSTERS) return 1; 918 if (channel_id >= NB_DMA_CHANNELS) return 1; 834 919 835 920 // compute DMA base address 836 921 unsigned int * dma_address = (unsigned int *) ((char *) &seg_dma_base + 837 (cluster_id * (unsigned) CLUSTER_SIZE));922 (cluster_id * GIET_CLUSTER_INCREMENT)); 838 923 839 924 dma_address[channel_id * DMA_SPAN + DMA_RESET] = 0; … … 848 933 // _dma_get_status() 849 934 ////////////////////////////////////////////////////////////////////////////////// 850 unsigned int _dma_get_status(unsigned int cluster_id, unsigned int channel_id, unsigned int * status) { 851 #if NB_DMAS_MAX > 0 935 unsigned int _dma_get_status( unsigned int cluster_id, 936 unsigned int channel_id, 937 unsigned int * status) 938 { 939 #if NB_DMA_CHANNELS > 0 852 940 // parameters checking 853 if (cluster_id >= NB_CLUSTERS) { 854 return 1; 855 } 856 if (channel_id >= NB_DMAS_MAX) { 857 return 1; 858 } 941 if (cluster_id >= NB_CLUSTERS) return 1; 942 if (channel_id >= NB_DMA_CHANNELS) return 1; 859 943 860 944 // compute DMA base address 861 945 unsigned int * dma_address = (unsigned int *) ((char *) &seg_dma_base + 862 (cluster_id * (unsigned) CLUSTER_SIZE));946 (cluster_id * GIET_CLUSTER_INCREMENT)); 863 947 864 948 *status = dma_address[channel_id * DMA_SPAN + DMA_LEN]; … … 873 957 // _dma_transfer() 874 958 // Transfer data between a user buffer and a device buffer using DMA. 875 // Two devices types are supported: Frame Buffer if dev_type == 0 876 // Multi-Nic if dev_type != 0 959 // Only one device type is supported: Frame Buffer (dev_type == 0) 877 960 // Arguments are: 878 961 // - dev_type : device type. … … 882 965 // - length : number of bytes to be transfered. 883 966 // 884 // The DMA channel is obtained from task context (CTX_FBDMA_ID / CTX_NIDMA_ID.967 // The cluster_id and channel_id are obtained from task context (CTX_DMA_ID). 885 968 // The user buffer must be mapped in user address space and word-aligned. 886 969 // The user buffer length must be multiple of 4 bytes. 887 // Me mustcompute the physical base addresses for both the device buffer970 // We compute the physical base addresses for both the device buffer 888 971 // and the user buffer before programming the DMA transfer. 889 972 // The GIET being fully static, we don't need to split the transfer in 4 Kbytes … … 891 974 // Returns 0 if success, > 0 if error. 
892 975 ////////////////////////////////////////////////////////////////////////////////// 893 unsigned int _dma_transfer( 894 unsigned int dev_type,895 unsigned int to_user,896 unsigned int offset,897 unsigned int user_vaddr,898 unsigned int length){899 #if NB_DMA S_MAX> 0976 unsigned int _dma_transfer( unsigned int dev_type, 977 unsigned int to_user, 978 unsigned int offset, 979 unsigned int user_vaddr, 980 unsigned int length ) 981 { 982 #if NB_DMA_CHANNELS > 0 900 983 unsigned int ko; // unsuccessfull V2P translation 984 unsigned int device_vbase; // device buffer vbase address 901 985 unsigned int flags; // protection flags 902 986 unsigned int ppn; // physical page number 903 unsigned int user_pbase; // user buffer pbase address 904 unsigned int device_pbase; // frame buffer pbase address 905 unsigned int device_vaddr; // device buffer vbase address 987 paddr_t user_pbase; // user buffer pbase address 988 paddr_t device_pbase; // frame buffer pbase address 906 989 907 990 // check user buffer address and length alignment 908 if ((user_vaddr & 0x3) || (length & 0x3)) { 991 if ((user_vaddr & 0x3) || (length & 0x3)) 992 { 909 993 _get_lock(&_tty_put_lock); 910 994 _puts("\n[GIET ERROR] in _dma_transfer : user buffer not word aligned\n"); … … 914 998 915 999 // get DMA channel and compute DMA vbase address 916 unsigned int task_id = _get_proc_task_id(); 917 unsigned int dma_id = _get_context_slot(task_id, CTX_DMA_ID); 918 unsigned int cluster_id = dma_id / NB_DMAS_MAX; 919 unsigned int loc_id = dma_id % NB_DMAS_MAX; 920 unsigned int * dma_base = (unsigned int *) ((char *) &seg_dma_base + 921 (cluster_id * (unsigned) CLUSTER_SIZE)); 922 1000 unsigned int dma_id = _get_context_slot(CTX_DMA_ID); 1001 if ( dma_id == 0xFFFFFFFF ) 1002 { 1003 _get_lock(&_tty_put_lock); 1004 _puts("\n[GIET ERROR] in _dma_transfer : no DMA channel allocated\n"); 1005 _release_lock(&_tty_put_lock); 1006 return 1; 1007 } 1008 unsigned int cluster_id = dma_id / NB_DMA_CHANNELS; 1009 unsigned int channel_id = dma_id % NB_DMA_CHANNELS; 1010 unsigned int * dma_vbase = (unsigned int *) ((char *) &seg_dma_base + 1011 (cluster_id * GIET_CLUSTER_INCREMENT)); 923 1012 // get page table address 924 unsigned int user_ptab = _get_context_slot(task_id, CTX_PTAB_ID); 925 926 // get peripheral buffer virtual address 927 if (dev_type) { 928 device_vaddr = (unsigned int) &seg_nic_base + offset; 929 } 930 else { 931 device_vaddr = (unsigned int) &seg_fbf_base + offset; 1013 unsigned int user_ptab = _get_context_slot(CTX_PTAB_ID); 1014 1015 // get devic buffer virtual address, depending on peripheral type 1016 if (dev_type == 0) 1017 { 1018 device_vbase = (unsigned int) &seg_fbf_base + offset; 1019 } 1020 else 1021 { 1022 _get_lock(&_tty_put_lock); 1023 _puts("\n[GIET ERROR] in _dma_transfer : device type not supported\n"); 1024 _release_lock(&_tty_put_lock); 1025 return 1; 932 1026 } 933 1027 934 1028 // get device buffer physical address 935 ko = _v2p_translate((page_table_t *) user_ptab, (device_vaddr >> 12), &ppn, &flags); 936 if (ko) { 1029 ko = _v2p_translate( (page_table_t*) user_ptab, 1030 (device_vbase >> 12), 1031 &ppn, 1032 &flags ); 1033 if (ko) 1034 { 937 1035 _get_lock(&_tty_put_lock); 938 1036 _puts("\n[GIET ERROR] in _dma_transfer : device buffer unmapped\n"); 939 1037 _release_lock(&_tty_put_lock); 940 return 2;941 } 942 device_pbase = ( ppn << 12) | (device_vaddr& 0x00000FFF);1038 return 1; 1039 } 1040 device_pbase = ((paddr_t)ppn << 12) | (device_vbase & 0x00000FFF); 943 1041 944 1042 // Compute user buffer 
physical address 945 ko = _v2p_translate((page_table_t*) user_ptab, (user_vaddr >> 12), &ppn, &flags); 946 if (ko) { 1043 ko = _v2p_translate( (page_table_t*) user_ptab, 1044 (user_vaddr >> 12), 1045 &ppn, 1046 &flags ); 1047 if (ko) 1048 { 947 1049 _get_lock(&_tty_put_lock); 948 1050 _puts("\n[GIET ERROR] in _dma_transfer() : user buffer unmapped\n"); 949 1051 _release_lock(&_tty_put_lock); 950 return 3;1052 return 1; 951 1053 } 952 if ((flags & PTE_U) == 0) { 1054 if ((flags & PTE_U) == 0) 1055 { 953 1056 _get_lock(&_tty_put_lock); 954 1057 _puts("[GIET ERROR] in _dma_transfer() : user buffer not in user space\n"); 955 1058 _release_lock(&_tty_put_lock); 956 return 4; 957 } 958 if (((flags & PTE_W) == 0 ) && to_user) { 1059 return 1; 1060 } 1061 if (((flags & PTE_W) == 0 ) && to_user) 1062 { 959 1063 _get_lock(&_tty_put_lock); 960 1064 _puts("\n[GIET ERROR] in _dma_transfer() : user buffer not writable\n"); 961 1065 _release_lock(&_tty_put_lock); 962 return 5;963 } 964 user_pbase = ( ppn<< 12) | (user_vaddr & 0x00000FFF);965 966 1066 return 1; 1067 } 1068 user_pbase = (((paddr_t)ppn) << 12) | (user_vaddr & 0x00000FFF); 1069 1070 /* This is a draft for IOMMU support 967 1071 968 1072 // loop on all virtual pages covering the user buffer … … 1014 1118 1015 1119 // invalidate data cache in case of memory write 1016 if (to_user) { 1017 _dcache_buf_invalidate((void *) user_vaddr, length); 1018 } 1019 1020 // get the lock 1021 _get_lock(&_dma_lock[dma_id]); 1120 if (to_user) _dcache_buf_invalidate((void *) user_vaddr, length); 1121 1122 // get the lock 1123 // _get_lock(&_dma_lock[dma_id]); 1124 1125 #if GIET_DEBUG_DMA_DRIVER 1126 _get_lock(&_tty_put_lock); 1127 _puts("\n[GIET DEBUG] DMA TRANSFER at cycle "); 1128 _putd( _proctime() ); 1129 _puts("\n - cluster_id = "); 1130 _putx( cluster_id ); 1131 _puts("\n - channel_id = "); 1132 _putx( channel_id ); 1133 _puts("\n - dma_vbase = "); 1134 _putx( (unsigned int)dma_vbase ); 1135 _puts("\n - device_buf_vbase = "); 1136 _putx( device_vbase ); 1137 _puts("\n - device_buf_pbase = "); 1138 _putl( device_pbase ); 1139 _puts("\n - user_buf_vbase = "); 1140 _putx( user_vaddr ); 1141 _puts("\n - user_buf_pbase = "); 1142 _putl( user_pbase ); 1143 _puts("\n"); 1144 _release_lock(&_tty_put_lock); 1145 #endif 1022 1146 1023 1147 // DMA configuration 1024 if (to_user) { 1025 dma_base[loc_id * DMA_SPAN + DMA_SRC] = (unsigned int) device_pbase; 1026 dma_base[loc_id * DMA_SPAN + DMA_DST] = (unsigned int) user_pbase; 1027 } 1028 else { 1029 dma_base[loc_id * DMA_SPAN + DMA_SRC] = (unsigned int) user_pbase; 1030 dma_base[loc_id * DMA_SPAN + DMA_DST] = (unsigned int) device_pbase; 1031 } 1032 dma_base[loc_id * DMA_SPAN + DMA_LEN] = (unsigned int) length; 1033 1034 return 0; 1035 #else //NB_DMAS_MAX == 0 1036 return -1; 1037 #endif 1148 if (to_user) 1149 { 1150 dma_vbase[channel_id * DMA_SPAN + DMA_SRC] = (unsigned int)(device_pbase); 1151 dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(device_pbase>>32); 1152 dma_vbase[channel_id * DMA_SPAN + DMA_DST] = (unsigned int)(user_pbase); 1153 dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(user_pbase>>32); 1154 } 1155 else 1156 { 1157 dma_vbase[channel_id * DMA_SPAN + DMA_SRC] = (unsigned int)(user_pbase); 1158 dma_vbase[channel_id * DMA_SPAN + DMA_SRC_EXT] = (unsigned int)(user_pbase>>32); 1159 dma_vbase[channel_id * DMA_SPAN + DMA_DST] = (unsigned int)(device_pbase); 1160 dma_vbase[channel_id * DMA_SPAN + DMA_DST_EXT] = (unsigned int)(device_pbase>>32); 1161 } 1162 dma_vbase[channel_id 
* DMA_SPAN + DMA_LEN] = (unsigned int) length; 1163 1164 return 0; 1165 1166 #else // NB_DMA_CHANNELS == 0 1167 _get_lock(&_tty_put_lock); 1168 _puts("\n[GIET ERROR] in _dma_transfer() : NB_DMA_CHANNELS == 0"); 1169 _release_lock(&_tty_put_lock); 1170 return 1; 1171 #endif 1172 1038 1173 } // end _dma_transfer() 1039 1174 … … 1047 1182 // (1 == read error / 2 == DMA idle error / 3 == write error) 1048 1183 ////////////////////////////////////////////////////////////////////////////////// 1049 unsigned int _dma_completed() {1050 #if NB_DMAS_MAX > 0 1051 unsigned int task_id = _get_proc_task_id(); 1052 unsigned int dma_id = _get_context_slot( task_id,CTX_DMA_ID);1184 unsigned int _dma_completed() 1185 { 1186 #if NB_DMA_CHANNELS > 0 1187 unsigned int dma_id = _get_context_slot(CTX_DMA_ID); 1053 1188 unsigned int dma_ret; 1054 1189 1055 1190 // busy waiting with a pseudo random delay between bus access 1056 while (_dma_done[dma_id] == 0) { 1191 while (_dma_done[dma_id] == 0) 1192 { 1057 1193 unsigned int delay = (( _proctime() ^ _procid() << 4) & 0x3F) + 1; 1058 1194 asm volatile( … … 1067 1203 } 1068 1204 1069 /* draft support for IOMMU 1070 // unmap the buffer from IOMMU page table if IOMMU is activated 1071 if ( GIET_IOMMU_ACTIVE ) 1072 { 1073 unsigned int* iob_address = (unsigned int*)&seg_iob_base; 1074 1075 unsigned int ix1 = _dma_iommu_ix1 + dma_id; 1076 unsigned int ix2; 1077 1078 for ( ix2 = 0 ; ix2 < _dma_iommu_npages[dma_id] ; ix2++ ) 1079 { 1080 // unmap the page in IOMMU page table 1081 _iommu_inval_pte2( ix1, // PT1 index 1082 ix2 ); // PT2 index 1083 1084 // clear IOMMU TLB 1085 iob_address[IOB_INVAL_PTE] = (ix1 << 21) | (ix2 << 12); 1086 } 1087 } 1088 */ 1205 #if GIET_DEBUG_DMA_DRIVER 1206 _get_lock(&_tty_put_lock); 1207 _puts("\n[GIET DEBUG] DMA COMPLETED at cycle "); 1208 _putd( _proctime() ); 1209 _puts("\n - cluster_id = "); 1210 _putx( dma_id/NB_DMA_CHANNELS ); 1211 _puts("\n - channel_id = "); 1212 _putx( dma_id%NB_DMA_CHANNELS ); 1213 _puts("\n"); 1214 _release_lock(&_tty_put_lock); 1215 #endif 1089 1216 1090 1217 // reset synchronization variables … … 1092 1219 dma_ret = _dma_status[dma_id]; 1093 1220 asm volatile("sync\n"); 1094 _dma_lock[dma_id] = 0; 1221 1222 // _dma_lock[dma_id] = 0; 1095 1223 1096 1224 return dma_ret; 1097 1225 1098 #else // NB_DMAS_MAX== 01226 #else // NB_DMA_CHANNELS == 0 1099 1227 return -1; 1100 1228 #endif 1229 1101 1230 } // end _dma_completed 1231 1102 1232 1103 1233 ////////////////////////////////////////////////////////////////////////////////// … … 1113 1243 // The '_fb_write()', '_fb_read()' and '_fb_completed()' functions use the 1114 1244 // VciMultiDma components (distributed in the clusters) to transfer data 1115 // between the user buffer and the frame buffer. A FBDMA channel is1245 // between the user buffer and the frame buffer. A DMA channel is 1116 1246 // allocated to each task requesting it in the mapping_info data structure. 1117 1247 ////////////////////////////////////////////////////////////////////////////////// … … 1124 1254 // - length : number of bytes to be transfered. 
1125 1255 ////////////////////////////////////////////////////////////////////////////////// 1126 unsigned int _fb_sync_write(unsigned int offset, const void * buffer, unsigned int length) { 1127 unsigned char * fb_address = (unsigned char *) &seg_fbf_base + offset; 1256 unsigned int _fb_sync_write( unsigned int offset, 1257 const void* buffer, 1258 unsigned int length) 1259 { 1260 unsigned char* fb_address = (unsigned char *) &seg_fbf_base + offset; 1128 1261 memcpy((void *) fb_address, (void *) buffer, length); 1129 1262 return 0; … … 1138 1271 // - length : number of bytes to be transfered. 1139 1272 ////////////////////////////////////////////////////////////////////////////////// 1140 unsigned int _fb_sync_read(unsigned int offset, const void * buffer, unsigned int length) { 1141 unsigned char * fb_address = (unsigned char *) &seg_fbf_base + offset; 1273 unsigned int _fb_sync_read( unsigned int offset, 1274 const void* buffer, 1275 unsigned int length) 1276 { 1277 unsigned char* fb_address = (unsigned char *) &seg_fbf_base + offset; 1142 1278 memcpy((void *) buffer, (void *) fb_address, length); 1143 1279 return 0; … … 1153 1289 // Returns 0 if success, > 0 if error. 1154 1290 ////////////////////////////////////////////////////////////////////////////////// 1155 unsigned int _fb_write(unsigned int offset, const void * buffer, unsigned int length) { 1156 return _dma_transfer( 1157 0, // frame buffer 1158 0, // write 1159 offset, 1160 (unsigned int) buffer, 1161 length); 1291 unsigned int _fb_write( unsigned int offset, 1292 const void* buffer, 1293 unsigned int length) 1294 { 1295 return _dma_transfer( 0, // frame buffer 1296 0, // write 1297 offset, 1298 (unsigned int) buffer, 1299 length ); 1162 1300 } 1163 1301 … … 1171 1309 // Returns 0 if success, > 0 if error. 1172 1310 ////////////////////////////////////////////////////////////////////////////////// 1173 unsigned int _fb_read(unsigned int offset, const void * buffer, unsigned int length) { 1174 return _dma_transfer( 1175 0, // frame buffer 1176 1, // read 1177 offset, 1178 (unsigned int) buffer, 1179 length); 1311 unsigned int _fb_read( unsigned int offset, 1312 const void* buffer, 1313 unsigned int length ) 1314 { 1315 return _dma_transfer( 0, // frame buffer 1316 1, // read 1317 offset, 1318 (unsigned int) buffer, 1319 length ); 1180 1320 } 1181 1321 … … 1188 1328 // (1 == read error / 2 == DMA idle error / 3 == write error) 1189 1329 ////////////////////////////////////////////////////////////////////////////////// 1190 unsigned int _fb_completed() { 1330 unsigned int _fb_completed() 1331 { 1191 1332 return _dma_completed(); 1192 1333 } … … 1215 1356 // - length : number of bytes to be transfered. 
1216 1357 ////////////////////////////////////////////////////////////////////////////////// 1217 unsigned int _nic_sync_write(unsigned int offset, const void * buffer, unsigned int length) { 1218 unsigned char * nic_address = (unsigned char *) &seg_nic_base + offset; 1358 unsigned int _nic_sync_write( unsigned int offset, 1359 const void* buffer, 1360 unsigned int length ) 1361 { 1362 unsigned char* nic_address = (unsigned char *) &seg_nic_base + offset; 1219 1363 memcpy((void *) nic_address, (void *) buffer, length); 1220 1364 return 0; … … 1279 1423 // (1 == read error / 2 == DMA idle error / 3 == write error) 1280 1424 ////////////////////////////////////////////////////////////////////////////////// 1281 unsigned int _nic_completed() { 1425 unsigned int _nic_completed() 1426 { 1282 1427 return _dma_completed(); 1283 1428 } … … 1286 1431 // _heap_info() 1287 1432 // This function returns the information associated to a heap (size and vaddr) 1288 // It uses the global task i d(CTX_GTID_ID, unique for each giet task) and the1289 // vspace i d (CTX_VSID_ID) defined in the context1433 // It uses the global task index (CTX_GTID_ID, unique for each giet task) and the 1434 // vspace index (CTX_VSID_ID) defined in the task context. 1290 1435 /////////////////////////////////////////////////////////////////////////////////// 1291 unsigned int _heap_info(unsigned int * vaddr, unsigned int * size) { 1436 unsigned int _heap_info( unsigned int* vaddr, 1437 unsigned int* size ) 1438 { 1292 1439 mapping_header_t * header = (mapping_header_t *) (&seg_mapping_base); 1293 1440 mapping_task_t * tasks = _get_task_base(header); 1294 1441 mapping_vobj_t * vobjs = _get_vobj_base(header); 1295 1442 mapping_vspace_t * vspaces = _get_vspace_base(header); 1296 unsigned int taskid = _get_context_slot(_get_proc_task_id(), CTX_GTID_ID); 1297 unsigned int vspaceid = _get_context_slot(_get_proc_task_id(), CTX_VSID_ID); 1443 1444 unsigned int taskid = _get_context_slot(CTX_GTID_ID); 1445 unsigned int vspaceid = _get_context_slot(CTX_VSID_ID); 1446 1298 1447 int heap_local_vobjid = tasks[taskid].heap_vobjid; 1299 if (heap_local_vobjid != -1) { 1448 if (heap_local_vobjid != -1) 1449 { 1300 1450 unsigned int vobjheapid = heap_local_vobjid + vspaces[vspaceid].vobj_offset; 1301 1451 *vaddr = vobjs[vobjheapid].vaddr; … … 1303 1453 return 0; 1304 1454 } 1305 else { 1455 else 1456 { 1306 1457 *vaddr = 0; 1307 1458 *size = 0; -
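Illustrative note on the frame buffer / DMA path above: a task that has been allocated a DMA channel (CTX_DMA_ID) starts an asynchronous transfer with _fb_write() and later blocks on _fb_completed(), which simply forwards to _dma_completed(). The sketch below is a minimal usage example, not part of the changeset; the image buffer and its size are hypothetical, and user applications would go through the corresponding system calls rather than calling the kernel functions directly.

// Illustrative only: asynchronous frame buffer update from kernel-side code.
// The buffer must be word aligned and its length a multiple of 4 bytes,
// as required by _dma_transfer().
unsigned char image[128*128] __attribute__((aligned(4)));   // hypothetical pixel buffer

void example_fb_update()
{
    if ( _fb_write( 0,                  // offset in the frame buffer
                    image,              // user buffer (virtual address)
                    sizeof(image) ) )   // length in bytes (multiple of 4)
    {
        return;   // configuration error: bad alignment, no DMA channel, unmapped buffer...
    }
    // busy-waits on the _dma_done[] synchronisation variable and returns the DMA status
    if ( _fb_completed() ) { /* transfer error */ }
}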
soft/giet_vm/sys/exc_handler.c
r232 r238 67 67 68 68 69 static void _display_cause(unsigned int type) { 69 static void _display_cause(unsigned int type) 70 { 70 71 _get_lock(&_tty_put_lock); 71 72 _puts("\n[GIET] Exception for task "); 72 _putd(_get_ proc_task_id());73 _putd(_get_context_slot(CTX_LTID_ID)); 73 74 _puts(" on processor "); 74 75 _putd(_procid()); … … 86 87 87 88 // goes to sleeping state 88 unsigned int task_id = _get_proc_task_id(); 89 _set_context_slot( task_id, CTX_RUN_ID, 0); 89 _set_context_slot(CTX_RUN_ID, 0); 90 90 91 91 // deschedule -
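The "block and deschedule" pattern used by the exception handler above can be restated in two lines; the wrapper name below is hypothetical and only groups the calls already visible in this file and in sys_handler.c.

// Hypothetical helper illustrating how a faulty task is put to sleep:
// mark the calling task as not runnable, then give the processor away.
static void _block_current_task()
{
    _set_context_slot( CTX_RUN_ID, 0 );   // task leaves the runnable state
    _ctx_switch();                        // the scheduler elects another task
}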
soft/giet_vm/sys/hwr_mapping.h
r228 r238 7 7 8 8 /* IOC (block device) */ 9 enum IOC_registers { 9 enum IOC_registers 10 { 10 11 BLOCK_DEVICE_BUFFER, 11 12 BLOCK_DEVICE_LBA, … … 16 17 BLOCK_DEVICE_SIZE, 17 18 BLOCK_DEVICE_BLOCK_SIZE, 19 BLOCK_DEVICE_BUFFER_EXT, 18 20 }; 19 enum IOC_operations { 21 enum IOC_operations 22 { 20 23 BLOCK_DEVICE_NOOP, 21 24 BLOCK_DEVICE_READ, 22 25 BLOCK_DEVICE_WRITE, 23 26 }; 24 enum IOC_status{ 27 enum IOC_status 28 { 25 29 BLOCK_DEVICE_IDLE, 26 30 BLOCK_DEVICE_BUSY, … … 33 37 34 38 /* DMA */ 35 enum DMA_registers { 39 enum DMA_registers 40 { 36 41 DMA_SRC = 0, 37 42 DMA_DST = 1, … … 39 44 DMA_RESET = 3, 40 45 DMA_IRQ_DISABLE = 4, 46 DMA_SRC_EXT = 5, 47 DMA_DST_EXT = 6, 41 48 /**/ 42 DMA_END = 5,49 DMA_END = 7, 43 50 DMA_SPAN = 8, 44 51 }; 45 52 46 53 /* GCD */ 47 enum GCD_registers { 54 enum GCD_registers 55 { 48 56 GCD_OPA = 0, 49 57 GCD_OPB = 1, … … 55 63 56 64 /* ICU */ 57 enum ICU_registers { 65 enum ICU_registers 66 { 58 67 ICU_INT = 0, 59 68 ICU_MASK = 1, … … 65 74 ICU_SPAN = 8, 66 75 }; 67 enum Xicu_registers { 76 enum Xicu_registers 77 { 68 78 XICU_WTI_REG = 0, 69 79 XICU_PTI_PER = 1, … … 92 102 93 103 /* TIMER */ 94 enum TIMER_registers { 104 enum TIMER_registers 105 { 95 106 TIMER_VALUE = 0, 96 107 TIMER_MODE = 1, … … 102 113 103 114 /* TTY */ 104 enum TTY_registers { 115 enum TTY_registers 116 { 105 117 TTY_WRITE = 0, 106 118 TTY_STATUS = 1, … … 112 124 113 125 /* IOB */ 114 enum IOB_registers { 126 enum IOB_registers 127 { 115 128 IOB_IOMMU_PTPR = 0, /* R/W : Page Table Pointer Register */ 116 129 IOB_IOMMU_ACTIVE = 1, /* R/W : IOMMU activated if not 0 */ … … 125 138 126 139 /* MWMR */ 127 enum SoclibMwmrRegisters { 140 enum SoclibMwmrRegisters 141 { 128 142 MWMR_IOREG_MAX = 16, 129 143 MWMR_RESET = MWMR_IOREG_MAX, … … 138 152 }; 139 153 140 enum SoclibMwmrWay { 154 enum SoclibMwmrWay 155 { 141 156 MWMR_TO_COPROC, 142 157 MWMR_FROM_COPROC, -
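These enums are word indexes into each peripheral's memory-mapped segment. As an illustration of how the new *_EXT registers carry physical addresses wider than 32 bits, the sketch below programs a 64 bits buffer address into the block device; it only restates the access pattern used by _ioc_access() in drivers.c (which uses the kernel paddr_t type for the same purpose). The multi-channel DMA follows the same idea, channel k's registers starting at word offset k * DMA_SPAN.

// Illustrative only: write a 64 bits physical buffer address to the IOC,
// using the BLOCK_DEVICE_BUFFER / BLOCK_DEVICE_BUFFER_EXT pair defined above.
extern unsigned int seg_ioc_base;

void example_ioc_set_buffer( unsigned long long buf_paddr )
{
    unsigned int* ioc = (unsigned int*)&seg_ioc_base;
    ioc[BLOCK_DEVICE_BUFFER]     = (unsigned int) buf_paddr;          // bits 31:0
    ioc[BLOCK_DEVICE_BUFFER_EXT] = (unsigned int)(buf_paddr >> 32);   // bits 63:32
}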
soft/giet_vm/sys/irq_handler.c
r237 r238 19 19 #include <hwr_mapping.h> 20 20 21 #if NB_TIM ERS_MAX22 extern volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM ERS_MAX] ;21 #if NB_TIM_CHANNELS 22 extern volatile unsigned char _user_timer_event[NB_CLUSTERS * NB_TIM_CHANNELS] ; 23 23 #endif 24 24 … … 33 33 // 34 34 // There is one interrupt vector per processor (stored in the scheduler associated 35 // to the processor. Each interrupt vector entry contains two 16 bits fields: 36 // - isr_id : defines the type of ISR to be executed. 35 // to the processor. Each interrupt vector entry contains three bits fields: 36 // - isr_id : defines the type of ISR to be executed. 37 // - type_id : HWI if zero / PTI if non zero 37 38 // - channel_id : defines the specific channel for multi-channels peripherals. 38 39 // … … 40 41 // a global index : channel_id = cluster_id * NB_CHANNELS_MAX + loc_id 41 42 /////////////////////////////////////////////////////////////////////////////////// 42 void _irq_demux() { 43 void _irq_demux() 44 { 43 45 unsigned int pid = _procid(); 44 46 unsigned int irq_id; 45 47 46 47 48 // get the highest priority active IRQ index 48 if (_icu_get_index( pid / NB_PROCS_MAX, pid % NB_PROCS_MAX, &irq_id)) { 49 if (_icu_get_index( pid / NB_PROCS_MAX, pid % NB_PROCS_MAX, &irq_id)) 50 { 49 51 _get_lock(&_tty_put_lock); 50 52 _puts("\n[GIET ERROR] Strange... Wrong _icu_read in _irq_demux()\n"); … … 52 54 } 53 55 54 55 if (irq_id < 32) { 56 // do nothing if no interrupt active 57 unsigned int entry = _get_interrupt_vector_entry(irq_id); 58 unsigned int isr_id = entry & 0x000000FF; 59 unsigned int type_id = (entry >> 8) & 0x000000FF; 60 unsigned int channel_id = (entry >> 16) & 0x0000FFFF; 56 // do nothing if no interrupt active 57 if (irq_id < 32) 58 { 59 static_scheduler_t* psched = _get_sched(); 60 unsigned int entry = psched->interrupt_vector[irq_id]; 61 unsigned int isr_id = entry & 0x000000FF; 62 unsigned int type_id = (entry >> 8) & 0x000000FF; 63 unsigned int channel_id = (entry >> 16) & 0x0000FFFF; 64 61 65 if(type_id == 0) // HARD irq type 62 66 { … … 76 80 } 77 81 78 79 82 /////////////////////////////////////////////////////////////////////////////////// 80 83 // _isr_default() … … 82 85 // interrupt vector. It simply displays an error message on kernel TTY[0]. 83 86 /////////////////////////////////////////////////////////////////////////////////// 84 void _isr_default() { 87 void _isr_default() 88 { 85 89 _get_lock(&_tty_put_lock); 86 90 _puts("\n[GIET ERROR] Strange... Default ISR activated for processor "); … … 96 100 // The multi_dma components can be distributed in the clusters. 97 101 // The channel_id argument is the local DMA channel index. 98 // dma_global_id = cluster_id*NB_DMA S_MAX+ channel_id102 // dma_global_id = cluster_id*NB_DMA_CHANNELS + channel_id 99 103 // - The ISR saves the transfert status in _dma_status[dma_global_id]. 100 104 // - It acknowledges the interrupt to reinitialize the DMA controler. 101 105 // - it resets the synchronisation variable _dma_busy[dma_global_id]. 
102 106 /////////////////////////////////////////////////////////////////////////////////// 103 void _isr_dma(unsigned int channel_id) { 104 #if NB_DMAS_MAX > 0 107 void _isr_dma(unsigned int channel_id) 108 { 109 #if NB_DMA_CHANNELS > 0 105 110 // compute cluster_id 106 111 unsigned int cluster_id = _procid() / NB_PROCS_MAX; 107 112 108 113 // compute dma_global_id 109 unsigned int dma_global_id = cluster_id * NB_DMA S_MAX+ channel_id;114 unsigned int dma_global_id = cluster_id * NB_DMA_CHANNELS + channel_id; 110 115 111 116 // save DMA channel status 112 117 if (_dma_get_status(cluster_id, channel_id, 113 (unsigned int *) &_dma_status[dma_global_id])) { 118 (unsigned int *) &_dma_status[dma_global_id])) 119 { 114 120 _get_lock(&_tty_put_lock); 115 121 _puts("[GIET ERROR] illegal DMA channel detected by _isr_dma\n"); … … 119 125 120 126 // reset DMA channel irq 121 if (_dma_reset_irq(cluster_id, channel_id)) { 127 if (_dma_reset_irq(cluster_id, channel_id)) 128 { 122 129 _get_lock(&_tty_put_lock); 123 130 _puts("[GIET ERROR] illegal DMA channel detected by _isr_dma\n"); … … 128 135 // release DMA channel 129 136 _dma_done[dma_global_id] = 1; 137 130 138 #else 131 _puts("[GIET ERROR] NB_DMAS_MAX is set to zero\n"); 132 139 _get_lock(&_tty_put_lock); 140 _puts("[GIET ERROR] NB_DMA_CHANNELS is set to zero\n"); 141 _release_lock(&_tty_put_lock); 133 142 #endif 134 143 } … … 140 149 // - It sets the _ioc_done variable to signal completion. 141 150 /////////////////////////////////////////////////////////////////////////////////// 142 void _isr_ioc() { 151 void _isr_ioc() 152 { 143 153 // save status & reset IRQ 144 if (_ioc_get_status((unsigned int *) &_ioc_status )) { 154 if (_ioc_get_status((unsigned int *) &_ioc_status )) 155 { 145 156 _get_lock(&_tty_put_lock); 146 157 _puts("[GIET ERROR] bad access to IOC status detected by _isr_ioc\n"); … … 161 172 // in a vci_multi_timer component, or in a vci_xicu component. 162 173 // The timer_id argument is the user timer local index. 163 // timer_globa_id = cluster_id*(NB_TIM ERS_MAX) + timer_id174 // timer_globa_id = cluster_id*(NB_TIM_CHANNELS) + timer_id 164 175 // The ISR acknowledges the IRQ and registers the event in the proper entry 165 176 // of the _timer_event[] array, and a log message is displayed on kernel terminal. 166 177 /////////////////////////////////////////////////////////////////////////////////// 167 void _isr_timer(unsigned int timer_id) { 178 void _isr_timer(unsigned int timer_id) 179 { 168 180 // compute cluster_id 169 181 unsigned int cluster_id = _procid() / NB_PROCS_MAX; 170 182 171 183 // aknowledge IRQ 172 if (_timer_reset_irq( cluster_id, timer_id)) { 184 if (_timer_reset_irq( cluster_id, timer_id)) 185 { 173 186 _get_lock(&_tty_put_lock); 174 187 _puts("[GIET ERROR] illegal timer index detected by _isr_timer\n"); … … 177 190 } 178 191 179 #if NB_TIM ERS_MAX192 #if NB_TIM_CHANNELS 180 193 // register the event 181 unsigned int timer_global_id = cluster_id * NB_TIM ERS_MAX+ timer_id;194 unsigned int timer_global_id = cluster_id * NB_TIM_CHANNELS + timer_id; 182 195 _user_timer_event[timer_global_id] = 1; 183 196 #endif … … 207 220 // A character is lost if the buffer is full when the ISR is executed. 
208 221 /////////////////////////////////////////////////////////////////////////////////// 209 void _isr_tty(unsigned int tty_id) { 222 void _isr_tty(unsigned int tty_id) 223 { 210 224 // save character and reset IRQ 211 if (_tty_get_char( tty_id, (unsigned char *) &_tty_get_buf[tty_id])) { 225 if (_tty_get_char( tty_id, (unsigned char *) &_tty_get_buf[tty_id])) 226 { 212 227 _get_lock(&_tty_put_lock); 213 228 _puts("[GIET ERROR] illegal tty index detected by _isr_tty\n"); … … 229 244 // The ISR acknowledges the IRQ and calls the _ctx_switch() function. 230 245 ///////////////////////////////////////////////////////////////////////////////////// 231 void _isr_switch( unsigned int timer_id) { 246 void _isr_switch( unsigned int timer_id) 247 { 232 248 // get cluster index and proc local index 233 249 unsigned int cluster_id = _procid() / NB_PROCS_MAX; 234 250 235 251 // acknowledge IRQ 236 if (_timer_reset_irq(cluster_id, timer_id)) { 252 if (_timer_reset_irq(cluster_id, timer_id)) 253 { 237 254 _get_lock(&_tty_put_lock); 238 255 _puts("[GIET ERROR] illegal proc index detected by _isr_switch\n"); -
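The interrupt vector entry layout decoded by _irq_demux() (isr_id in bits 7:0, type in bits 15:8, channel in bits 31:16) can be made explicit with a small encoding sketch; the helper name is hypothetical.

// Illustrative encoding of an interrupt vector entry, matching the fields
// extracted in _irq_demux():
//   bits  7:0  : isr_id      (ISR_DMA, ISR_IOC, ISR_TTY, ISR_TIMER, ISR_SWITCH...)
//   bits 15:8  : type_id     (0 = HWI / non zero = PTI)
//   bits 31:16 : channel_id  (local channel index for multi-channels peripherals)
unsigned int irq_entry_build( unsigned int isr_id,
                              unsigned int type_id,
                              unsigned int channel_id )
{
    return  (isr_id     & 0x000000FF)
         | ((type_id    & 0x000000FF) << 8)
         | ((channel_id & 0x0000FFFF) << 16);
}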
soft/giet_vm/sys/irq_handler.h
r228 r238 2 2 #define _IRQ_HANDLER_H 3 3 4 enum 4 enum isr_type_t 5 5 { 6 6 ISR_DEFAULT = 0, -
soft/giet_vm/sys/kernel_init.c
r228 r238 33 33 34 34 /////////////////////////////////////////////////////////////////////////////////// 35 // array of pointers on the page tables ( both virtual and physical addresses)35 // array of pointers on the page tables (virtual addresses) 36 36 /////////////////////////////////////////////////////////////////////////////////// 37 37 38 38 __attribute__((section (".kdata"))) 39 unsigned int _ptabs_paddr[GIET_NB_VSPACE_MAX]; 39 unsigned int _ptabs[GIET_NB_VSPACE_MAX]; // virtual addresses 40 41 __attribute__((section (".kdata"))) 42 unsigned int _ptprs[GIET_NB_VSPACE_MAX]; // physical addresses >> 13 43 44 /////////////////////////////////////////////////////////////////////////////////// 45 // array of pointers on the schedulers (physical addresses) 46 /////////////////////////////////////////////////////////////////////////////////// 40 47 41 48 __attribute__((section (".kdata"))) 42 unsigned int _ptabs_vaddr[GIET_NB_VSPACE_MAX];43 44 /////////////////////////////////////////////////////////////////////////////////// 45 // array of pointers on the schedulers (physical addresses)49 static_scheduler_t* _schedulers[NB_CLUSTERS * NB_PROCS_MAX]; 50 51 /////////////////////////////////////////////////////////////////////////////////// 52 // staks for the "idle" tasks (256 bytes for each processor) 46 53 /////////////////////////////////////////////////////////////////////////////////// 47 54 48 55 __attribute__((section (".kdata"))) 49 static_scheduler_t * _schedulers_paddr[NB_CLUSTERS * NB_PROCS_MAX]; 50 51 /////////////////////////////////////////////////////////////////////////////////// 52 // staks for the "idle" tasks (256 bytes for each processor) 53 /////////////////////////////////////////////////////////////////////////////////// 54 55 __attribute__((section (".kdata"))) 56 unsigned int _idle_stack[NB_CLUSTERS*NB_PROCS_MAX * 64]; 57 58 void _sys_exit() { 56 unsigned int _idle_stack[NB_CLUSTERS * NB_PROCS_MAX * 64]; 57 58 //////////////// 59 void _sys_exit() 60 { 59 61 while (1); 60 62 } … … 63 65 ////////////////////////////////////////////////////////////////////////////////// 64 66 // This function is the entry point for the last step of the boot sequence. 67 // that is done in parallel by all processors, with MMU activated. 
65 68 ////////////////////////////////////////////////////////////////////////////////// 66 __attribute__((section (".kinit"))) void _kernel_init() { 67 // compute cluster and local processor index 68 unsigned int global_pid = _procid(); 69 unsigned int cluster_id = global_pid / NB_PROCS_MAX; 70 unsigned int proc_id = global_pid % NB_PROCS_MAX; 71 72 // Step 0 : Compute number of tasks allocated to proc 73 74 unsigned int tasks = _get_tasks_number(); 75 76 #if GIET_DEBUG_INIT 77 _get_lock(&_tty_put_lock); 78 _puts("\n[GIET DEBUG] step 0 for processor "); 79 _putd(global_pid); 80 _puts(" : tasks = "); 81 _putd(tasks); 82 _puts("\n"); 83 _release_lock(&_tty_put_lock); 84 #endif 85 86 // step 1 : Initialise scheduler physical addresses array 87 // get scheduler physical address (from CP0 register) 88 89 static_scheduler_t * psched = (static_scheduler_t *) _get_sched(); 90 _schedulers_paddr[global_pid] = psched; 91 92 #if GIET_DEBUG_INIT 93 _get_lock(&_tty_put_lock); 94 _puts("\n[GIET DEBUG] step 1 for processor "); 95 _putd(global_pid); 96 _puts(" / scheduler pbase = "); 97 _putx((unsigned int) psched); 98 _puts("\n"); 99 _release_lock(&_tty_put_lock); 100 #endif 101 102 // step 2 : initialise page table addresse arrays 69 __attribute__((section (".kinit"))) void _kernel_init() 70 { 71 // Step 1 : get processor index, 72 // get scheduler address 73 // initialise _schedulers[] array 74 75 unsigned int global_pid = _procid(); 76 unsigned int cluster_id = global_pid / NB_PROCS_MAX; 77 unsigned int proc_id = global_pid % NB_PROCS_MAX; 78 static_scheduler_t* psched = _get_sched(); 79 unsigned int tasks = psched->tasks; 80 81 _schedulers[global_pid] = psched; 82 83 #if GIET_DEBUG_INIT 84 _get_lock(&_tty_put_lock); 85 _puts("\n[GIET DEBUG] step 1 for processor "); 86 _putd(global_pid); 87 _puts(" : tasks = "); 88 _putd(tasks); 89 _puts(" / scheduler vbase = "); 90 _putx((unsigned int) psched); 91 _puts("\n"); 92 _release_lock(&_tty_put_lock); 93 #endif 94 95 // step 2 : initialise ptabs[] & ptprs[] arrays 103 96 // each processor scans all tasks contexts in its 104 97 // private scheduler and get VSID, PTAB and PTPR values … … 106 99 unsigned int ltid; 107 100 108 for (ltid = 0; ltid < tasks; ltid++) { 109 unsigned int vspace_id = _get_context_slot(ltid , CTX_VSID_ID); 110 unsigned int ptab_vaddr = _get_context_slot(ltid , CTX_PTAB_ID); 111 unsigned int ptab_paddr = _get_context_slot(ltid , CTX_PTPR_ID) << 13; 112 113 _ptabs_vaddr[vspace_id] = ptab_vaddr; 114 _ptabs_paddr[vspace_id] = ptab_paddr; 115 116 #if GIET_DEBUG_INIT 117 _get_lock(&_tty_put_lock); 118 _puts("\n[GIET DEBUG] step 2 for processor "); 119 _putd(global_pid); 120 _puts(" / vspace "); 121 _putd(vspace_id); 122 _puts("\n- ptab vbase = "); 123 _putx(ptab_vaddr); 124 _puts("\n- ptab pbase = "); 125 _putx(ptab_paddr); 126 _puts("\n"); 127 _release_lock(&_tty_put_lock); 101 for (ltid = 0; ltid < tasks; ltid++) 102 { 103 unsigned int vsid = _get_task_slot(ltid , CTX_VSID_ID); 104 unsigned int ptab = _get_task_slot(ltid , CTX_PTAB_ID); 105 unsigned int ptpr = _get_task_slot(ltid , CTX_PTPR_ID); 106 107 _ptabs[vsid] = ptab; 108 _ptprs[vsid] = ptpr; 109 110 #if GIET_DEBUG_INIT 111 _get_lock(&_tty_put_lock); 112 _puts("\n[GIET DEBUG] step 2 for processor "); 113 _putd(global_pid); 114 _puts(" / vspace "); 115 _putd(vsid); 116 _puts("\n- ptab = "); 117 _putx(ptab); 118 _puts("\n- ptpr = "); 119 _putx(ptpr); 120 _puts("\n"); 121 _release_lock(&_tty_put_lock); 128 122 #endif 129 123 130 124 } 131 132 unsigned int isr_switch_channel = 
0xFFFFFFFF;133 125 134 126 // step 3 : compute and set ICU masks … … 136 128 // software interrupts are not supported yet 137 129 138 unsigned int irq_id; 130 unsigned int isr_switch_channel = 0xFFFFFFFF; 131 unsigned int irq_id; // IN_IRQ index 139 132 unsigned int hwi_mask = 0; 140 133 unsigned int pti_mask = 0; 141 134 142 for (irq_id = 0; irq_id < 32; irq_id++) { 143 unsigned int entry = _get_interrupt_vector_entry(irq_id); 144 unsigned int isr = entry & 0x000000FF; 145 146 if ((isr == ISR_DMA) || (isr == ISR_IOC) || (isr == ISR_TTY)) { 135 for (irq_id = 0; irq_id < 32; irq_id++) 136 { 137 unsigned int entry = psched->interrupt_vector[irq_id]; 138 unsigned int isr = entry & 0x000000FF; 139 140 if ((isr == ISR_DMA) || (isr == ISR_IOC) || (isr == ISR_TTY)) 141 { 147 142 hwi_mask = hwi_mask | 0x1 << irq_id; 148 143 } 149 else if ((isr == ISR_SWITCH)) { 144 else if ((isr == ISR_SWITCH)) 145 { 150 146 pti_mask = pti_mask | 0x1 << irq_id; 151 147 isr_switch_channel = irq_id; 152 148 } 153 else if ((isr == ISR_TIMER)) { 149 else if ((isr == ISR_TIMER)) 150 { 154 151 pti_mask = pti_mask | 0x1 << irq_id; 155 152 } 156 153 } 154 155 #if GIET_DEBUG_INIT 156 _get_lock(&_tty_put_lock); 157 _puts("\n[GIET DEBUG] step 3 for processor "); 158 _putd(global_pid); 159 _puts("\n - ICU HWI_MASK = "); 160 _putx(hwi_mask); 161 _puts("\n - ICU PTI_MASK = "); 162 _putx(pti_mask); 163 _puts("\n"); 164 _release_lock(&_tty_put_lock); 165 #endif 166 157 167 _icu_set_mask(cluster_id, proc_id, hwi_mask, 0); // set HWI_MASK 158 168 _icu_set_mask(cluster_id, proc_id, pti_mask, 1); // set PTI_MASK 159 169 160 #if GIET_DEBUG_INIT161 _get_lock(&_tty_put_lock);162 _puts("\n[GIET DEBUG] step 3 for processor ");163 _putd(global_pid);164 _puts("\n - ICU HWI_MASK = ");165 _putx(hwi_mask);166 _puts("\n - ICU PTI_MASK = ");167 _putx(pti_mask);168 _puts("\n");169 _release_lock(&_tty_put_lock);170 #endif171 172 170 // step 4 : start TICK timer if more than one task 173 if (tasks > 1) { 174 if (isr_switch_channel == 0xFFFFFFFF) { 171 if (tasks > 1) 172 { 173 if (isr_switch_channel == 0xFFFFFFFF) 174 { 175 175 _get_lock(&_tty_put_lock); 176 176 _puts("\n[GIET ERROR] ISR_SWITCH not found on proc "); … … 181 181 } 182 182 183 if (_timer_start( cluster_id, isr_switch_channel, GIET_TICK_VALUE)) { 183 if (_timer_start( cluster_id, isr_switch_channel, GIET_TICK_VALUE)) 184 { 184 185 _get_lock(&_tty_put_lock); 185 186 _puts("\n[GIET ERROR] ISR_SWITCH init error for proc "); … … 191 192 192 193 #if GIET_DEBUG_INIT 193 194 195 196 197 194 _get_lock(&_tty_put_lock); 195 _puts("\n[GIET DEBUG] Step 4 for processor "); 196 _putd(global_pid); 197 _puts(" / context switch activated\n"); 198 _release_lock(&_tty_put_lock); 198 199 #endif 199 200 … … 206 207 // the stack size is 256 bytes 207 208 208 _set_context_slot( IDLE_TASK_INDEX, CTX_RUN_ID, 1); 209 _set_context_slot( IDLE_TASK_INDEX, CTX_SR_ID, 0xFF03); 210 _set_context_slot( IDLE_TASK_INDEX, CTX_SP_ID, (unsigned int) _idle_stack + ((global_pid + 1) << 8)); 211 _set_context_slot( IDLE_TASK_INDEX, CTX_RA_ID, (unsigned int) &_ctx_eret); 212 _set_context_slot( IDLE_TASK_INDEX, CTX_EPC_ID, (unsigned int) &_ctx_idle); 213 _set_context_slot( IDLE_TASK_INDEX, CTX_LTID_ID, IDLE_TASK_INDEX); 214 _set_context_slot( IDLE_TASK_INDEX, CTX_PTPR_ID, _ptabs_paddr[0] >> 13); 215 216 #if GIET_DEBUG_INIT 217 _get_lock(&_tty_put_lock); 218 _puts("\n[GIET DEBUG] Step 5 for processor "); 219 _putd(global_pid); 220 _puts(" / idle task context set\n"); 221 _release_lock(&_tty_put_lock); 209 unsigned int stack = 
(unsigned int)_idle_stack + ((global_pid + 1)<<8); 210 211 _set_task_slot( IDLE_TASK_INDEX, CTX_RUN_ID, 1); 212 _set_task_slot( IDLE_TASK_INDEX, CTX_SR_ID, 0xFF03); 213 _set_task_slot( IDLE_TASK_INDEX, CTX_SP_ID, stack); 214 _set_task_slot( IDLE_TASK_INDEX, CTX_RA_ID, (unsigned int) &_ctx_eret); 215 _set_task_slot( IDLE_TASK_INDEX, CTX_EPC_ID, (unsigned int) &_ctx_idle); 216 _set_task_slot( IDLE_TASK_INDEX, CTX_LTID_ID, IDLE_TASK_INDEX); 217 _set_task_slot( IDLE_TASK_INDEX, CTX_PTPR_ID, _ptprs[0]); 218 219 #if GIET_DEBUG_INIT 220 _get_lock(&_tty_put_lock); 221 _puts("\n[GIET DEBUG] Step 5 for processor "); 222 _putd(global_pid); 223 _puts(" / idle task context set\n"); 224 _release_lock(&_tty_put_lock); 222 225 #endif 223 226 … … 226 229 // and starts the "idle" task if there is no task allocated. 227 230 228 unsigned int task_id; 229 230 if (tasks == 0) { 231 task_id = IDLE_TASK_INDEX; 231 ltid = 0; 232 233 if (tasks == 0) 234 { 235 ltid = IDLE_TASK_INDEX; 232 236 233 237 _get_lock(&_tty_put_lock); … … 237 241 _release_lock (&_tty_put_lock); 238 242 } 239 else { 240 task_id = 0; 241 } 242 243 unsigned int sp_value = _get_context_slot(task_id, CTX_SP_ID); 244 unsigned int sr_value = _get_context_slot(task_id, CTX_SR_ID); 245 unsigned int ptpr_value = _get_context_slot(task_id, CTX_PTPR_ID); 246 unsigned int epc_value = _get_context_slot(task_id, CTX_EPC_ID); 247 248 #if GIET_DEBUG_INIT 249 _get_lock(&_tty_put_lock); 250 _puts("\n[GIET DEBUG] step 6 for processor "); 251 _putd(global_pid); 252 _puts(" / registers initialised \n"); 253 _puts("- sp = "); 254 _putx(sp_value); 255 _puts("\n"); 256 _puts("- sr = "); 257 _putx(sr_value); 258 _puts("\n"); 259 _puts("- ptpr = "); 260 _putx(ptpr_value << 13); 261 _puts("\n"); 262 _puts("- epc = "); 263 _putx(epc_value); 264 _puts("\n"); 265 _release_lock(&_tty_put_lock); 266 #endif 243 244 unsigned int sp_value = _get_task_slot(ltid, CTX_SP_ID); 245 unsigned int sr_value = _get_task_slot(ltid, CTX_SR_ID); 246 unsigned int ptpr_value = _get_task_slot(ltid, CTX_PTPR_ID); 247 unsigned int epc_value = _get_task_slot(ltid, CTX_EPC_ID); 248 249 _set_task_slot( ltid, CTX_LTID_ID, ltid); 250 251 #if GIET_DEBUG_INIT 252 _get_lock(&_tty_put_lock); 253 _puts("\n[GIET DEBUG] step 6 for processor "); 254 _putd(global_pid); 255 _puts(" / registers initialised \n"); 256 _puts("- sp = "); 257 _putx(sp_value); 258 _puts("\n"); 259 _puts("- sr = "); 260 _putx(sr_value); 261 _puts("\n"); 262 _puts("- ptpr = "); 263 _putx(ptpr_value); 264 _puts("\n"); 265 _puts("- epc = "); 266 _putx(epc_value); 267 _puts("\n"); 268 _release_lock(&_tty_put_lock); 269 #endif 270 271 _get_lock(&_tty_put_lock); 272 _puts("\n[GIET] Processor "); 273 _putd( global_pid ); 274 _puts(" starting user code at cycle "); 275 _putd( _proctime() ); 276 _puts("\n"); 277 _release_lock(&_tty_put_lock); 267 278 268 279 // set registers and jump to user code -
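A short worked example of the idle stack computation done in step 5: the _idle_stack[] array provides NB_CLUSTERS * NB_PROCS_MAX * 64 words, i.e. 256 bytes per processor, and each processor sets its stack pointer to the top of its own slice because MIPS stacks grow toward lower addresses. For instance, with NB_CLUSTERS = 4 and NB_PROCS_MAX = 4 (hypothetical values), the array is 4 Kbytes and processor 5 owns bytes [1280 ... 1535].

// Illustrative only: per-processor idle stack pointer, as computed in step 5.
extern unsigned int _idle_stack[];

unsigned int idle_stack_top( unsigned int global_pid )
{
    return (unsigned int)_idle_stack + ((global_pid + 1) << 8);   // 256 bytes per processor
}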
soft/giet_vm/sys/mips32_registers.h
r228 r238 47 47 /* CP0 registers */ 48 48 49 #define CP0_BVAR $8 50 #define CP0_TIME $9 51 #define CP0_SR $12 52 #define CP0_CR $13 53 #define CP0_EPC $14 54 #define CP0_PROCID $15,1 55 #define CP0_SCHED $22 49 #define CP0_BVAR $8 50 #define CP0_TIME $9 51 #define CP0_SR $12 52 #define CP0_CR $13 53 #define CP0_EPC $14 54 #define CP0_PROCID $15,1 55 #define CP0_SCHED $22,0 56 #define CP0_SCHED_EXT $22,1 56 57 57 58 /* CP2 registers */ -
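The new CP0_SCHED / CP0_SCHED_EXT pair uses the MIPS32 select field of register $22. The two halves can be read with the three-operand form of mfc0, as sketched below; how the kernel combines them is not shown in this changeset, so the helpers are only an assumption based on the register names.

// Illustrative only: reading CP0 register $22, select 0 and select 1.
static inline unsigned int example_get_sched()
{
    unsigned int val;
    asm volatile( "mfc0 %0, $22, 0" : "=r"(val) );   // CP0_SCHED
    return val;
}
static inline unsigned int example_get_sched_ext()
{
    unsigned int val;
    asm volatile( "mfc0 %0, $22, 1" : "=r"(val) );   // CP0_SCHED_EXT
    return val;
}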
soft/giet_vm/sys/switch.s
r199 r238 1 1 /****************************************************************************** 2 2 * This function receives two arguments that are the current task context 3 * physical addresses and the next task context physical address. 4 * The DTLB is temporary desactivated... 3 * (virtual) addresses and the next task context (virtual) address. 5 4 ******************************************************************************/ 6 5 … … 10 9 11 10 _task_switch: 12 13 /* desactivate DTLB */14 ori $27, $0, 0xB15 mtc2 $27, $1 /* DTLB desactivated */16 11 17 12 /* save _current task context */ … … 113 108 mtc2 $26, $0 /* restore PTPR */ 114 109 115 /* activate DTLB */116 ori $27, $0, 0xF117 mtc2 $27, $1118 119 110 /* returns to caller */ 120 111 jr $31 -
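Since the two task contexts are now passed as virtual addresses, the data MMU can stay active across the whole switch, which is why the DTLB on/off sequences were dropped. From C, the routine is simply called with the two context save areas; the prototype below is a sketch, and the scheduler field name in the commented call is hypothetical (the real structure is defined with the scheduler, not in this file).

// Illustrative only: calling convention of the assembly routine above.
// Both arguments are virtual addresses of task context save areas.
extern void _task_switch( unsigned int* curr_ctx_vaddr,
                          unsigned int* next_ctx_vaddr );

/* inside the scheduler, once the next task has been elected (field name hypothetical):
   _task_switch( &psched->context[curr_ltid][0], &psched->context[next_ltid][0] );    */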
soft/giet_vm/sys/sys_handler.c
r237 r238 31 31 &_gcd_read, /* 0x07 */ 32 32 &_heap_info, /* 0x08 */ 33 &_ get_proc_task_id,/* 0x09 */34 &_g et_global_task_id,/* 0x0A */33 &_local_task_id, /* 0x09 */ 34 &_global_task_id, /* 0x0A */ 35 35 &_sys_ukn, /* 0x0B */ 36 36 &_sys_ukn, /* 0x0C */ … … 69 69 } 70 70 71 72 71 //////////////////////////////////////////////////////////////////////////// 73 72 // _exit() … … 77 76 unsigned int date = _proctime(); 78 77 unsigned int proc_id = _procid(); 79 unsigned int task_id = _get_ proc_task_id();78 unsigned int task_id = _get_context_slot(CTX_LTID_ID); 80 79 81 80 // print death message … … 91 90 92 91 // goes to sleeping state 93 _set_context_slot( task_id,CTX_RUN_ID, 0);92 _set_context_slot(CTX_RUN_ID, 0); 94 93 95 94 // deschedule … … 97 96 } 98 97 99 100 98 ////////////////////////////////////////////////////////////////////////////// 101 99 // _procid() … … 103 101 // Max number or processors is 1024. 104 102 ////////////////////////////////////////////////////////////////////////////// 105 unsigned int _procid() { 103 unsigned int _procid() 104 { 106 105 unsigned int ret; 107 106 asm volatile("mfc0 %0, $15, 1" : "=r" (ret)); … … 109 108 } 110 109 111 112 110 ////////////////////////////////////////////////////////////////////////////// 113 111 // _proctime() 114 112 // Access CP0 and returns current processor's elapsed clock cycles since boot. 115 113 ////////////////////////////////////////////////////////////////////////////// 116 unsigned int _proctime() { 114 unsigned int _proctime() 115 { 117 116 unsigned int ret; 118 117 asm volatile("mfc0 %0, $9" : "=r" (ret)); … … 120 119 } 121 120 122 123 121 ////////////////////////////////////////////////////////////////////////////// 124 122 // _procnumber() … … 126 124 // specified by the cluster_id argument. 127 125 ////////////////////////////////////////////////////////////////////////////// 128 unsigned int _procs_number(unsigned int cluster_id, unsigned int * buffer) { 126 unsigned int _procs_number(unsigned int cluster_id, 127 unsigned int* buffer) 128 { 129 129 mapping_header_t * header = (mapping_header_t *) &seg_mapping_base; 130 130 mapping_cluster_t * cluster = _get_cluster_base(header); … … 139 139 } 140 140 141 142 int _get_vobj(char * vspace_name, char * vobj_name, unsigned int vobj_type, mapping_vobj_t ** res_vobj) { 141 ///////////////////////////////////////////////////////////////////////////// 142 // _local_task_id() 143 // Returns current task local index. 144 ///////////////////////////////////////////////////////////////////////////// 145 unsigned int _local_task_id() 146 { 147 return _get_context_slot(CTX_LTID_ID); 148 } 149 150 ///////////////////////////////////////////////////////////////////////////// 151 // _global_task_id() 152 // Returns current task global index. 153 ///////////////////////////////////////////////////////////////////////////// 154 unsigned int _global_task_id() 155 { 156 return _get_context_slot(CTX_GTID_ID); 157 } 158 159 ///////////////////////////////////////////////////////////////////////////// 160 // _get_vobj() 161 // This function writes in res_vobj a pointer on a vobj 162 // identified by the (vspace_name / vobj_name ) couple. 163 // The vobj_type argument is here only for the purpose of checking . 164 // returns 0: success, else: failed. 
165 ///////////////////////////////////////////////////////////////////////////// 166 int _get_vobj( char* vspace_name, 167 char* vobj_name, 168 unsigned int vobj_type, 169 mapping_vobj_t** res_vobj ) 170 { 143 171 mapping_header_t * header = (mapping_header_t *) &seg_mapping_base; 144 172 mapping_vspace_t * vspace = _get_vspace_base(header); 145 mapping_vobj_t * vobj = _get_vobj_base(header);173 mapping_vobj_t * vobj = _get_vobj_base(header); 146 174 147 175 unsigned int vspace_id; … … 149 177 150 178 // scan vspaces 151 for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) { 152 if (_strncmp( vspace[vspace_id].name, vspace_name, 31) == 0) { 179 for (vspace_id = 0; vspace_id < header->vspaces; vspace_id++) 180 { 181 if (_strncmp( vspace[vspace_id].name, vspace_name, 31) == 0) 182 { 153 183 // scan vobjs 154 184 for (vobj_id = vspace[vspace_id].vobj_offset; 155 185 vobj_id < (vspace[vspace_id].vobj_offset + vspace[vspace_id].vobjs); 156 vobj_id++) { 157 158 if (_strncmp(vobj[vobj_id].name, vobj_name, 31) == 0) { 159 if (vobj[vobj_id].type != vobj_type) { 186 vobj_id++) 187 { 188 if (_strncmp(vobj[vobj_id].name, vobj_name, 31) == 0) 189 { 190 if (vobj[vobj_id].type != vobj_type) 191 { 160 192 _get_lock(&_tty_put_lock); 161 193 _puts("*** Error in _get_obj: wrong type\n"); … … 176 208 } 177 209 178 179 210 ///////////////////////////////////////////////////////////////////////////// 180 211 // _vobj_get_vbase() … … 184 215 // returns 0: success, else: failed. 185 216 ///////////////////////////////////////////////////////////////////////////// 186 unsigned int _vobj_get_vbase( 187 char * vspace_name, 188 char * vobj_name, 189 unsigned int vobj_type, 190 unsigned int * vobj_vaddr) { 191 mapping_vobj_t * res_vobj; 192 unsigned int ret; 193 if ((ret = _get_vobj(vspace_name, vobj_name, vobj_type, &res_vobj))) { 217 unsigned int _vobj_get_vbase( char* vspace_name, 218 char* vobj_name, 219 unsigned int vobj_type, 220 unsigned int* vobj_vaddr ) 221 { 222 mapping_vobj_t* res_vobj; 223 unsigned int ret; 224 if ((ret = _get_vobj(vspace_name, vobj_name, vobj_type, &res_vobj))) 225 { 194 226 return ret; 195 227 } 196 197 228 *vobj_vaddr = res_vobj->vaddr; 198 229 return 0; 199 230 } 200 201 231 202 232 ///////////////////////////////////////////////////////////////////////////// … … 207 237 // returns 0: success, else: failed. 208 238 ///////////////////////////////////////////////////////////////////////////// 209 unsigned int _vobj_get_length( 210 char * vspace_name, 211 char * vobj_name, 212 unsigned int vobj_type, 213 unsigned int * vobj_length) { 214 239 unsigned int _vobj_get_length( char* vspace_name, 240 char* vobj_name, 241 unsigned int vobj_type, 242 unsigned int* vobj_length ) 243 { 215 244 mapping_vobj_t * res_vobj; 216 245 unsigned int ret; 217 if ((ret = _get_vobj(vspace_name, vobj_name, vobj_type, &res_vobj))) { 246 if ((ret = _get_vobj(vspace_name, vobj_name, vobj_type, &res_vobj))) 247 { 218 248 return ret; 219 249 } 220 221 250 *vobj_length = res_vobj->length; 222 223 251 return 0; 224 252 } … … 229 257 // This functions masks interruptions before calling _ctx_switch 230 258 // (They are usually masked when we receive a isr_switch interrupt 231 // because we execute isrs with interrupt masked)259 // because we execute ISRs with interrupt masked) 232 260 //////////////////////////////////////////////////////////////// 233 void _context_switch() { 261 void _context_switch() 262 { 234 263 _it_disable(); 235 264 _ctx_switch(); -
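As an example of the name-based lookup performed by _get_vobj(), kernel code (or a system call handler) can retrieve the base address of a vobj as sketched below. The vspace and vobj names are hypothetical, and the expected type constant (here assumed to be VOBJ_TYPE_MWMR) comes from mapping_info.h.

// Illustrative only: get the virtual base address of a vobj identified
// by the (vspace name / vobj name) couple. Names are examples, not real objects.
unsigned int example_lookup()
{
    unsigned int vbase;
    if ( _vobj_get_vbase( "my_vspace",       // hypothetical vspace name
                          "my_channel",      // hypothetical vobj name
                          VOBJ_TYPE_MWMR,    // expected type (assumed, see mapping_info.h)
                          &vbase ) )
    {
        return 0;                            // lookup failed: bad name or wrong type
    }
    return vbase;
}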
soft/giet_vm/sys/sys_handler.h
r228 r238 19 19 ////////////////////////////////////////////////////////////////////////////////// 20 20 21 void _sys_ukn();22 void _exit();23 void _context_switch();21 void _sys_ukn(); 22 void _exit(); 23 void _context_switch(); 24 24 unsigned int _procid(); 25 25 unsigned int _proctime(); 26 unsigned int _procs_number(unsigned int cluster_id, unsigned int * buffer ); 27 unsigned int _vobj_get_vbase(char * vspace_name, char * vobj_name, unsigned vobj_type, unsigned int * vobj_buffer); 26 unsigned int _local_task_id(); 27 unsigned int _global_task_id(); 28 29 unsigned int _procs_number( unsigned int cluster_id, 30 unsigned int* buffer ); 31 32 unsigned int _vobj_get_vbase( char* vspace_name, 33 char* vobj_name, 34 unsigned vobj_type, 35 unsigned int* vobj_buffer); 28 36 29 37 #endif -
soft/giet_vm/sys/vm_handler.c
r228 r238 8 8 // They contains the kernel data structures and functions used to dynamically 9 9 // handle the iommu page table. 10 //11 // TODO : We must transfer here the functions used to statically build12 // the page tables associated to the various vspaces (now in boot_handler.c)13 //14 10 /////////////////////////////////////////////////////////////////////////////////// 15 11 … … 18 14 #include <common.h> 19 15 #include <giet_config.h> 16 #include <drivers.h> 20 17 21 18 ///////////////////////////////////////////////////////////////////////////// … … 88 85 // Returns 0 if success, 1 if PTE1 or PTE2 unmapped 89 86 ////////////////////////////////////////////////////////////////////////////// 90 unsigned int _v2p_translate( 91 page_table_t * pt, 92 unsigned int vpn, 93 unsigned int * ppn, 94 unsigned int * flags) { 95 unsigned int ptba; 96 register unsigned int * pte2; 97 register unsigned int flags_value; 98 register unsigned int ppn_value; 87 unsigned int _v2p_translate( page_table_t* pt, 88 unsigned int vpn, 89 unsigned int* ppn, 90 unsigned int* flags ) 91 { 92 paddr_t ptba; 93 paddr_t pte2; 94 95 register unsigned int pte2_msb; 96 register unsigned int pte2_lsb; 97 register unsigned int flags_value; 98 register unsigned int ppn_value; 99 99 100 100 unsigned int ix1 = vpn >> 9; 101 101 unsigned int ix2 = vpn & 0x1FF; 102 /* 103 _puts("\n\n********************** entering v2p_translate"); 104 _puts("\n - pt = "); 105 _putx( (unsigned int)pt ); 106 _puts("\n - vpn = "); 107 _putx( vpn << 12 ); 108 _puts("\n - ptba = "); 109 _putx( pt->pt1[ix1] << 12 ) ; 110 _puts("\n - &pte2 = "); 111 _putx( (pt->pt1[ix1] << 12) + 8*ix2 ); 112 _puts("\n - flags = "); 113 _putx( *(unsigned int*)((pt->pt1[ix1] << 12) + 8*ix2) ); 114 _puts("\n"); 115 */ 102 116 103 // check PTE1 mapping 117 if ((pt->pt1[ix1] & PTE_V) == 0) { 118 return 1; 119 } 120 else { 104 if ((pt->pt1[ix1] & PTE_V) == 0) return 1; 105 else 106 { 121 107 // get physical addresses of pte2 122 ptba = pt->pt1[ix1] << 12; 123 pte2 = (unsigned int *) (ptba + 8 * ix2); 108 ptba = (paddr_t)(pt->pt1[ix1] & 0x0FFFFFFF) << 12; 109 pte2 = ptba + 8*ix2; 110 pte2_lsb = (unsigned int)pte2; 111 pte2_msb = (unsigned int)(pte2 >> 32); 124 112 125 113 // gets ppn_value and flags_value, after temporary DTLB desactivation 126 114 asm volatile ( 127 "li $2 7,0xFFFFFFFE \n" /* Mask for IE bits */128 "mfc0 $ 26, $12 \n" /* save SR*/129 "and $2 7, $26, $27\n"130 "mtc0 $2 7,$12 \n" /* disable Interrupts */115 "li $2, 0xFFFFFFFE \n" /* Mask for IE bits */ 116 "mfc0 $4, $12 \n" /* $4 <= SR */ 117 "and $2, $2, $4 \n" 118 "mtc0 $2, $12 \n" /* disable Interrupts */ 131 119 132 "li $ 27,0xB \n"133 "mtc2 $ 27,$1 \n" /* DTLB unactivated */120 "li $3, 0xB \n" 121 "mtc2 $3, $1 \n" /* DTLB unactivated */ 134 122 135 "move $27, %2 \n" /* $27 <= pte2 */ 136 "lw %0, 0($27) \n" /* read flags */ 137 "lw %1, 4($27) \n" /* read ppn */ 123 "mtc2 %2, $24 \n" /* PADDR_EXT <= msb */ 124 "lw %0, 0(%3) \n" /* read flags */ 125 "lw %1, 4(%3) \n" /* read ppn */ 126 "mtc2 $0, $24 \n" /* PADDR_EXT <= 0 */ 138 127 139 "li $ 27,0xF \n"140 "mtc2 $ 27,$1 \n" /* DTLB activated */128 "li $3, 0xF \n" 129 "mtc2 $3, $1 \n" /* DTLB activated */ 141 130 142 "mtc0 $ 26,$12 \n" /* restore SR */131 "mtc0 $4, $12 \n" /* restore SR */ 143 132 : "=r" (flags_value), "=r" (ppn_value) 144 : "r" (pte2 )145 : "$2 6","$27","$8");133 : "r" (pte2_msb), "r" (pte2_lsb) 134 : "$2","$3","$4"); 146 135 147 136 // check PTE2 mapping 148 if ((flags_value & PTE_V) == 0) { 149 return 1; 150 } 137 if ((flags_value & 
PTE_V) == 0) return 1; 151 138 152 139 // set return values
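The address arithmetic performed by _v2p_translate() can be summarised as follows, keeping only the computation and leaving out the PTE validity checks and the DTLB-off access sequence shown above.

// Illustrative only: index and address computation used by _v2p_translate()
// for the two-level page table (4 Kbytes pages, 512 PTE2 per second-level table).
//   ix1   = vpn >> 9                      : index in the first level table (PT1)
//   ix2   = vpn & 0x1FF                   : index in the second level table (PT2)
//   ptba  = (pte1 & 0x0FFFFFFF) << 12     : physical base of the PT2
//   pte2  = ptba + 8 * ix2                : each PTE2 occupies two 32 bits words
//   paddr = ((paddr_t)ppn << 12) | (vaddr & 0xFFF)
paddr_t example_paddr( unsigned int ppn, unsigned int vaddr )
{
    // paddr_t is the kernel's 64 bits physical address type, as used above
    return ((paddr_t)ppn << 12) | (paddr_t)(vaddr & 0x00000FFF);
}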