- Timestamp: Jul 18, 2015, 3:04:15 PM
- Location: soft/giet_vm/giet_drivers
- Files: 5 edited
soft/giet_vm/giet_drivers/bdv_driver.c
r593 → r630

    ///////////////////////////////////////////////////////////////////////////////
+   // Extern variables
+   ///////////////////////////////////////////////////////////////////////////////
+
+   // allocated in the boot.c or kernel_init.c files
+   extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];
+
+   ///////////////////////////////////////////////////////////////////////////////
    // Global variables
    ///////////////////////////////////////////////////////////////////////////////
…
        _it_disable( &save_sr );

-       // reset runnable
-       _set_task_slot( x, y, p, ltid, CTX_RUN_ID, 0 );
+       // Set NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+       unsigned int*       ptr    = &psched->context[ltid][CTX_NORUN_ID];
+       _atomic_or( ptr , NORUN_MASK_IOC );

        // launch transfer
…
        // identify task waiting on BDV
-       unsigned int remote_procid  = _bdv_gtid>>16;
-       unsigned int ltid           = _bdv_gtid & 0xFFFF;
-       unsigned int remote_cluster = remote_procid >> P_WIDTH;
-       unsigned int remote_x       = remote_cluster >> Y_WIDTH;
-       unsigned int remote_y       = remote_cluster & ((1<<Y_WIDTH)-1);
-       unsigned int remote_p       = remote_procid & ((1<<P_WIDTH)-1);
-
-       // re-activates sleeping task
-       _set_task_slot( remote_x,
-                       remote_y,
-                       remote_p,
-                       ltid,
-                       CTX_RUN_ID,   // CTX_RUN slot
-                       1 );          // running value
+       unsigned int procid  = _bdv_gtid>>16;
+       unsigned int ltid    = _bdv_gtid & 0xFFFF;
+       unsigned int cluster = procid >> P_WIDTH;
+       unsigned int x       = cluster >> Y_WIDTH;
+       unsigned int y       = cluster & ((1<<Y_WIDTH)-1);
+       unsigned int p       = procid & ((1<<P_WIDTH)-1);
+
+       // Reset NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+       unsigned int*       ptr    = &psched->context[ltid][CTX_NORUN_ID];
+       _atomic_and( ptr , ~NORUN_MASK_IOC );

        // send a WAKUP WTI to processor running the sleeping task
-       _xcu_send_wti( remote_cluster,
-                      remote_p,
+       _xcu_send_wti( cluster,
+                      p,
                       0 );    // don't force context switch

    #if GIET_DEBUG_IOC
-       unsigned int procid = _get_procid();
-       unsigned int x      = procid >> (Y_WIDTH + P_WIDTH);
-       unsigned int y      = (procid >> P_WIDTH) & ((1<<Y_WIDTH)-1);
-       unsigned int p      = procid & ((1<<P_WIDTH)-1);
+       unsigned int pid = _get_procid();
+       unsigned int c_x = pid >> (Y_WIDTH + P_WIDTH);
+       unsigned int c_y = (pid >> P_WIDTH) & ((1<<Y_WIDTH)-1);
+       unsigned int c_p = pid & ((1<<P_WIDTH)-1);
        if ( _get_proctime() > GIET_DEBUG_IOC )
        _printf("\n[BDV DEBUG] Processor[%d,%d,%d] enters _bdv_isr() at cycle %d\n"
                " for task %d running on P[%d,%d,%d] / bdv_status = %x\n",
-               x , y , p , _get_proctime() ,
-               ltid , remote_x , remote_y , remote_p , status );
+               c_x , c_y , c_p , _get_proctime() ,
+               ltid , x , y , p , status );
    #endif
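All five edited drivers apply the same change: the per-task CTX_RUN flag written with _set_task_slot() is replaced by an atomic set/clear of the NORUN_MASK_IOC bit in the task's scheduler context. The sketch below condenses that protocol into two helpers, purely for illustration: the stub type, the numeric constants and the helper names (ioc_block_current_task, ioc_wakeup_task) are assumptions, not part of the GIET_VM sources; only _schedulers, CTX_NORUN_ID, NORUN_MASK_IOC, _atomic_or, _atomic_and and _xcu_send_wti come from the changeset itself.

// Illustrative sketch only: the stub type, constants and helper names below
// are assumptions; the real definitions live in the GIET_VM kernel headers.

#define X_SIZE         2          // illustrative platform dimensions
#define Y_SIZE         2
#define NB_PROCS_MAX   4
#define NB_TASKS_MAX   8          // assumed number of tasks per scheduler
#define NB_CTX_SLOTS   64         // assumed number of context slots
#define CTX_NORUN_ID   30         // assumed slot index
#define NORUN_MASK_IOC 0x2        // assumed bit value

typedef struct static_scheduler_s
{
    unsigned int context[NB_TASKS_MAX][NB_CTX_SLOTS];   // one row per task
} static_scheduler_t;

// provided elsewhere in GIET_VM (boot.c / kernel_init.c and kernel utilities)
extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];
extern void _atomic_or   ( unsigned int* ptr, unsigned int mask );
extern void _atomic_and  ( unsigned int* ptr, unsigned int mask );
extern void _xcu_send_wti( unsigned int cluster, unsigned int lpid,
                           unsigned int wdata );

// client side (hypothetical helper): mark the task as blocked on a pending
// I/O operation before launching the transfer, inside a critical section
static void ioc_block_current_task( unsigned int x, unsigned int y,
                                    unsigned int p, unsigned int ltid )
{
    static_scheduler_t* psched = _schedulers[x][y][p];
    _atomic_or( &psched->context[ltid][CTX_NORUN_ID], NORUN_MASK_IOC );
}

// ISR side (hypothetical helper): clear the blocking bit and send a WTI to
// the processor owning the task so its scheduler can elect the task again
static void ioc_wakeup_task( unsigned int cluster, unsigned int x,
                             unsigned int y, unsigned int p,
                             unsigned int ltid )
{
    static_scheduler_t* psched = _schedulers[x][y][p];
    _atomic_and( &psched->context[ltid][CTX_NORUN_ID], ~NORUN_MASK_IOC );
    _xcu_send_wti( cluster, p, 0 );   // don't force a context switch
}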
soft/giet_vm/giet_drivers/hba_driver.c
r603 → r630

    #include <vmem.h>

-   ///////////////////////////////////////////////////////////////////////////////////
+   //////////////////////////////////////////////////////////////////////////////////
+   // Extern variables
+   //////////////////////////////////////////////////////////////////////////////////
+
+   // allocated in the boot.c or kernel_init.c files
+   extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];
+
+   //////////////////////////////////////////////////////////////////////////////////
    // Global variables
-   ///////////////////////////////////////////////////////////////////////////////////
-
    //////////////////////////////////////////////////////////////////////////////////
    // The global variable hba_boot_mode defines the way the HBA component is used
    // and must be defined in both kernel_init.c and boot.c files.
-   // - during the boot phase, only one processor has access to the HBA in synchronous
-   //   mode , there is no need for the allocator to use a lock
+   // - during the boot phase, only one processor access the HBA in synchronous
+   //   mode. There is no need for the allocator to use a lock.
    // - after the boot phase, the HBA device can be used by several processors. The
    //   allocator is protected by a sqt_lock.
…
        // in descheduling mode, we deschedule the task
        // and use an interrupt to reschedule the task.
-       // We need a critical section, because we must reset the RUN bit
+       // We need a critical section, because we must set the NORUN bit
        // before to launch the transfer, and we don't want to be
        // descheduled between these two operations.
…
        _it_disable( &save_sr );

-       // reset runnable
-       _set_task_slot( x, y, p, ltid, CTX_RUN_ID, 0 );
-
+       // Set NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+       unsigned int*       ptr    = &psched->context[ltid][CTX_NORUN_ID];
+       _atomic_or( ptr , NORUN_MASK_IOC );
+
        // start HBA transfer
        _hba_set_register( HBA_PXCI, (1<<cmd_id) );
…
        // identify waiting task
-       unsigned int remote_procid  = _hba_gtid[cmd_id]>>16;
-       unsigned int ltid           = _hba_gtid[cmd_id] & 0xFFFF;
-       unsigned int remote_cluster = remote_procid >> P_WIDTH;
-       unsigned int remote_x       = remote_cluster >> Y_WIDTH;
-       unsigned int remote_y       = remote_cluster & ((1<<Y_WIDTH)-1);
-       unsigned int remote_p       = remote_procid & ((1<<P_WIDTH)-1);
+       unsigned int procid  = _hba_gtid[cmd_id]>>16;
+       unsigned int ltid    = _hba_gtid[cmd_id] & 0xFFFF;
+       unsigned int cluster = procid >> P_WIDTH;
+       unsigned int x       = cluster >> Y_WIDTH;
+       unsigned int y       = cluster & ((1<<Y_WIDTH)-1);
+       unsigned int p       = procid & ((1<<P_WIDTH)-1);

-       // re-activates waiting task
-       _set_task_slot( remote_x,
-                       remote_y,
-                       remote_p,
-                       ltid,
-                       CTX_RUN_ID,
-                       1 );
+       // Reset NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+       unsigned int*       ptr    = &psched->context[ltid][CTX_NORUN_ID];
+       _atomic_and( ptr , ~NORUN_MASK_IOC );

        // send a WAKUP WTI to processor running the waiting task
-       _xcu_send_wti( remote_cluster ,
-                      remote_p ,
+       _xcu_send_wti( cluster ,
+                      p ,
                       0 );   // don't force context switch
…
                " resume task %d running on P[%d,%d,%d]\n",
                cmd_id , _get_proctime() ,
-               ltid , remote_x , remote_y , remote_p );
+               ltid , x , y , p );
    #endif
    }
soft/giet_vm/giet_drivers/mmc_driver.c
r615 → r630

    ///////////////////////////////////////////////////////////////////////////////
-   // Locks protecting MMC components (one per cluster)
-   // There are two kinds of lock: the _mmc_lock table contains all the locks and
-   // and is stored in the kernel data segment, whereas the _mmc_distributed_lock
-   // contains the addresses of locks which are distributed in every cluster (each
-   // cluster contains the lock which protects its own mmc_component).
+   // Global variables
+   ///////////////////////////////////////////////////////////////////////////////
+   // Two kinds of locks protecting the MMC components (one per cluster):
+   // - the _mmc_lock array contains spin_locks allocated in cluster[0,0].
+   //   They must be used by the boot code because the kernel heap is not set.
+   // - the _mmc_distributed_locks array contains pointers on distributed
+   //   spin_loks allocated in the distributed heap in each cluster.
+   //   Each cluster contains the lock protecting its own mmc_component.
+   //   They can be used by the kernel code.
    // The global variable mmc_boot_mode define the type of lock which is used,
    // and must be defined in both kernel_init.c and boot.c files.
-   // - the boot code must use a spin_lock because the kernel heap is not set.
-   // - the kernel code can use a sqt_lock when the kernel heap is set.
-   ///////////////////////////////////////////////////////////////////////////////
-
-   extern unsigned int _mmc_boot_mode;
+   ///////////////////////////////////////////////////////////////////////////////
+
+   __attribute__((section(".kdata")))
+   unsigned int _mmc_boot_mode;

    __attribute__((section(".kdata")))
…
    void _mmc_init_locks()
    {
-       unsigned int cx;   // cluster X coordinate
-       unsigned int cy;   // cluster Y coordinate
+       unsigned int x;    // cluster X coordinate
+       unsigned int y;    // cluster Y coordinate

-       for ( cx = 0 ; cx < X_SIZE ; cx++ )
-       {
-           for ( cy = 0 ; cy < Y_SIZE ; cy++ )
+       for ( x = 0 ; x < X_SIZE ; x++ )
+       {
+           for ( y = 0 ; y < Y_SIZE ; y++ )
            {
-               _mmc_distributed_lock[cx][cy] = _remote_malloc( sizeof(spin_lock_t), cx, cy );
-               _spin_lock_init( _mmc_distributed_lock[cx][cy] );
+               if ( _mmc_boot_mode )
+               {
+                   _spin_lock_init( &_mmc_lock[x][y] );
+               }
+               else
+               {
+                   _mmc_distributed_lock[x][y] = _remote_malloc( sizeof(spin_lock_t), x, y );
+                   _spin_lock_init( _mmc_distributed_lock[x][y] );
+               }
            }
        }
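The rewritten comment block above explains that _mmc_boot_mode selects between the statically allocated _mmc_lock array (boot code, kernel heap not yet initialized) and the per-cluster _mmc_distributed_lock pointers (kernel code). A rough sketch of how a caller might pick the right lock is shown below; it is only an illustration, and the acquire/release helper names (_spin_lock_acquire, _spin_lock_release) as well as the wrapper names are assumptions not confirmed by this changeset.

// Sketch only: the spin lock API names and the wrapper functions below are
// assumptions; the two arrays and _mmc_boot_mode come from mmc_driver.c itself.

#define X_SIZE  2                       // illustrative platform dimensions
#define Y_SIZE  2

typedef struct spin_lock_s { unsigned int taken; } spin_lock_t;   // reduced stub

extern unsigned int _mmc_boot_mode;                           // defined in mmc_driver.c
extern spin_lock_t  _mmc_lock[X_SIZE][Y_SIZE];                // allocated in cluster[0,0]
extern spin_lock_t* _mmc_distributed_lock[X_SIZE][Y_SIZE];    // allocated in each cluster heap

extern void _spin_lock_acquire( spin_lock_t* lock );          // assumed helper names
extern void _spin_lock_release( spin_lock_t* lock );

// take / release the lock protecting the MMC component of cluster[x][y]
static void mmc_lock_acquire( unsigned int x, unsigned int y )
{
    if ( _mmc_boot_mode ) _spin_lock_acquire( &_mmc_lock[x][y] );            // boot code
    else                  _spin_lock_acquire( _mmc_distributed_lock[x][y] ); // kernel code
}

static void mmc_lock_release( unsigned int x, unsigned int y )
{
    if ( _mmc_boot_mode ) _spin_lock_release( &_mmc_lock[x][y] );
    else                  _spin_lock_release( _mmc_distributed_lock[x][y] );
}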
soft/giet_vm/giet_drivers/mwr_driver.c
r563 → r630

    #endif

-
-   /////////////////////////////////////////////////////////////////////////////
-   // Global variables (all arrays are indexed by the cluster index)
+   /////////////////////////////////////////////////////////////////////////////
+   // Extern variables
+   /////////////////////////////////////////////////////////////////////////////
+
+   // allocated in the boot.c or kernel_init.c files
+   extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];
+
+   /////////////////////////////////////////////////////////////////////////////
+   // Global variables
+   /////////////////////////////////////////////////////////////////////////////
+   // All arrays are indexed by the cluster index.
    /////////////////////////////////////////////////////////////////////////////
…
    {
        // get coprocessor coordinates and characteristics
-       // the processor executing the ISR an the coprocessor
-       // are in the same cluster
+       // processor executing ISR and coprocessor are in the same cluster
        unsigned int gpid       = _get_procid();
        unsigned int cluster_xy = gpid >> P_WIDTH;
…
        unsigned int nb_to   = info & 0xFF;
        unsigned int nb_from = (info>>8) & 0xFF;
+
        unsigned int channel;
        unsigned int status;
        unsigned int error   = 0;

-       // check status, report errors and reset all channels
+       // check status, report errors and reset for all channels
        for ( channel = 0 ; channel < (nb_to + nb_from) ; channel++ )
        {
…
        // identify task waiting on coprocessor completion
        // this task can run in a remote cluster
-       unsigned int gtid           = _coproc_gtid[cluster_id];
-       unsigned int remote_procid  = gtid>>16;
-       unsigned int ltid           = gtid & 0xFFFF;
-       unsigned int remote_cluster = remote_procid >> P_WIDTH;
-       unsigned int remote_x       = remote_cluster >> Y_WIDTH;
-       unsigned int remote_y       = remote_cluster & ((1<<Y_WIDTH)-1);
-       unsigned int remote_p       = remote_procid & ((1<<P_WIDTH)-1);
-
-       // re-activates sleeping task
-       _set_task_slot( remote_x,
-                       remote_y,
-                       remote_p,
-                       ltid,
-                       CTX_RUN_ID,   // CTX_RUN slot
-                       1 );          // running value
+       unsigned int r_gtid    = _coproc_gtid[cluster_id];
+       unsigned int r_procid  = r_gtid>>16;
+       unsigned int r_ltid    = r_gtid & 0xFFFF;
+       unsigned int r_cluster = r_procid >> P_WIDTH;
+       unsigned int r_x       = r_cluster >> Y_WIDTH;
+       unsigned int r_y       = r_cluster & ((1<<Y_WIDTH)-1);
+       unsigned int r_p       = r_procid & ((1<<P_WIDTH)-1);
+
+       // Reset NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[r_x][r_y][r_p];
+       unsigned int*       ptr    = &psched->context[r_ltid][CTX_NORUN_ID];
+       _atomic_and( ptr , ~NORUN_MASK_IOC );

        // send a WAKUP WTI to processor running the sleeping task
-       _xcu_send_wti( remote_cluster,
-                      remote_p,
+       _xcu_send_wti( r_cluster,
+                      r_p,
                       0 );    // don't force context switch
…
        _printf("\n[GIET DEBUG COPROC] P[%d,%d,%d] executes _mwr_isr() at cycle %d\n"
                " for task %d running on P[%d,%d,%d] / error = %d\n",
-               x , y , p , _get_proctime() , ltid , remote_x , remote_y , remote_p , error );
+               x , y , p , _get_proctime() , ltid , r_x , r_y , r_p , error );
    #endif
-   }
+   } // end _mwr_isr()
soft/giet_vm/giet_drivers/sdc_driver.c
r603 → r630

    #define SDC_POLLING_TIMEOUT  1000000     // number of retries for polling PXCI
+
+   //////////////////////////////////////////////////////////////////////////////////
+   // Extern variables
+   //////////////////////////////////////////////////////////////////////////////////
+
+   // allocated in the boot.c or kernel_init.c files
+   extern static_scheduler_t* _schedulers[X_SIZE][Y_SIZE][NB_PROCS_MAX];

    ///////////////////////////////////////////////////////////////////////////////////
…
        // in descheduling mode, we deschedule the task
        // and use an interrupt to reschedule the task.
-       // We need a critical section, because we must reset the RUN bit
+       // We need a critical section, because we must set the NORUN bit
        // before to launch the transfer, and we don't want to be
        // descheduled between these two operations.
…
        _it_disable( &save_sr );

-       // reset runnable
-       _set_task_slot( x, y, p, ltid, CTX_RUN_ID, 0 );
-
+       // Set NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+       unsigned int*       ptr    = &psched->context[ltid][CTX_NORUN_ID];
+       _atomic_or( ptr , NORUN_MASK_IOC );
+
        // start transfer
        _sdc_set_register( AHCI_PXCI, (1<<ptw) );
…
        // identify waiting task
-       unsigned int remote_procid  = _ahci_gtid[ptr]>>16;
-       unsigned int ltid           = _ahci_gtid[ptr] & 0xFFFF;
-       unsigned int remote_cluster = remote_procid >> P_WIDTH;
-       unsigned int remote_x       = remote_cluster >> Y_WIDTH;
-       unsigned int remote_y       = remote_cluster & ((1<<Y_WIDTH)-1);
-       unsigned int remote_p       = remote_procid & ((1<<P_WIDTH)-1);
+       unsigned int procid  = _ahci_gtid[ptr]>>16;
+       unsigned int ltid    = _ahci_gtid[ptr] & 0xFFFF;
+       unsigned int cluster = procid >> P_WIDTH;
+       unsigned int x       = cluster >> Y_WIDTH;
+       unsigned int y       = cluster & ((1<<Y_WIDTH)-1);
+       unsigned int p       = procid & ((1<<P_WIDTH)-1);

-       // re-activates waiting task
-       _set_task_slot( remote_x,
-                       remote_y,
-                       remote_p,
-                       ltid,
-                       CTX_RUN_ID,
-                       1 );
+       // Reset NORUN_MASK_IOC bit
+       static_scheduler_t* psched = (static_scheduler_t*)_schedulers[x][y][p];
+       unsigned int*       ptr    = &psched->context[ltid][CTX_NORUN_ID];
+       _atomic_and( ptr , ~NORUN_MASK_IOC );

        // send a WAKUP WTI to processor running the waiting task
-       _xcu_send_wti( remote_cluster ,
-                      remote_p ,
+       _xcu_send_wti( cluster ,
+                      p ,
                       0 );   // don't force context switch
…
                " resume task %d running on P[%d,%d,%d] / status = %x\n",
                ptr , _get_proctime() ,
-               ltid , remote_x , remote_y , remote_p , _ahci_status[ptr] );
+               ltid , x , y , p , _ahci_status[ptr] );
    #endif
    }
…
            break;
        }
-   }
+   } // end for completed commands
    } // end _sdc_isr()
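All the interrupt service routines touched by this changeset (_bdv_isr, the HBA command ISR, _mwr_isr and _sdc_isr) decode the saved global task identifier the same way: the upper 16 bits carry the global processor index, itself the concatenation of the cluster coordinates and the local processor index, and the lower 16 bits carry the local task index. The standalone example below reproduces that decoding with illustrative width values; Y_WIDTH and P_WIDTH are platform constants whose real values are not given in this changeset.

#include <stdio.h>

// Illustrative widths: the real values come from the platform configuration.
#define Y_WIDTH  4
#define P_WIDTH  4

int main( void )
{
    // build an example gtid: local task 5 running on processor P[1,2,3]
    unsigned int procid = (((1u << Y_WIDTH) | 2u) << P_WIDTH) | 3u;
    unsigned int gtid   = (procid << 16) | 5u;

    // decoding performed by the ISRs in this changeset
    unsigned int pid     = gtid >> 16;
    unsigned int ltid    = gtid & 0xFFFF;
    unsigned int cluster = pid >> P_WIDTH;
    unsigned int x       = cluster >> Y_WIDTH;
    unsigned int y       = cluster & ((1u << Y_WIDTH) - 1);
    unsigned int p       = pid & ((1u << P_WIDTH) - 1);

    printf( "task %u waits on P[%u,%u,%u] (cluster index 0x%x)\n",
            ltid, x, y, p, cluster );
    return 0;
}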