Changeset 709 for soft/giet_vm/giet_boot
- Timestamp: Oct 1, 2015, 4:20:46 PM (9 years ago)
- File: 1 edited
Legend:
- Unmodified : no prefix
- Added : prefixed with "+"
- Removed : prefixed with "-"
soft/giet_vm/giet_boot/boot.c
r695 → r709

  // - the "map.bin" file contains the hardware architecture description,
  //   the set of user applications that will be mapped on the architecture,
- //   and the mapping directives. The mapping includes the placement of tasks
+ //   and the mapping directives. The mapping includes the placement of threads
  //   on processors, and the placement of virtual segments on the physical
- //   segments. It must be stored in the seg_boot_mapping segment
+ //   segments. It is stored in the seg_boot_mapping segment
  //   (at address SEG_BOOT_MAPPING_BASE defined in hard_config.h file).
  // - the "kernel.elf" file contains the kernel binary code and data.
…
  // of the software objects (vsegs) on the physical memory banks (psegs).
  // The max number of vspaces (GIET_NB_VSPACE_MAX) is a configuration parameter.
- // The page table are statically built in the boot phase, and they do not
- // change during execution. For each application, the page tables are replicated
- // in all clusters.
+ // The page tables are statically built in the boot phase, and they do not
+ // change during execution.
+ // For each application, the page tables are replicated in all clusters.
  // The GIET_VM uses both small pages (4 Kbytes), and big pages (2 Mbytes).
  // Each page table (one page table per virtual space) is monolithic, and
…
  //
  // 3) The Giet-VM implements one private scheduler per processor.
- // For each application, the tasks are statically allocated to processors
- // and there is no task migration during execution.
- // Each scheduler occupies 8K bytes, and contains up to 14 task contexts.
- // The task context [13] is reserved for the "idle" task that does nothing, and
- // is launched by the scheduler when there is no other runnable task.
+ // For each application, the threads are statically allocated to processors
+ // and there is no thread migration during execution.
+ // Each scheduler occupies 8K bytes, and contains up to 14 thread contexts.
+ // The thread context [13] is reserved for the "idle" thread that does nothing,
+ // and is launched by the scheduler when there is no other runnable thread.
  ///////////////////////////////////////////////////////////////////////////////////
  // Implementation Notes:
…
  unsigned int _hba_boot_mode = 1;

+ // required for concurrent PTAB building
  __attribute__((section(".kdata")))
  spin_lock_t _ptabs_spin_lock[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];
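The lock array added here has one lock per page table, since the GIET builds one PTAB per (vspace, cluster) pair, and several P[x][y][0] processors may update the same PTAB concurrently. A minimal self-contained sketch of the intended serialization; the lock type and the acquire/release primitives below are stand-ins (GCC atomic builtins), not the actual giet_vm spin_lock API:

#define GIET_NB_VSPACE_MAX 4
#define X_SIZE             2
#define Y_SIZE             2

typedef volatile unsigned int spin_lock_t;

static spin_lock_t _ptabs_spin_lock[GIET_NB_VSPACE_MAX][X_SIZE][Y_SIZE];

static void add_pte_sketch( unsigned int vsid, unsigned int x, unsigned int y )
{
    spin_lock_t *lock = &_ptabs_spin_lock[vsid][x][y];

    while ( __sync_lock_test_and_set( lock, 1 ) )   /* acquire: GCC builtin */
        ;                                           /* spin while held      */

    /* ... critical section: insert the PTE in the PTAB of
     *     vspace[vsid] in cluster[x][y] ...                                */

    __sync_lock_release( lock );                    /* release the lock     */
}

Because locks are indexed by (vspace, x, y), processors filling different page tables never contend; only updates to the same replicated PTAB are serialized.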
  ////////////////////////////////////////////////////////////////////////////////////
  // This function is executed in parallel by all processors P[x][y][0].
- // It initialises all schedulers in cluster[x][y]. The MMU must be activated.
+ // P[x][y][0] initialises all schedulers in cluster[x][y]. The MMU must be activated.
  // It is split in two phases separated by a synchronisation barrier.
- // - In Step 1, it initialises the _schedulers[x][y][l] pointers array, the
- //   idle_task context, the HWI / PTI / WTI interrupt vectors,
- //   and the CU HWI / PTI / WTI masks.
- // - In Step 2, it scans all tasks in all vspaces to complete the tasks contexts
+ // - In Step 1, it initialises the _schedulers[x][y][p] pointers array, the
+ //   idle_thread context, the HWI / PTI / WTI interrupt vectors,
+ //   and the XCU HWI / PTI / WTI masks.
+ // - In Step 2, it scans all threads in all vspaces to complete the thread contexts
  //   initialisation as specified in the mapping_info data structure,
  //   and set the CP0_SCHED register.
…
  mapping_vspace_t* vspace = _get_vspace_base(header);
  mapping_vseg_t*   vseg   = _get_vseg_base(header);
- mapping_task_t*   task   = _get_task_base(header);
+ mapping_thread_t* thread = _get_thread_base(header);
  mapping_periph_t* periph = _get_periph_base(header);
  mapping_irq_t*    irq    = _get_irq_base(header);
…
  unsigned int vspace_id;
  unsigned int vseg_id;
- unsigned int task_id;
+ unsigned int thread_id;

  unsigned int sched_vbase;  // schedulers array vbase address
…
  ////////////////////////////////////////////////////////////////////////////////
  // Step 1 : - initialize the schedulers[] array of pointers,
- //          - initialize the "tasks" and "current" variables.
- //          - initialise the idle task context.
+ //          - initialize the "threads" and "current" variables.
+ //          - initialise the idle_thread context.
  //          - initialize the HWI, PTI and WTI interrupt vectors.
  //          - initialize the XCU masks for HWI / WTI / PTI interrupts.
…
  _schedulers[x][y][lpid] = psched;

- // initialise the "tasks" and "current" variables default values
- psched->tasks   = 0;
- psched->current = IDLE_TASK_INDEX;
+ // initialise the "threads" and "current" variables default values
+ psched->threads = 0;
+ psched->current = IDLE_THREAD_INDEX;

  // set default values for HWI / PTI / SWI vectors (valid bit = 0)
…
  }

- // initializes the idle_task context:
- // - the SR slot is 0xFF03 because this task runs in kernel mode.
+ // initializes the idle_thread context:
+ // - the SR slot is 0xFF03 because this thread runs in kernel mode.
  // - it uses the page table of vspace[0]
- // - it uses the kernel TTY terminal
+ // - it uses the kernel TTY0 terminal
  // - slots containing addresses (SP,RA,EPC) are initialised by kernel_init()
+ // - it is always executable (NORUN == 0)

- psched->context[IDLE_TASK_INDEX][CTX_CR_ID]    = 0;
- psched->context[IDLE_TASK_INDEX][CTX_SR_ID]    = 0xFF03;
- psched->context[IDLE_TASK_INDEX][CTX_PTPR_ID]  = _ptabs_paddr[0][x][y]>>13;
- psched->context[IDLE_TASK_INDEX][CTX_PTAB_ID]  = _ptabs_vaddr[0][x][y];
- psched->context[IDLE_TASK_INDEX][CTX_TTY_ID]   = 0;
- psched->context[IDLE_TASK_INDEX][CTX_LTID_ID]  = IDLE_TASK_INDEX;
- psched->context[IDLE_TASK_INDEX][CTX_VSID_ID]  = 0;
- psched->context[IDLE_TASK_INDEX][CTX_NORUN_ID] = 0;
- psched->context[IDLE_TASK_INDEX][CTX_SIG_ID]   = 0;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_CR_ID]    = 0;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_SR_ID]    = 0xFF03;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_PTPR_ID]  = _ptabs_paddr[0][x][y]>>13;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_PTAB_ID]  = _ptabs_vaddr[0][x][y];
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_TTY_ID]   = 0;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_LTID_ID]  = IDLE_THREAD_INDEX;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_VSID_ID]  = 0;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_NORUN_ID] = 0;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_SIGS_ID]  = 0;
+ psched->context[IDLE_THREAD_INDEX].slot[CTX_LOCKS_ID] = 0;
  }
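For orientation, the structure these assignments fill is a fixed-size, per-processor scheduler. A sketch of a layout consistent with the header comments above (8 Kbytes per scheduler, at most 14 contexts, the new .slot[] access, the threads and current fields); the slot count, the vector sizes and the padding are assumptions for illustration, not the actual giet_vm definition:

#define IDLE_THREAD_INDEX 13           /* last context: the idle thread    */

typedef struct thread_context_s
{
    unsigned int slot[64];             /* CTX_SR_ID, CTX_SP_ID, ... 256 B  */
} thread_context_t;

typedef struct static_scheduler_s
{
    thread_context_t context[14];      /* 14 contexts * 256 B = 3584 B     */
    unsigned int     threads;          /* number of allocated threads      */
    unsigned int     current;          /* context index of running thread  */
    unsigned int     hwi_vector[32];   /* assumed HWI interrupt vector     */
    unsigned int     pti_vector[32];   /* assumed PTI interrupt vector     */
    unsigned int     wti_vector[32];   /* assumed WTI interrupt vector     */
    unsigned int     reserved[1054];   /* pad the scheduler to 8 Kbytes    */
} static_scheduler_t;

_Static_assert( sizeof(static_scheduler_t) == 8192,
                "one scheduler must occupy exactly 8 Kbytes" );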
…
  ///////////////////////////////////////////////////////////////////////////////
- // Step 2 : Initialise the tasks context. The context of a task placed
+ // Step 2 : Initialise the threads context. The context of a thread placed
  //          on processor P must be stored in the scheduler of P.
- //          This requires two nested loops: loop on the tasks, and loop
- //          on the local processors. We complete the scheduler when the
- //          required placement fits one local processor.
+ //          For each vspace, this requires two nested loops: loop on the threads,
+ //          and loop on the local processors in cluster[x,y].
+ //          We complete the scheduler when the required placement matches
+ //          the local processor.
  ///////////////////////////////////////////////////////////////////////////////
…
  _set_mmu_ptpr( (unsigned int)(_ptabs_paddr[vspace_id][x][y] >> 13) );

- // ctx_norun depends on the vspace active field
- unsigned int ctx_norun = (vspace[vspace_id].active == 0);
-
- // loop on the tasks in vspace (task_id is the global index in mapping)
- for (task_id = vspace[vspace_id].task_offset;
-      task_id < (vspace[vspace_id].task_offset + vspace[vspace_id].tasks);
-      task_id++)
- {
-     // get the required task placement coordinates [x,y,p]
-     unsigned int req_x = cluster[task[task_id].clusterid].x;
-     unsigned int req_y = cluster[task[task_id].clusterid].y;
-     unsigned int req_p = task[task_id].proclocid;
+ // loop on the threads in vspace (thread_id is the global index in mapping)
+ for (thread_id = vspace[vspace_id].thread_offset;
+      thread_id < (vspace[vspace_id].thread_offset + vspace[vspace_id].threads);
+      thread_id++)
+ {
+     // get the required thread placement coordinates [x,y,p]
+     unsigned int req_x = cluster[thread[thread_id].clusterid].x;
+     unsigned int req_y = cluster[thread[thread_id].clusterid].y;
+     unsigned int req_p = thread[thread_id].proclocid;
+
+     // ctx_norun : two conditions to activate a thread
+     // - The vspace.active flag is set in the mapping
+     // - The thread.is_main flag is set in the mapping
+     unsigned int ctx_norun = (unsigned int)(vspace[vspace_id].active == 0) |
+                              (unsigned int)(thread[thread_id].is_main == 0);

  // ctx_ptpr : page table physical base address (shifted by 13 bit)
…
  // ctx_entry : Get the virtual address of the memory location containing
- //             the task entry point : the start_vector is stored by GCC in the
- //             seg_data segment, and we must wait for the .elf loading to get
+ //             the thread entry point : the start_vector is stored by GCC in the
+ //             seg_data segment, and we must wait for the application.elf loading to get
  //             the entry point value...
  vseg_id = vspace[vspace_id].start_vseg_id;
- unsigned int ctx_entry = vseg[vseg_id].vbase + (task[task_id].startid)*4;
+ unsigned int ctx_entry = vseg[vseg_id].vbase + (thread[thread_id].startid)*4;
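The ctx_entry computation is easier to follow with the application side in view: the start_vector is an array of entry points that GCC places in the application's data segment, and the boot code records the address vbase + 4*startid, to be dereferenced only after the .elf file has been loaded. A hypothetical application fragment (function names invented for illustration):

typedef void (*thread_entry_t)( void );

void producer( void ) { /* ... application thread code ... */ }
void consumer( void ) { /* ... application thread code ... */ }

/* GCC places this initialised array in seg_data; the boot code saves
 * the address of start_vector[startid] in CTX_ENTRY / CTX_EPC, and
 * kernel_init() reads the entry point through it after loading.     */
thread_entry_t start_vector[] = { producer, consumer };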
  // ctx_sp : Get the vseg containing the stack
- vseg_id = task[task_id].stack_vseg_id;
- unsigned int ctx_sp = vseg[vseg_id].vbase + vseg[vseg_id].length;
-
- // get vspace thread index
- unsigned int thread_id = task[task_id].trdid;
+ //          allocate 16 slots (64 bytes) for possible arguments.
+ vseg_id = thread[thread_id].stack_vseg_id;
+ unsigned int ctx_sp = vseg[vseg_id].vbase + vseg[vseg_id].length - 64;

  // loop on the local processors
…
  psched = _schedulers[x][y][lpid];

- // get local task index in scheduler
- unsigned int ltid = psched->tasks;
-
- // update the "tasks" field in scheduler:
- psched->tasks = ltid + 1;
-
- // initializes the task context
- psched->context[ltid][CTX_CR_ID]     = 0;
- psched->context[ltid][CTX_SR_ID]     = GIET_SR_INIT_VALUE;
- psched->context[ltid][CTX_SP_ID]     = ctx_sp;
- psched->context[ltid][CTX_EPC_ID]    = ctx_entry;
- psched->context[ltid][CTX_ENTRY_ID]  = ctx_entry;
- psched->context[ltid][CTX_PTPR_ID]   = ctx_ptpr;
- psched->context[ltid][CTX_PTAB_ID]   = ctx_ptab;
- psched->context[ltid][CTX_LTID_ID]   = ltid;
- psched->context[ltid][CTX_GTID_ID]   = task_id;
- psched->context[ltid][CTX_TRDID_ID]  = thread_id;
- psched->context[ltid][CTX_VSID_ID]   = vspace_id;
- psched->context[ltid][CTX_NORUN_ID]  = ctx_norun;
- psched->context[ltid][CTX_SIG_ID]    = 0;
-
- psched->context[ltid][CTX_TTY_ID]    = 0xFFFFFFFF;
- psched->context[ltid][CTX_CMA_FB_ID] = 0xFFFFFFFF;
- psched->context[ltid][CTX_CMA_RX_ID] = 0xFFFFFFFF;
- psched->context[ltid][CTX_CMA_TX_ID] = 0xFFFFFFFF;
- psched->context[ltid][CTX_NIC_RX_ID] = 0xFFFFFFFF;
- psched->context[ltid][CTX_NIC_TX_ID] = 0xFFFFFFFF;
- psched->context[ltid][CTX_TIM_ID]    = 0xFFFFFFFF;
- psched->context[ltid][CTX_HBA_ID]    = 0xFFFFFFFF;
-
- // update task ltid field in the mapping
- task[task_id].ltid = ltid;
+ // ltid : compute local thread index in scheduler
+ unsigned int ltid = psched->threads;
+
+ // update the threads field in scheduler:
+ psched->threads = ltid + 1;
+
+ // ctx_trdid : compute pthread global identifier
+ unsigned int ctx_trdid = x<<24 | y<<16 | lpid<<8 | ltid;
+
+ // initializes the thread context
+ psched->context[ltid].slot[CTX_CR_ID]     = 0;
+ psched->context[ltid].slot[CTX_SR_ID]     = GIET_SR_INIT_VALUE;
+ psched->context[ltid].slot[CTX_SP_ID]     = ctx_sp;
+ psched->context[ltid].slot[CTX_EPC_ID]    = ctx_entry;
+ psched->context[ltid].slot[CTX_ENTRY_ID]  = ctx_entry;
+ psched->context[ltid].slot[CTX_PTPR_ID]   = ctx_ptpr;
+ psched->context[ltid].slot[CTX_PTAB_ID]   = ctx_ptab;
+ psched->context[ltid].slot[CTX_LTID_ID]   = ltid;
+ psched->context[ltid].slot[CTX_TRDID_ID]  = ctx_trdid;
+ psched->context[ltid].slot[CTX_VSID_ID]   = vspace_id;
+ psched->context[ltid].slot[CTX_NORUN_ID]  = ctx_norun;
+ psched->context[ltid].slot[CTX_SIGS_ID]   = 0;
+ psched->context[ltid].slot[CTX_LOCKS_ID]  = 0;
+
+ psched->context[ltid].slot[CTX_TTY_ID]    = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_CMA_FB_ID] = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_CMA_RX_ID] = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_CMA_TX_ID] = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_NIC_RX_ID] = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_NIC_TX_ID] = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_TIM_ID]    = 0xFFFFFFFF;
+ psched->context[ltid].slot[CTX_HBA_ID]    = 0xFFFFFFFF;
+
+ // update thread ltid field in the mapping
+ thread[thread_id].ltid = ltid;
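The new ctx_trdid packs the thread's physical placement into one 32-bit pthread identifier, one byte per field. A standalone illustration of the packing used above, together with the corresponding unpacking (the unpacking is the inferred inverse, not shown in this changeset):

#include <stdio.h>

int main( void )
{
    unsigned int x = 1, y = 2, lpid = 3, ltid = 4;

    /* packing, as in boot_scheduler_init() above                    */
    unsigned int trdid = (x << 24) | (y << 16) | (lpid << 8) | ltid;

    /* unpacking: one byte per field                                 */
    printf( "x=%u y=%u lpid=%u ltid=%u\n",
            (trdid >> 24) & 0xFF,
            (trdid >> 16) & 0xFF,
            (trdid >>  8) & 0xFF,
             trdid        & 0xFF );
    return 0;
}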
" - ctx[SP] = %x\n" … … 1309 1318 " - ctx[PTAB] = %x\n" 1310 1319 " - ctx[VSID] = %d\n" 1311 " - ctx[TRDID] = %d\n"1312 1320 " - ctx[NORUN] = %x\n" 1313 1321 " - ctx[SIG] = %x\n", 1314 t ask[task_id].name,1322 thread[thread_id].name, 1315 1323 vspace[vspace_id].name, 1316 1324 x, y, lpid, 1317 psched->context[ltid] [CTX_LTID_ID],1318 psched->context[ltid] [CTX_SR_ID],1319 psched->context[ltid] [CTX_SP_ID],1320 psched->context[ltid] [CTX_ENTRY_ID],1321 psched->context[ltid] [CTX_PTPR_ID],1322 psched->context[ltid] [CTX_PTAB_ID],1323 psched->context[ltid] [CTX_VSID_ID],1324 psched->context[ltid] [CTX_TRDID_ID],1325 psched->context[ltid] [CTX_NORUN_ID],1326 psched->context[ltid] [CTX_SIG_ID] );1325 psched->context[ltid].slot[CTX_LTID_ID], 1326 psched->context[ltid].slot[CTX_TRDID_ID], 1327 psched->context[ltid].slot[CTX_SR_ID], 1328 psched->context[ltid].slot[CTX_SP_ID], 1329 psched->context[ltid].slot[CTX_ENTRY_ID], 1330 psched->context[ltid].slot[CTX_PTPR_ID], 1331 psched->context[ltid].slot[CTX_PTAB_ID], 1332 psched->context[ltid].slot[CTX_VSID_ID], 1333 psched->context[ltid].slot[CTX_NORUN_ID], 1334 psched->context[ltid].slot[CTX_SIG_ID] ); 1327 1335 #endif 1328 1336 } // end if FIT 1329 1337 } // end for loop on local procs 1330 } // end loop on t asks1338 } // end loop on threads 1331 1339 } // end loop on vspaces 1332 1340 } // end boot_scheduler_init() … … 1544 1552 " check that all global variables are in data segment\n", 1545 1553 seg_vaddr, pathname , seg_memsz , seg_filesz ); 1546 _exit();1554 _exit(); 1547 1555 } 1548 1556