Changeset 321
- Timestamp: Jun 10, 2014, 1:26:45 PM (10 years ago)
- Location: soft/giet_vm/giet_boot
- Files: 2 edited
Legend:
- Unmodified (no prefix)
- Added (lines prefixed with +)
- Removed (lines prefixed with -)
soft/giet_vm/giet_boot/boot.c
r316 → r321

// - the "map.bin" file contains the hardware architecture description and the
//   mapping directives. It must be stored in the the seg_boot_mapping segment
- //   (at address seg_boot_mapping_base).
+ //   (at address SEG_BOOT_MAPPING_BASE defined in hard_config.h file).
// - the "sys.elf" file contains the kernel binary code and data.
// - the various "application.elf" files.
…
#endif

+ #if !defined(SEG_BOOT_MAPPING_BASE)
+ # error: You must define SEG_BOOT_MAPPING_BASE in the hard_config.h file
+ #endif
+
+ #if !defined(SEG_BOOT_BUFFER_BASE)
+ # error: You must define SEG_BOOT_BUFFER_BASE in the hard_config.h file
+ #endif
+
+ #if !defined(SEG_BOOT_BUFFER_SIZE)
+ # error: You must define SEG_BOOT_BUFFER_SIZE in the hard_config.h file
+ #endif
+
#if !defined(NB_PROCS_MAX)
# error The NB_PROCS_MAX value must be defined in the 'hard_config.h' file !
…
void boot_mapping_check()
{
-     mapping_header_t * header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;

    // checking mapping availability
…
mapping_pseg_t *boot_pseg_get(unsigned int seg_id)
{
-     mapping_header_t* header = (mapping_header_t*)(&seg_boot_mapping_base);
+     mapping_header_t* header = (mapping_header_t*)SEG_BOOT_MAPPING_BASE;
    mapping_pseg_t * pseg = _get_pseg_base(header);
…
    unsigned int verbose = 0;   // can be used to activate trace in add_pte()

-     mapping_header_t * header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vspace_t * vspace = _get_vspace_base(header);
    mapping_vseg_t * vseg = _get_vseg_base(header);
…
    // computes vseg alignment constraint
-     mapping_header_t* header = (mapping_header_t*) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t*)SEG_BOOT_MAPPING_BASE;
    mapping_vobj_t* vobj_base = _get_vobj_base( header );
    unsigned int align = vobj_base[vseg->vobj_offset].align;
…
    // computes vseg alignment constraint
-     mapping_header_t* header = (mapping_header_t*) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t*)SEG_BOOT_MAPPING_BASE;
    mapping_vobj_t* vobj_base = _get_vobj_base( header );
    unsigned int align = vobj_base[vseg->vobj_offset].align;
…
    _puts(" cannot be mapped on pseg ");
    _puts( pseg->name );
-     _puts("\n");
+     _puts(" in cluster[");
+     _putd( pseg->clusterid );
+     _puts("]\n");
    _exit();
}
…
    unsigned int offset;

-     mapping_header_t * header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vobj_t * vobj = _get_vobj_base(header);
…
void boot_pt_init()
{
-     mapping_header_t * header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vspace_t * vspace = _get_vspace_base(header);
    mapping_vseg_t * vseg = _get_vseg_base(header);
…
void boot_vobjs_init()
{
-     mapping_header_t* header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vspace_t* vspace = _get_vspace_base(header);
    mapping_vobj_t* vobj = _get_vobj_base(header);
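The change that repeats throughout this file swaps the address of a linker-defined symbol for a constant supplied by hard_config.h. A minimal sketch of the two access patterns; the numeric base value below is illustrative only, not the real one:

    /* old style: 'seg_boot_mapping_base' is a linker-script symbol with no
     * storage of its own, so only its address is meaningful               */
    extern unsigned int seg_boot_mapping_base;
    #define OLD_MAPPING_VADDR  ((void *)&seg_boot_mapping_base)

    /* new style: SEG_BOOT_MAPPING_BASE is a plain integer constant coming
     * from hard_config.h and can be cast directly (value is a placeholder) */
    #define SEG_BOOT_MAPPING_BASE  0x00000000
    #define NEW_MAPPING_VADDR      ((void *)SEG_BOOT_MAPPING_BASE)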
…
                            unsigned int* length )
{
-     mapping_header_t* header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vobj_t* vobj = _get_vobj_base(header);
    mapping_vseg_t* vseg = _get_vseg_base(header);
…
// - In Step 1, it initialises the _schedulers[gpid] pointers array, and scan
//   the processors to initialise the schedulers, including the
- //   idle_task context (ltid == 14).
- // - In Step 2, it scan all tasks in all vspaces to initialise the tasks contexts,
- //   as specified in the mapping_info data structure.
+ //   idle_task context (ltid == 14) and HWI / SWI / PTI vectors.
+ // - In Step 2, it scan all tasks in all vspaces to complete the tasks contexts,
+ //   initialisation as specified in the mapping_info data structure.
////////////////////////////////////////////////////////////////////////////////////
void boot_schedulers_init()
{
-     mapping_header_t* header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_cluster_t* cluster = _get_cluster_base(header);
    mapping_vspace_t* vspace = _get_vspace_base(header);
…
    unsigned int vspace_id;   // vspace index in mapping_info
    unsigned int task_id;     // task index in mapping_info
-
-     // TTY, NIC, CMA, HBA, TIM and DMA channels allocators
+     unsigned int vobj_id;   // vobj index in mapping_info
+
+     unsigned int lpid;      // local processor index (for several loops)
+
+     // TTY, NIC, CMA, HBA, user timer, and WTI channel allocators to user tasks:
    // - TTY[0] is reserved for the kernel
    // - In all clusters the first NB_PROCS_MAX timers
    //   are reserved for the kernel (context switch)
-
-     unsigned int alloc_tty_channel = 1;            // TTY channel allocator
-     unsigned int alloc_nic_channel = 0;            // NIC channel allocator
-     unsigned int alloc_cma_channel = 0;            // CMA channel allocator
-     unsigned int alloc_hba_channel = 0;            // HBA channel allocator
-     unsigned int alloc_tim_channel[X_SIZE*Y_SIZE]; // user TIMER allocators
+     unsigned int alloc_tty_channel = 1;            // global
+     unsigned int alloc_nic_channel = 0;            // global
+     unsigned int alloc_cma_channel = 0;            // global
+     unsigned int alloc_hba_channel = 0;            // global
+     unsigned int alloc_tim_channel[X_SIZE*Y_SIZE]; // one per cluster
+
+     // WTI allocators to processors
+     // In all clusters, first NB_PROCS_MAX WTIs are for WAKUP
+     unsigned int alloc_wti_channel[X_SIZE*Y_SIZE]; // one per cluster
+
+     // pointers on the XCU and PIC peripherals
+     mapping_periph_t* xcu = NULL;
+     mapping_periph_t* pic = NULL;
+
+     // schedulers array base address in a cluster
+     unsigned int sched_vbase;
+     unsigned int sched_length;
+     static_scheduler_t* psched;

    /////////////////////////////////////////////////////////////////////////
…
#endif
        alloc_tim_channel[cluster_id] = NB_PROCS_MAX;
+         alloc_wti_channel[cluster_id] = NB_PROCS_MAX;

        // checking processors number
…
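The comments above state that, in every cluster, the first NB_PROCS_MAX timer and WTI channels stay with the kernel, so the per-cluster allocators start at NB_PROCS_MAX and only hand out the remaining slots to user tasks. A small sketch of that discipline, with illustrative sizes; the helper names wti_alloc_init and wti_alloc are hypothetical and not part of boot.c:

    #define X_SIZE        2     /* illustrative grid dimensions            */
    #define Y_SIZE        2
    #define NB_PROCS_MAX  4     /* processors per cluster (example value)  */

    static unsigned int alloc_wti_channel[X_SIZE * Y_SIZE];

    /* reserve the first NB_PROCS_MAX WTI slots of every cluster (WAKUP) */
    static void wti_alloc_init(void)
    {
        for (unsigned int c = 0; c < X_SIZE * Y_SIZE; c++)
            alloc_wti_channel[c] = NB_PROCS_MAX;
    }

    /* hand out the next free WTI slot of one cluster */
    static unsigned int wti_alloc(unsigned int cluster_id)
    {
        return alloc_wti_channel[cluster_id]++;
    }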
        }

-         static_scheduler_t* psched;  // schedulers array base address in cluster
-
        // no schedulers initialisation if nprocs == 0
        if ( cluster[cluster_id].procs > 0 )
        {
-             // get scheduler array virtual base address and length from mapping
-             unsigned int sched_vbase;   // schedulers segment virtual base address
-             unsigned int sched_length;  // schedulers segment length
+             // get scheduler array virtual base address from mapping
            boot_get_sched_vaddr( cluster_id, &sched_vbase, &sched_length );
…
            psched = (static_scheduler_t*)sched_vbase;

-             // scan cluster peripherals to find the ICU/XCU
-             unsigned int found = 0;
+             // scan peripherals to find the ICU/XCU and the PIC component
+
+             xcu = NULL;
            for ( periph_id = cluster[cluster_id].periph_offset ;
                  periph_id < cluster[cluster_id].periph_offset + cluster[cluster_id].periphs;
…
                 (periph[periph_id].type == PERIPH_TYPE_ICU) )
                {
-                     found = 1;
-                     break;
+                     xcu = &periph[periph_id];
+
+                     if ( xcu->arg < cluster[cluster_id].procs )
+                     {
+                         _puts("\n[BOOT ERROR] Not enough inputs for XCU[");
+                         _putd( x );
+                         _puts(",");
+                         _putd( y );
+                         _puts("]\n");
+                         _exit();
+                     }
+                 }
+                 if( periph[periph_id].type == PERIPH_TYPE_PIC )
+                 {
+                     pic = &periph[periph_id];
                }
            }
-             if ( found == 0 )
+             if ( xcu == NULL )
            {
                _puts("\n[BOOT ERROR] No ICU / XCU component in cluster[");
…
            }

-             // loop on schedulers for default values initialisation
-             unsigned int lpid;
+             // loop on processors for sechedulers default values
+             // initialisation, including WTI and PTI vectors
            for ( lpid = 0 ; lpid < cluster[cluster_id].procs ; lpid++ )
            {
…
                psched[lpid].current = IDLE_TASK_INDEX;

-                 // initialise HWI / PTI / SWI vectors (valid bit = 0)
+                 // default values for HWI / PTI / SWI vectors (valid bit = 0)
                unsigned int slot;
                for (slot = 0; slot < 32; slot++)
…
                    psched[lpid].wti_vector[slot] = 0;
                }
+
+                 // WTI[lpid] <= ISR_WAKUP / PTI[lpid] <= ISR_TICK
+                 psched[lpid].wti_vector[lpid] = ISR_WAKUP | 0x80000000;
+                 psched[lpid].pti_vector[lpid] = ISR_TICK  | 0x80000000;

                // initializes the idle_task context in scheduler:
…
                psched[lpid].context[IDLE_TASK_INDEX][CTX_VSID_ID] = 0;
                psched[lpid].context[IDLE_TASK_INDEX][CTX_RUN_ID] = 1;
-             }
-
-
-             // loop on irqs in ICU for actual HWI / PTI / WTI vectors initialisation
-             for ( irq_id = periph[periph_id].irq_offset ;
-                   irq_id < periph[periph_id].irq_offset + periph[periph_id].irqs ;
+             }  // end for processors
+
+             // scan HWIs connected to local XCU
+             // for round-robin allocation to processors
+             lpid = 0;
+             for ( irq_id = xcu->irq_offset ;
+                   irq_id < xcu->irq_offset + xcu->irqs ;
                  irq_id++ )
            {
-                 unsigned int lpid = irq[irq_id].dstid;
-                 if ( lpid >= cluster[cluster_id].procs )
-                 {
-                     _puts("\n[BOOT ERROR] Bad IRQ processor index in cluster[");
+                 unsigned int type = irq[irq_id].srctype;
+                 unsigned int srcid = irq[irq_id].srcid;
+                 unsigned int isr = irq[irq_id].isr & 0xFFFF;
+                 unsigned int channel = irq[irq_id].channel << 16;
+
+                 if ( (type != IRQ_TYPE_HWI) || (srcid > 31) )
+                 {
+                     _puts("\n[BOOT ERROR] Bad IRQ in XCU of cluster[");
                    _putd( x );
                    _puts(",");
…
                    _exit();
                }
-                 unsigned int type = irq[irq_id].srctype;
-                 unsigned int index = irq[irq_id].srcid;
-                 unsigned int isr = irq[irq_id].isr;
-                 unsigned int channel = irq[irq_id].channel;
-
-                 unsigned int entry = ((isr     & 0xFFFF)      ) |
-                                      ((channel & 0x7FFF) << 16) |
-                                      0x80000000;                  // Valid entry
-
-                 if      (type == IRQ_TYPE_HWI) psched[lpid].hwi_vector[index] = entry;
-                 else if (type == IRQ_TYPE_PTI) psched[lpid].pti_vector[index] = entry;
-                 else if (type == IRQ_TYPE_WTI) psched[lpid].wti_vector[index] = entry;
-
- #if BOOT_DEBUG_SCHED
- _puts("- IRQ : type = ");
- _putd( type );
- _puts(" / index = ");
- _putd( index );
- _puts(" / isr = ");
- _putd( isr );
- _puts(" / channel = ");
- _putd( channel );
- _puts("\n");
- #endif
+
+                 psched[lpid].hwi_vector[srcid] = isr | channel | 0x80000000;
+                 lpid = (lpid + 1) % cluster[cluster_id].procs;

            } // end for irqs
        } // end if nprocs > 0
    } // end for clusters
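Both the removed and the added code pack each interrupt route into one 32-bit vector entry: ISR type in bits 15:0, channel in bits 30:16, valid flag in bit 31. A sketch of encode and decode helpers for that layout; the function names are illustrative, the bit fields are taken from the diff:

    /* scheduler vector entry layout:
     *   [31]    valid bit
     *   [30:16] channel index
     *   [15:0]  ISR type                                            */
    static unsigned int irq_entry_encode(unsigned int isr, unsigned int channel)
    {
        return (isr & 0xFFFF) | ((channel & 0x7FFF) << 16) | 0x80000000;
    }

    static unsigned int irq_entry_isr(unsigned int entry)     { return entry & 0xFFFF; }
    static unsigned int irq_entry_channel(unsigned int entry) { return (entry >> 16) & 0x7FFF; }
    static unsigned int irq_entry_valid(unsigned int entry)   { return (entry >> 31) & 0x1; }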
+
+     // If there is an external PIC component, we scan HWIs connected to PIC
+     // for Round Robin allocation (as WTI) to processors.
+     // We allocate one WTI per processor, starting from proc[0,0,0],
+     // and we increment (cluster_id, lpid) as required.
+     if ( pic != NULL )
+     {
+         unsigned int cluster_id = 0;   // index in clusters array
+         unsigned int lpid = 0;         // processor local index
+
+         // scan IRQS defined in PIC
+         for ( irq_id = pic->irq_offset ;
+               irq_id < pic->irq_offset + pic->irqs ;
+               irq_id++ )
+         {
+             // compute next values for (cluster_id,lpid)
+             // if no more procesor available in current cluster
+             unsigned int overflow = 0;
+             while ( (lpid >= cluster[cluster_id].procs) ||
+                     (alloc_wti_channel[cluster_id] >= xcu->arg) )
+             {
+                 overflow++;
+                 cluster_id = (cluster_id + 1) % (X_SIZE*Y_SIZE);
+                 lpid = 0;
+
+                 // overflow detection
+                 if ( overflow > (X_SIZE*Y_SIZE*NB_PROCS_MAX*32) )
+                 {
+                     _puts("\n[BOOT ERROR] Not enough processors for external IRQs\n");
+                     _exit();
+                 }
+             }
+
+             unsigned int type = irq[irq_id].srctype;
+             unsigned int srcid = irq[irq_id].srcid;
+             unsigned int isr = irq[irq_id].isr & 0xFFFF;
+             unsigned int channel = irq[irq_id].channel << 16;
+
+             if ( (type != IRQ_TYPE_HWI) || (srcid > 31) )
+             {
+                 _puts("\n[BOOT ERROR] Bad IRQ in PIC component\n");
+                 _exit();
+             }
+
+             // get scheduler[cluster_id] address
+             unsigned int x = cluster[cluster_id].x;
+             unsigned int y = cluster[cluster_id].y;
+             unsigned int cluster_xy = (x<<Y_WIDTH) + y;
+             psched = _schedulers[cluster_xy * NB_PROCS_MAX];
+
+             // update WTI vector for scheduler[cluster_id][lpid]
+             unsigned int index = alloc_wti_channel[cluster_id];
+             psched[lpid].wti_vector[index] = isr | channel | 0x80000000;
+             alloc_wti_channel[cluster_id] = index + 1;
+             lpid = lpid + 1;
+
+             // update IRQ fields in mapping for PIC initialisation
+             irq[irq_id].dest_id = index;
+             irq[irq_id].dest_xy = cluster_xy;
+
+         }  // end for IRQs
+     } // end if PIC
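The PIC block added above walks the external HWIs and gives each one a free WTI slot, advancing to the next local processor after every assignment and to the next cluster when a cluster has no processor or no WTI input left. A simplified, self-contained sketch of that round-robin walk; the per-cluster tables and the helper name pic_irq_place are hypothetical:

    #define NB_CLUSTERS 4

    /* hypothetical per-cluster description, simplified from mapping_info */
    static unsigned int procs_in_cluster[NB_CLUSTERS] = { 4, 4, 0, 2 };
    static unsigned int wti_inputs = 16;                         /* XCU WTI inputs     */
    static unsigned int next_wti[NB_CLUSTERS] = { 4, 4, 4, 4 };  /* first 4 reserved   */

    /* returns 0 on success and fills (*cluster, *lpid, *wti) for one external IRQ */
    static int pic_irq_place(unsigned int *cluster, unsigned int *lpid, unsigned int *wti)
    {
        unsigned int tries = 0;
        /* skip clusters with no free processor or no free WTI input */
        while ((*lpid >= procs_in_cluster[*cluster]) || (next_wti[*cluster] >= wti_inputs))
        {
            if (++tries > NB_CLUSTERS) return -1;    /* nothing left anywhere */
            *cluster = (*cluster + 1) % NB_CLUSTERS;
            *lpid = 0;
        }
        *wti = next_wti[*cluster]++;     /* WTI slot used for this IRQ           */
        (*lpid)++;                       /* next IRQ goes to the next processor  */
        return 0;
    }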
+
+ #if BOOT_DEBUG_SCHED
+ for ( cluster_id = 0 ; cluster_id < (X_SIZE*Y_SIZE) ; cluster_id++ )
+ {
+     unsigned int x = cluster[cluster_id].x;
+     unsigned int y = cluster[cluster_id].y;
+     unsigned int cluster_xy = (x<<Y_WIDTH) + y;
+     psched = _schedulers[cluster_xy * NB_PROCS_MAX];
+     unsigned int slot;
+     unsigned int entry;
+     for ( lpid = 0 ; lpid < cluster[cluster_id].procs ; lpid++ )
+     {
+         _puts("\n*** IRQS for proc[");
+         _putd( x );
+         _puts(",");
+         _putd( y );
+         _puts(",[");
+         _putd( lpid );
+         _puts("]\n");
+         for ( slot = 0 ; slot < 32 ; slot++ )
+         {
+             entry = psched[lpid].hwi_vector[slot];
+             if ( entry & 0x80000000 )
+             {
+                 _puts(" - HWI ");
+                 _putd( slot );
+                 _puts(" / isrtype = ");
+                 _putd( entry & 0xFFFF );
+                 _puts(" / channel = ");
+                 _putd( (entry >> 16) & 0x7FFF );
+                 _puts("\n");
+             }
+         }
+         for ( slot = 0 ; slot < 32 ; slot++ )
+         {
+             entry = psched[lpid].wti_vector[slot];
+             if ( entry & 0x80000000 )
+             {
+                 _puts(" - WTI ");
+                 _putd( slot );
+                 _puts(" / isrtype = ");
+                 _putd( entry & 0xFFFF );
+                 _puts(" / channel = ");
+                 _putd( (entry >> 16) & 0x7FFF );
+                 _puts("\n");
+             }
+         }
+         for ( slot = 0 ; slot < 32 ; slot++ )
+         {
+             entry = psched[lpid].pti_vector[slot];
+             if ( entry & 0x80000000 )
+             {
+                 _puts(" - PTI ");
+                 _putd( slot );
+                 _puts(" / isrtype = ");
+                 _putd( entry & 0xFFFF );
+                 _puts(" / channel = ");
+                 _putd( (entry >> 16) & 0x7FFF );
+                 _puts("\n");
+             }
+         }
+     }
+ }
+ #endif

    ///////////////////////////////////////////////////////////////////
…
            // compute gpid (global processor index) and scheduler base address
            unsigned int gpid = cluster_xy * NB_PROCS_MAX + lpid;
-             static_scheduler_t* psched = _schedulers[gpid];
+             psched = _schedulers[gpid];

            // ctx_sr : value required before an eret instruction
…
            // the task entry point : the start_vector is stored by GCC in the seg_data
            // segment and we must wait the .elf loading to get the entry point value...
-             mapping_vobj_t* pvobj = &vobj[vspace[vspace_id].vobj_offset +
-                                     vspace[vspace_id].start_offset];
-             unsigned int ctx_epc = pvobj->vaddr + (task[task_id].startid)*4;
+             vobj_id = vspace[vspace_id].start_vobj_id;
+             unsigned int ctx_epc = vobj[vobj_id].vaddr + (task[task_id].startid)*4;

            // ctx_sp : Get the vobj containing the stack
-             unsigned int vobj_id = task[task_id].stack_vobjid + vspace[vspace_id].vobj_offset;
+             vobj_id = task[task_id].stack_vobj_id;
            unsigned int ctx_sp = vobj[vobj_id].vaddr + vobj[vobj_id].length;
…
    unsigned int ok = _fat_read( IOC_BOOT_MODE,
                                 fd_id,
-                                  (unsigned int*)( &seg_boot_mapping_base),
+                                  (unsigned int*)SEG_BOOT_MAPPING_BASE,
                                 nblocks,
                                 0 );      // offset
…

    // get boot buffer address and size
-     char* boot_buffer = (char*)(&seg_boot_buffer_base);
-     unsigned int boot_buffer_size = (unsigned int)(&seg_boot_buffer_size);
+     char* boot_buffer = (char*)SEG_BOOT_BUFFER_BASE;
+     unsigned int boot_buffer_size = SEG_BOOT_BUFFER_SIZE;

#if BOOT_DEBUG_ELF
…
void boot_elf_load()
{
-     mapping_header_t* header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_vspace_t* vspace = _get_vspace_base( header );
    mapping_vobj_t* vobj = _get_vobj_base( header );
…
void boot_peripherals_init()
{
-     mapping_header_t * header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t * header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_cluster_t * cluster = _get_cluster_base(header);
    mapping_periph_t * periph = _get_periph_base(header);
    mapping_vobj_t * vobj = _get_vobj_base(header);
-     mapping_vspace_t * vspace = _get_vspace_base(header);
    mapping_coproc_t * coproc = _get_coproc_base(header);
    mapping_cp_port_t * cp_port = _get_cp_port_base(header);
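Scheduler lookups in this file derive a global processor index from the cluster coordinates and the local index: cluster_xy = (x << Y_WIDTH) + y and gpid = cluster_xy * NB_PROCS_MAX + lpid. A worked sketch with illustrative constants:

    #define Y_WIDTH       4   /* bits used for the Y coordinate (illustrative) */
    #define NB_PROCS_MAX  4   /* processors per cluster (illustrative)         */

    /* global processor index, as used to index the _schedulers[] array */
    static unsigned int make_gpid(unsigned int x, unsigned int y, unsigned int lpid)
    {
        unsigned int cluster_xy = (x << Y_WIDTH) + y;
        return cluster_xy * NB_PROCS_MAX + lpid;
    }
    /* e.g. make_gpid(1, 2, 3) == ((1 << 4) + 2) * 4 + 3 == 75 */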
…
#if 0
    // initialize r_xicu_base & r_xicu_size registers
-     unsigned int base = (unsigned int) &seg_xcu_base;
+     unsigned int base = (unsigned int)SEG_XCU_BASE;

#if BOOT_DEBUG_PERI
…
                  channel_id++ )
    {
-         unsigned int hwi_id = irq[channel_id].srcid;      // HWI index in PIC
-         unsigned int wti_id = irq[channel_id].dstid;      // WTI index in XCU
-         unsigned int x = irq[channel_id].dstx;            // XCU X coordinate
-         unsigned int y = irq[channel_id].dsty;            // XCU Y coordinate
-         unsigned int cluster_xy = (x<<Y_WIDTH) + y;       // XCU cluster
+         unsigned int hwi_id = irq[channel_id].srcid;        // HWI index in PIC
+         unsigned int wti_id = irq[channel_id].dest_id;      // WTI index in XCU
+         unsigned int cluster_xy = irq[channel_id].dest_xy;  // XCU coordinates
        unsigned int vaddr;

…
        _putx( vaddr );
        _puts(" in cluster[");
-         _putd( x );
+         _putd( cluster_xy >> Y_WIDTH );
        _puts(",");
-         _putd( y );
+         _putd( cluster_xy & ((1<<Y_WIDTH)-1) );
        _puts("]\n");
#endif
…
                  cp_port_id++ )
    {
-         unsigned int vspace_id = cp_port[cp_port_id].vspaceid;
-         unsigned int vobj_id = cp_port[cp_port_id].mwmr_vobjid +
-                                vspace[vspace_id].vobj_offset;
+         // Get global index of associted vobj
+         unsigned int vobj_id = cp_port[cp_port_id].mwmr_vobj_id;

        // Get MWMR channel base address
…
        _puts(" / name = ");
        _puts(vobj[vobj_id].name);
-         _puts(" / in vspace ");
-         _puts(vspace[vspace_id].name);
        _puts("\n");
#endif
…
void boot_init()
{
-     mapping_header_t* header = (mapping_header_t *) &seg_boot_mapping_base;
+     mapping_header_t* header = (mapping_header_t *)SEG_BOOT_MAPPING_BASE;
    mapping_cluster_t* cluster = _get_cluster_base(header);
    unsigned int gpid = _get_procid();
…
    }

-     // all processors jump to kernel_init
-     unsigned int kernel_entry = (unsigned int)&seg_kernel_init_base;
+     // all processors jump to kernel_init (address defined in giet_vsegs.ld)
+     unsigned int kernel_entry = (unsigned int)&kernel_init_vbase;
    asm volatile( "jr %0" ::"r"(kernel_entry) );
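Since the mapping now stores a packed dest_xy value instead of separate coordinates, boot_peripherals_init unpacks x and y only when it prints them, using cluster_xy >> Y_WIDTH and cluster_xy & ((1 << Y_WIDTH) - 1). The same decoding as a pair of helpers, with an illustrative Y_WIDTH:

    #define Y_WIDTH 4   /* illustrative width of the Y field */

    static unsigned int cluster_x(unsigned int cluster_xy) { return cluster_xy >> Y_WIDTH; }
    static unsigned int cluster_y(unsigned int cluster_xy) { return cluster_xy & ((1 << Y_WIDTH) - 1); }
    /* with Y_WIDTH == 4 : cluster_xy == 0x12  ->  x == 1, y == 2 */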
soft/giet_vm/giet_boot/boot.ld
r258 → r321

    /* Definition of the entry point for the BOOT code.                     */
    /* The address of the boot_init function is stored in the .elf header,  */
-     /* and is used by the préloader to jump into the boot code.            */
+     /* and is used by the preloader to jump into the boot code.            */
    /****************************************************************************/
…
    SECTIONS
    {
-         . = seg_boot_code_base;
+         . = boot_code_vbase;
        seg_boot_code :
        {
…
        }

-         . = seg_boot_data_base;
+         . = boot_data_vbase;
        seg_boot_data :
        {
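The renamed symbols (boot_code_vbase and boot_data_vbase here, kernel_init_vbase on the boot.c side) are defined on the linker-script side, which the boot.c comment attributes to giet_vsegs.ld, so C code can only take their address, never read them as variables. A sketch of that idiom, with the jump taken from boot.c and the wrapper function being illustrative:

    /* the linker script gives the symbol its address; it has no C storage */
    extern unsigned int kernel_init_vbase;

    static void jump_to_kernel(void)
    {
        unsigned int kernel_entry = (unsigned int)&kernel_init_vbase;
        asm volatile( "jr %0" ::"r"(kernel_entry) );   /* MIPS32 register jump */
    }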