Changeset 656 for branches/fault_tolerance
- Timestamp: Mar 7, 2014, 10:24:17 AM
- Location: branches/fault_tolerance/module/internal_component
- Files: 9 added, 2 edited
branches/fault_tolerance/module/internal_component/vci_cc_vcache_wrapper/caba/source/include/vci_cc_vcache_wrapper.h
(r616 → r656)

This header gains the instruction physical address extension support: a new XTN state in the DCACHE FSM enumeration, two attributes holding the post-reset values, a 32-bit extension register for the instruction cache, and two public setters.

```diff
@@ -113,4 +113,5 @@
         DCACHE_XTN_IC_FLUSH,
         DCACHE_XTN_IC_INVAL_PA,
+        DCACHE_XTN_IC_PADDR_EXT,
         DCACHE_XTN_IT_INVAL,
         DCACHE_XTN_DC_FLUSH,
@@ -322,4 +323,7 @@
         bool m_debug_ok;
 
+        uint32_t m_dcache_paddr_ext_reset;
+        uint32_t m_icache_paddr_ext_reset;
+
         ////////////////////////////////////////
         // Communication with processor ISS
@@ -399,4 +403,7 @@
         sc_signal<size_t> r_icache_cc_send_way;          // ICACHE cc_send way
         sc_signal<size_t> r_icache_cc_send_updt_tab_idx; // ICACHE cc_send update table index
+
+        // Physical address extension for data access
+        sc_signal<uint32_t> r_icache_paddr_ext;          // CP2 register (if vci_address > 32)
 
         ///////////////////////////////
@@ -747,4 +754,26 @@
         }
 
+        /////////////////////////////////////////////////////////////
+        // Set the m_dcache_paddr_ext_reset attribute
+        //
+        // The r_dcache_paddr_ext register will be initialized after
+        // reset with the m_dcache_paddr_ext_reset value
+        /////////////////////////////////////////////////////////////
+        inline void set_dcache_paddr_ext_reset(uint32_t v)
+        {
+            m_dcache_paddr_ext_reset = v;
+        }
+
+        /////////////////////////////////////////////////////////////
+        // Set the m_icache_paddr_ext_reset attribute
+        //
+        // The r_icache_paddr_ext register will be initialized after
+        // reset with the m_icache_paddr_ext_reset value
+        /////////////////////////////////////////////////////////////
+        inline void set_icache_paddr_ext_reset(uint32_t v)
+        {
+            m_icache_paddr_ext_reset = v;
+        }
+
     private:
         void transition();
```
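The two setters give the platform netlist control over the post-reset value of the address extension registers, e.g. when boot code must be fetched above the first 4 Gbytes of physical memory. A minimal usage sketch, assuming a SoCLib top level in which `proc` points to an already-constructed VciCcVCacheWrapper (the variable name and the 0x3 value are illustrative):

```cpp
// Hypothetical netlist excerpt: choose the high-order physical address
// bits (39:32 on a 40-bit platform) used by both caches right after reset.
proc->set_icache_paddr_ext_reset(0x3); // reset code fetched in segment 0x3_XXXXXXXX
proc->set_dcache_paddr_ext_reset(0x3); // reset data accessed in the same segment
```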
branches/fault_tolerance/module/internal_component/vci_cc_vcache_wrapper/caba/source/src/vci_cc_vcache_wrapper.cpp
(r627 → r656)

The functional changes wire up the new XTN_INST_PADDR_EXT extended instruction on the instruction side: the constructor and reset logic initialize the address extension registers, and the ICACHE FSM both serves the new XTN request and uses the extension register when building fetch addresses wider than 32 bits. The debug macros also get explicit values and the new state name enters the trace table:

```diff
-#define DEBUG_DCACHE
-#define DEBUG_ICACHE
-#define DEBUG_CMD
+#define DEBUG_DCACHE 1
+#define DEBUG_ICACHE 1
+#define DEBUG_CMD    0
```

```diff
         "DCACHE_XTN_IC_FLUSH",
         "DCACHE_XTN_IC_INVAL_PA",
+        "DCACHE_XTN_IC_PADDR_EXT",
         "DCACHE_XTN_IT_INVAL",
         "DCACHE_XTN_DC_FLUSH",
```

The two reset-value attributes are initialized to 0 in the constructor:

```diff
         m_debug_start_cycle( debug_start_cycle ),
         m_debug_ok( debug_ok ),
+        m_dcache_paddr_ext_reset(0),
+        m_icache_paddr_ext_reset(0),
 
         r_mmu_ptpr("r_mmu_ptpr"),
```

and both extension registers are loaded from them in the reset phase:

```diff
     // reset data physical address extension
-    r_dcache_paddr_ext = 0;
+    r_dcache_paddr_ext = m_dcache_paddr_ext_reset;
+
+    // reset inst physical address extension
+    r_icache_paddr_ext = m_icache_paddr_ext_reset;
```

Several hunks in this span are purely cosmetic: the constructor parameter list is realigned, the response and update FIFO constructions gain "// 2 words depth" comments, the debug trace helpers are reindented, and the XTN_MMU_ICACHE_PA_INV handler is reindented around its 32-bit assert.

In ICACHE_IDLE, the XTN dispatch gains a case that is served immediately, since it touches neither the icache nor the itlb:

```diff
             // XTN requests sent by DCACHE FSM
-            // These request are not executed in this IDLE state , because
-            // they require access to icache or itlb, that are already accessed
+            // These request are not executed in this IDLE state (except XTN_INST_PADDR_EXT),
+            // because they require access to icache or itlb, that are already accessed
             if ( r_dcache_xtn_req.read() )
```

```diff
+                else if ( (int)r_dcache_xtn_opcode.read() == (int)iss_t::XTN_INST_PADDR_EXT)
+                {
+                    r_icache_paddr_ext = r_dcache_save_wdata.read();
+                    r_dcache_xtn_req   = false;
+                }
                 else
```

Still in ICACHE_IDLE, when the MMU is deactivated, the fetch address is completed with the extension register on platforms with more than 32 physical address bits:

```diff
                 else
                 {
-                    paddr = (paddr_t)m_ireq.addr;
+                    if (vci_param::N > 32)
+                    {
+                        paddr = (paddr_t)m_ireq.addr |
+                                ((paddr_t)r_icache_paddr_ext.read() << 32);
+                    }
+                    else
+                    {
+                        paddr = (paddr_t)m_ireq.addr;
+                    }
                 }
```
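That last hunk is a plain bit-field concatenation: when the VCI address width exceeds 32 bits and the MMU is off, the high-order bits of the fetch address come from r_icache_paddr_ext and the low 32 bits from the processor request. A self-contained illustration in plain C++ (the addresses are made up):

```cpp
#include <cstdint>
#include <cstdio>

typedef uint64_t paddr_t;   // stand-in for paddr_t when vci_param::N = 40

int main()
{
    uint32_t ireq_addr = 0xBFC00000; // 32-bit fetch address from the processor
    uint32_t paddr_ext = 0x3;        // r_icache_paddr_ext: physical bits 39:32

    // Same expression as the new ICACHE_IDLE code.
    paddr_t paddr = (paddr_t)ireq_addr | ((paddr_t)paddr_ext << 32);

    printf("paddr = 0x%010llx\n", (unsigned long long)paddr); // 0x03bfc00000
    return 0;
}
```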
The hunks covering the rest of the ICACHE FSM (from the cacheability computation in ICACHE_IDLE through ICACHE_TLB_WAIT, the XTN flush and invalidate states, the miss-handling states and the entry of ICACHE_CC_CHECK) contain no behavioral change: state-header comments that had drifted out of alignment are restored (for example, ICACHE_XTN_CACHE_FLUSH keeps its note that the flush scans all slots via r_icache_flush_count, taking up to two cycles per slot and generating a cleanup request for each valid line), local declarations (way, set, word, state, victim) are realigned, and a few assignments such as r_icache_fsm = ICACHE_IDLE are respaced.
This span closes the ICACHE FSM with more of the same cosmetic work (comment restoration in ICACHE_CC_CHECK, ICACHE_CC_INVAL and ICACHE_CC_UPDT) and enters the DCACHE FSM, where the pattern continues: declaration realignment at the entry of DCACHE_IDLE, and a short trailing comment regained by every case of the XTN write switch, such as "itlb & dtlb must be flushed" or "no cache or tlb access". The substantive change makes the extended instruction software-visible in both directions. An XTN read returns the current extension value:

```diff
+                case iss_t::XTN_INST_PADDR_EXT:
+                    m_drsp.rdata = r_icache_paddr_ext.read();
+                    m_drsp.valid = true;
+                    m_drsp.error = false;
+                    break;
+
                 default:
                     r_mmu_detr = MMU_READ_UNDEFINED_XTN;
```

and an XTN write posts a request to the ICACHE FSM:

```diff
+                case iss_t::XTN_INST_PADDR_EXT:    // no cache or tlb access
+                    r_dcache_xtn_req = true;
+                    r_dcache_fsm     = DCACHE_XTN_IC_PADDR_EXT;
+                    break;
+
                 case iss_t::XTN_ICACHE_PREFETCH:   // not implemented : no action
                 case iss_t::XTN_DCACHE_PREFETCH:   // not implemented : no action
```
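Both cases reuse the existing request/acknowledge handshake between the two FSMs: the DCACHE FSM raises r_dcache_xtn_req and waits in DCACHE_XTN_IC_PADDR_EXT, while the ICACHE FSM, from ICACHE_IDLE, copies r_dcache_save_wdata into r_icache_paddr_ext and clears the flag. A reduced stand-alone model of that flag handshake (the register names mirror the RTL, the sequencing loop is illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// Registers shared by the two FSMs (names mirror the RTL).
static bool     r_dcache_xtn_req    = false;
static uint32_t r_dcache_save_wdata = 0;
static uint32_t r_icache_paddr_ext  = 0;

// DCACHE side: a processor XTN write posts the request and would
// then park the FSM in DCACHE_XTN_IC_PADDR_EXT until it is served.
static void dcache_post_xtn_write(uint32_t wdata)
{
    r_dcache_save_wdata = wdata;
    r_dcache_xtn_req    = true;
}

// ICACHE side: in ICACHE_IDLE, serve a pending XTN_INST_PADDR_EXT.
static void icache_idle_step()
{
    if (r_dcache_xtn_req)
    {
        r_icache_paddr_ext = r_dcache_save_wdata; // update extension register
        r_dcache_xtn_req   = false;               // acknowledge: DCACHE resumes
    }
}

int main()
{
    dcache_post_xtn_write(0x3);     // software writes extension = 0x3
    while (r_dcache_xtn_req)        // "cycles" until the ICACHE FSM serves it
        icache_idle_step();
    printf("r_icache_paddr_ext = 0x%x\n", r_icache_paddr_ext);
    return 0;
}
```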
One functional hunk appears in this span: the new state joins the group that waits for the ICACHE FSM to complete an XTN request, with the same dead-lock caution as its neighbours, since the forwarded request can collide with a simultaneous ITLB miss:

```diff
     ////////////////////////
     case DCACHE_XTN_IC_FLUSH:       // Waiting completion of an XTN request to the ICACHE FSM
     case DCACHE_XTN_IC_INVAL_VA:    // Caution : the itlb miss requests must be taken
     case DCACHE_XTN_IC_INVAL_PA:    // because the XTN_ICACHE_INVAL request to icache
-    case DCACHE_XTN_IT_INVAL:       // can generate an itlb miss,
-                                    // and because it can exist a simultaneous ITLB miss
+    case DCACHE_XTN_IC_PADDR_EXT:   // can generate an itlb miss,
+    case DCACHE_XTN_IT_INVAL:       // and because it can exist a simultaneous ITLB miss
     {
```

Everything else here (the write and SC dirty-bit handling, the TLB-miss sub-FSM with its PTE1/PTE2 GET, SELECT and UPDT states, DCACHE_TLB_LR_WAIT, DCACHE_TLB_RETURN, DCACHE_XTN_SWITCH and DCACHE_XTN_SYNC) is cosmetic: restored state-header comments such as "try to read a PT1 entry in dcache" or "write a new PTE2 in tlb after testing the L/R bit", realigned declarations, and respaced assignments around the preparation of the r_dcache_vci_cas_old / r_dcache_vci_cas_new registers when the L or R access-locality bit must be set in a PTE.
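Those cas_old / cas_new registers implement an atomic read-modify-write of the in-memory PTE: the FSM proposes the old flags and the old flags with the L (or R) bit set, and memory applies the update only if the entry has not changed in between. A reduced sketch of the idea, with std::atomic standing in for the VCI CAS transaction (the PTE_L_MASK bit position is illustrative):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

const uint32_t PTE_L_MASK = 0x20000000;          // illustrative bit position

std::atomic<uint32_t> pte_in_memory(0x4000ABCD); // one page-table entry

int main()
{
    // DCACHE_TLB_PTE1_UPDT: set the L bit only if it is not already set.
    uint32_t cas_old = pte_in_memory.load();
    if ((cas_old & PTE_L_MASK) != PTE_L_MASK)
    {
        uint32_t cas_new = cas_old | PTE_L_MASK;
        // Fails if the entry changed since cas_old was read; the real
        // FSM observes the outcome in DCACHE_TLB_LR_WAIT.
        bool ok = pte_in_memory.compare_exchange_strong(cas_old, cas_new);
        printf("CAS %s, PTE = 0x%08x\n", ok ? "ok" : "failed",
               pte_in_memory.load());
    }
    return 0;
}
```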
The closing hunks are likewise cosmetic: the selective invalidate states (DCACHE_XTN_DT_INVAL and DCACHE_XTN_DC_INVAL_VA/PA/GO/END), the miss-handling sequence (DCACHE_MISS_CLEAN, DCACHE_MISS_WAIT, DCACHE_MISS_DATA_UPDT), the uncacheable-read and SC wait states, DCACHE_DIRTY_GET_PTE and DCACHE_INVAL_TLB_SCAN only get restored comments ("bus error", "data available", "tlb hit", and so on), re-braced else-if blocks and realigned declarations. At the end of the transition function, the frozen-cycle counters gain the comments "// used for instrumentation" (m_cpt_frz_cycles) and "// used for debug" (m_cpt_stop_simulation); behavior is unchanged.
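For reference, the watchdog those comments annotate works as follows: every cycle in which a processor request is pending without a response increments both counters, and the simulation aborts once m_max_frozen_cycles is exceeded. A reduced model of that guard (the reset-on-progress step is an assumption; the surrounding code is not shown in this changeset):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main()
{
    const uint32_t m_max_frozen_cycles = 1000; // constructor parameter
    uint32_t m_cpt_frz_cycles      = 0;        // used for instrumentation
    uint32_t m_cpt_stop_simulation = 0;        // used for debug

    for (uint64_t cycle = 0; ; ++cycle)
    {
        // Stand-in for (m_ireq.valid and not m_irsp.valid) or
        //              (m_dreq.valid and not m_drsp.valid).
        bool frozen = true;

        if (frozen)
        {
            m_cpt_frz_cycles++;
            m_cpt_stop_simulation++;
            if (m_cpt_stop_simulation > m_max_frozen_cycles)
            {
                fprintf(stderr, "cycle %llu: processor frozen for %u cycles, "
                        "stopping simulation\n",
                        (unsigned long long)cycle, m_cpt_stop_simulation);
                exit(1);
            }
        }
        else
        {
            m_cpt_stop_simulation = 0; // assumption: progress rearms the watchdog
        }
    }
}
```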