Changeset 205 for trunk/modules
Timestamp: Mar 11, 2012, 6:42:17 PM
Location: trunk/modules/vci_cc_vcache_wrapper_v4/caba/source
Files: 2 edited
Legend: unchanged / added (+) / removed (-)
trunk/modules/vci_cc_vcache_wrapper_v4/caba/source/include/vci_cc_vcache_wrapper_v4.h
r204 → r205

            DCACHE_TLB_PTE2_SELECT,
            DCACHE_TLB_PTE2_UPDT,
    -       DCACHE_TLB_SC_UPDT,
    -       DCACHE_TLB_SC_WAIT,
    +       DCACHE_TLB_LR_UPDT,
    +       DCACHE_TLB_LR_WAIT,
            DCACHE_TLB_RETURN,
            // handling processor XTN requests
… …
            DCACHE_XTN_DC_INVAL_GO,
            DCACHE_XTN_DT_INVAL,
    -       //handling fourth stage write
    -       DCACHE_WRITE_TLB_DIRTY,
    -       DCACHE_WRITE_CACHE_DIRTY,
    -       DCACHE_WRITE_SC_WAIT,
    -       DCACHE_WRITE_UNC_WAIT,
    +       //handling long write (set dirty bit)
    +       DCACHE_DIRTY_TLB_SET,
    +       DCACHE_DIRTY_CACHE_SET,
    +       DCACHE_DIRTY_SC_WAIT,
    +       DCACHE_DIRTY_UNC_WAIT,
            // handling processor miss requests
            DCACHE_MISS_VICTIM,
… …
            // handling processor unc and sc requests
            DCACHE_UNC_WAIT,
    +       DCACHE_SC_WAIT,
            // handling coherence requests
            DCACHE_CC_CHECK,
… …
            ////////////////////////////////////////
    -       // Variables used by print_trace()
    +       // Communication with processor ISS
            ////////////////////////////////////////
    -       bool                                     m_ireq_valid;
    -       uint32_t                                 m_ireq_addr;
    -       soclib::common::Iss2::ExecMode           m_ireq_mode;
    -       bool                                     m_irsp_valid;
    -       uint32_t                                 m_irsp_instruction;
    -       bool                                     m_irsp_error;
    -       bool                                     m_dreq_valid;
    -       uint32_t                                 m_dreq_addr;
    -       soclib::common::Iss2::ExecMode           m_dreq_mode;
    -       soclib::common::Iss2::DataOperationType  m_dreq_type;
    -       uint32_t                                 m_dreq_wdata;
    -       uint8_t                                  m_dreq_be;
    -       bool                                     m_drsp_valid;
    -       uint32_t                                 m_drsp_rdata;
    -       bool                                     m_drsp_error;
    +       typename iss_t::InstructionRequest       m_ireq;
    +       typename iss_t::InstructionResponse      m_irsp;
    +       typename iss_t::DataRequest              m_dreq;
    +       typename iss_t::DataResponse             m_drsp;

            /////////////////////////////////////////////
… …
            sc_signal<bool>     r_dcache_p1_tlb_big;     // big page bit (from dtlb)
            // registers written in P2 stage (used in long write)
    -       sc_signal<uint32_t> r_dcache_p2_vaddr;       // virtual address (from proc)
    -       sc_signal<size_t>   r_dcache_p2_tlb_way;     // selected way in dtlb
    -       sc_signal<size_t>   r_dcache_p2_tlb_set;     // selected set in dtlb
    -       sc_signal<bool>     r_dcache_p2_set_dirty;   // PTE dirty bit must be set
    +       sc_signal<size_t>   r_dcache_p2_way;         // selected way in dtlb or dcache
    +       sc_signal<size_t>   r_dcache_p2_set;         // selected set in dtlb or dcache
    +       sc_signal<size_t>   r_dcache_p2_word;        // selected word in dcache
            sc_signal<paddr_t>  r_dcache_p2_pte_paddr;   // PTE physical address
    -       sc_signal<size_t>   r_dcache_p2_pte_way;     // selected way in dcache
    -       sc_signal<size_t>   r_dcache_p2_pte_set;     // selected set in dcache
    -       sc_signal<size_t>   r_dcache_p2_pte_word;    // selected word in dcache
    -       sc_signal<size_t>   r_dcache_p2_pte;         // pte value read in dcache
    +       sc_signal<size_t>   r_dcache_p2_pte_value;   // PTE value
    +       sc_signal<bool>     r_dcache_p2_type_sc;     // request type (WRITE or SC)
    +       sc_signal<bool>     r_dcache_p2_sc_success;  // successful SC request

            // communication between DCACHE FSM and VCI_CMD FSM
… …
            // handling dcache miss
            sc_signal<int>      r_dcache_miss_type;      // type of miss depending on the requester
            sc_signal<size_t>   r_dcache_miss_word;      // word index for sequencial cache update
            sc_signal<size_t>   r_dcache_miss_way;       // selected way for cache update
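A note on the m_ireq / m_irsp / m_dreq / m_drsp members introduced above: instead of copying each request and response field into dedicated scalars for print_trace(), the wrapper now keeps the ISS request/response records themselves, fills them in the transition function, and streams them directly. The stand-alone C++ sketch below illustrates that pattern; the record fields and names are assumptions modeled on the removed scalar members, not the SoCLib Iss2 classes.

    #include <cstdint>
    #include <iostream>

    // Hypothetical request/response records, modeled on the removed m_dreq_* / m_drsp_* scalars.
    struct DataRequest  { bool valid = false; uint32_t addr = 0; uint32_t wdata = 0; uint8_t be = 0; };
    struct DataResponse { bool valid = false; uint32_t rdata = 0; bool error = false; };

    std::ostream& operator<<(std::ostream& o, const DataRequest& r)
    { return o << "DREQ valid=" << r.valid << " addr=0x" << std::hex << r.addr << std::dec; }

    std::ostream& operator<<(std::ostream& o, const DataResponse& r)
    { return o << "DRSP valid=" << r.valid << " error=" << r.error; }

    class Wrapper
    {
        DataRequest  m_dreq;    // written once per cycle by transition()
        DataResponse m_drsp;    // read as-is by print_trace()
    public:
        void transition()                              // one simulation cycle (sketch)
        {
            m_dreq = { true, 0x1000, 0, 0xF };         // request sampled from the ISS
            m_drsp = { true, 0xDEADBEEF, false };      // response computed by the cache FSM
        }
        void print_trace() const
        { std::cout << "  " << m_dreq << "\n  " << m_drsp << std::endl; }
    };

    int main() { Wrapper w; w.transition(); w.print_trace(); return 0; }

Because the records persist between transition() and print_trace(), there is no per-field copying at the end of the transition function any more, which is exactly what the removed m_*_valid / m_*_addr assignments in the .cpp diff below were doing.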
trunk/modules/vci_cc_vcache_wrapper_v4/caba/source/src/vci_cc_vcache_wrapper_v4.cpp
r204 r205 71 71 "DCACHE_TLB_PTE2_SELECT", 72 72 "DCACHE_TLB_PTE2_UPDT", 73 "DCACHE_TLB_ SC_UPDT",74 "DCACHE_TLB_ SC_WAIT",73 "DCACHE_TLB_LR_UPDT", 74 "DCACHE_TLB_LR_WAIT", 75 75 "DCACHE_TLB_RETURN", 76 76 … … 88 88 "DCACHE_XTN_DT_INVAL", 89 89 90 "DCACHE_ WRITE_TLB_DIRTY",91 "DCACHE_ WRITE_CACHE_DIRTY",92 "DCACHE_ WRITE_SC_WAIT",93 "DCACHE_ WRITE_UNC_WAIT",90 "DCACHE_DIRTY_TLB_SET", 91 "DCACHE_DIRTY_CACHE_SET", 92 "DCACHE_DIRTY_SC_WAIT", 93 "DCACHE_DIRTY_UNC_WAIT", 94 94 95 95 "DCACHE_MISS_VICTIM", … … 100 100 101 101 "DCACHE_UNC_WAIT", 102 "DCACHE_SC_WAIT", 102 103 103 104 "DCACHE_CC_CHECK", … … 273 274 r_dcache_p1_tlb_big("r_dcache_p1_tlb_big"), 274 275 275 r_dcache_p2_vaddr("r_dcache_p2_vaddr"), 276 r_dcache_p2_tlb_way("r_dcache_p2_tlb_way"), 277 r_dcache_p2_tlb_set("r_dcache_p2_tlb_set"), 278 r_dcache_p2_set_dirty("r_dcache_p2_set_dirty"), 276 r_dcache_p2_way("r_dcache_p2_way"), 277 r_dcache_p2_set("r_dcache_p2_set"), 278 r_dcache_p2_word("r_dcache_p2_word"), 279 279 r_dcache_p2_pte_paddr("r_dcache_p2_pte_paddr"), 280 r_dcache_p2_pte_way("r_dcache_p2_pte_way"), 281 r_dcache_p2_pte_set("r_dcache_p2_pte_set"), 282 r_dcache_p2_pte_word("r_dcache_p2_pte_word"), 283 r_dcache_p2_pte("r_dcache_p2_pte"), 280 r_dcache_p2_pte_value("r_dcache_p2_pte_value"), 281 r_dcache_p2_type_sc("r_dcache_p2_type_sc"), 282 r_dcache_p2_sc_success("r_dcache_p2_sc_success"), 284 283 285 284 r_dcache_vci_paddr("r_dcache_vci_paddr"), … … 442 441 //////////////////////////////////// 443 442 { 444 // b0 : write buffer printtrace443 // b0 : write buffer trace 445 444 // b1 : write buffer verbose 446 // b2 : dcache print trace 447 // b3 : icache print trace 448 449 typename iss_t::InstructionRequest ireq; 450 typename iss_t::InstructionResponse irsp; 451 typename iss_t::DataRequest dreq; 452 typename iss_t::DataResponse drsp; 453 454 ireq.valid = m_ireq_valid; 455 ireq.addr = m_ireq_addr; 456 ireq.mode = m_ireq_mode; 457 458 irsp.valid = m_irsp_valid; 459 irsp.instruction = m_irsp_instruction; 460 irsp.error = m_irsp_error; 461 462 dreq.valid = m_dreq_valid; 463 dreq.addr = m_dreq_addr; 464 dreq.mode = m_dreq_mode; 465 dreq.type = m_dreq_type; 466 dreq.wdata = m_dreq_wdata; 467 dreq.be = m_dreq_be; 468 469 drsp.valid = m_drsp_valid; 470 drsp.rdata = m_drsp_rdata; 471 drsp.error = m_drsp_error; 445 // b2 : dcache trace 446 // b3 : icache trace 447 // b4 : dtlb trace 448 // b5 : itlb trace 472 449 473 450 std::cout << std::dec << "PROC " << name() << std::endl; 474 451 475 std::cout << " " << ireq << std::endl;476 std::cout << " " << irsp << std::endl;477 std::cout << " " << dreq << std::endl;478 std::cout << " " << drsp << std::endl;452 std::cout << " " << m_ireq << std::endl; 453 std::cout << " " << m_irsp << std::endl; 454 std::cout << " " << m_dreq << std::endl; 455 std::cout << " " << m_drsp << std::endl; 479 456 480 457 std::cout << " " << icache_fsm_state_str[r_icache_fsm.read()] … … 488 465 std::cout << std::endl; 489 466 490 if(mode & 0x 1)467 if(mode & 0x01) 491 468 { 492 469 r_wbuf.printTrace((mode>>1)&1); 493 470 } 494 if(mode & 0x 4)495 { 496 std::cout << " Data cache" << std::endl;471 if(mode & 0x04) 472 { 473 std::cout << " Data Cache" << std::endl; 497 474 r_dcache.printTrace(); 498 475 } 499 if(mode & 0x 8)500 { 501 std::cout << " Instruction cache" << std::endl;476 if(mode & 0x08) 477 { 478 std::cout << " Instruction Cache" << std::endl; 502 479 r_icache.printTrace(); 480 } 481 if(mode & 0x10) 482 { 483 std::cout << " Data TLB" << std::endl; 484 r_dtlb.printTrace(); 485 } 486 if(mode & 0x20) 487 { 488 
std::cout << " Instruction TLB" << std::endl; 489 r_itlb.printTrace(); 503 490 } 504 491 } … … 1185 1172 /////////////////////////////////////////////////////////////////////// 1186 1173 1187 typename iss_t::InstructionRequest ireq = ISS_IREQ_INITIALIZER; 1188 typename iss_t::DataRequest dreq = ISS_DREQ_INITIALIZER; 1189 1190 r_iss.getRequests(ireq, dreq); 1174 r_iss.getRequests(m_ireq, m_dreq); 1191 1175 1192 1176 //////////////////////////////////////////////////////////////////////////////////// … … 1239 1223 //////////////////////////////////////////////////////////////////////////////////////// 1240 1224 1241 // The default value for irsp.valid is false1242 typename iss_t::InstructionResponse irsp = ISS_IRSP_INITIALIZER;1225 // default value for m_irsp.valid 1226 m_irsp.valid = false; 1243 1227 1244 1228 switch( r_icache_fsm.read() ) … … 1296 1280 1297 1281 // processor request 1298 if ( ireq.valid )1282 if ( m_ireq.valid ) 1299 1283 { 1300 1284 bool cacheable; … … 1302 1286 1303 1287 // We register processor request 1304 r_icache_vaddr_save = ireq.addr;1288 r_icache_vaddr_save = m_ireq.addr; 1305 1289 1306 1290 // speculative icache access (if cache activated) … … 1313 1297 { 1314 1298 paddr_t spc_paddr = (r_icache_vci_paddr.read() & ~PAGE_K_MASK) | 1315 ((paddr_t) ireq.addr & PAGE_K_MASK);1299 ((paddr_t)m_ireq.addr & PAGE_K_MASK); 1316 1300 1317 1301 #ifdef INSTRUMENTATION … … 1339 1323 m_cpt_itlb_read++; 1340 1324 #endif 1341 tlb_hit = r_itlb.translate( ireq.addr,1325 tlb_hit = r_itlb.translate( m_ireq.addr, 1342 1326 &tlb_paddr, 1343 1327 &tlb_flags, … … 1359 1343 // cacheability 1360 1344 if ( not (r_mmu_mode.read() & INS_CACHE_MASK) ) cacheable = false; 1361 else cacheable = m_cacheability_table[ ireq.addr];1345 else cacheable = m_cacheability_table[m_ireq.addr]; 1362 1346 1363 1347 // physical address 1364 paddr = (paddr_t) ireq.addr;1348 paddr = (paddr_t)m_ireq.addr; 1365 1349 } 1366 1350 else // itlb activated … … 1376 1360 1377 1361 // access rights checking 1378 if ( not tlb_flags.u && ( ireq.mode == iss_t::MODE_USER) )1362 if ( not tlb_flags.u && (m_ireq.mode == iss_t::MODE_USER) ) 1379 1363 { 1380 1364 r_mmu_ietr = MMU_READ_PRIVILEGE_VIOLATION; 1381 r_mmu_ibvar = ireq.addr;1382 irsp.valid = true;1383 irsp.error = true;1384 irsp.instruction = 0;1365 r_mmu_ibvar = m_ireq.addr; 1366 m_irsp.valid = true; 1367 m_irsp.error = true; 1368 m_irsp.instruction = 0; 1385 1369 break; 1386 1370 } … … 1388 1372 { 1389 1373 r_mmu_ietr = MMU_READ_EXEC_VIOLATION; 1390 r_mmu_ibvar = ireq.addr;1391 irsp.valid = true;1392 irsp.error = true;1393 irsp.instruction = 0;1374 r_mmu_ibvar = m_ireq.addr; 1375 m_irsp.valid = true; 1376 m_irsp.error = true; 1377 m_irsp.instruction = 0; 1394 1378 break; 1395 1379 } … … 1449 1433 m_cpt_ins_read++; 1450 1434 #endif 1451 irsp.valid = true;1452 irsp.instruction = cache_inst;1435 m_irsp.valid = true; 1436 m_irsp.instruction = cache_inst; 1453 1437 } 1454 1438 } … … 1458 1442 r_icache_fsm = ICACHE_UNC_WAIT; 1459 1443 } 1460 } // end if ireq.valid1444 } // end if m_ireq.valid 1461 1445 break; 1462 1446 } … … 1477 1461 } 1478 1462 1479 if ( ireq.valid ) m_cost_ins_tlb_miss_frz++;1463 if ( m_ireq.valid ) m_cost_ins_tlb_miss_frz++; 1480 1464 1481 1465 // DCACHE FSM signals response by reseting the request flip-flop … … 1485 1469 { 1486 1470 r_icache_tlb_rsp_error = false; 1487 irsp.error = true;1488 irsp.valid = true;1471 m_irsp.error = true; 1472 m_irsp.valid = true; 1489 1473 r_icache_fsm = ICACHE_IDLE; 1490 1474 } … … 1638 1622 // when the selected slot is not 
empty 1639 1623 { 1640 if ( ireq.valid ) m_cost_ins_miss_frz++;1624 if ( m_ireq.valid ) m_cost_ins_miss_frz++; 1641 1625 1642 1626 bool valid; … … 1679 1663 case ICACHE_MISS_WAIT: // waiting a response to a miss request from VCI_RSP FSM 1680 1664 { 1681 if ( ireq.valid ) m_cost_ins_miss_frz++;1665 if ( m_ireq.valid ) m_cost_ins_miss_frz++; 1682 1666 1683 1667 // external coherence request … … 1693 1677 r_mmu_ietr = MMU_READ_DATA_ILLEGAL_ACCESS; 1694 1678 r_mmu_ibvar = r_icache_vaddr_save.read(); 1695 irsp.valid = true;1696 irsp.error = true;1679 m_irsp.valid = true; 1680 m_irsp.error = true; 1697 1681 r_vci_rsp_ins_error = false; 1698 1682 r_icache_fsm = ICACHE_IDLE; … … 1708 1692 case ICACHE_MISS_UPDT: // update the cache (one word per cycle) 1709 1693 { 1710 if ( ireq.valid ) m_cost_ins_miss_frz++;1694 if ( m_ireq.valid ) m_cost_ins_miss_frz++; 1711 1695 1712 1696 if ( r_vci_rsp_fifo_icache.rok() ) // response available … … 1778 1762 { 1779 1763 r_mmu_ietr = MMU_READ_DATA_ILLEGAL_ACCESS; 1780 r_mmu_ibvar = ireq.addr;1764 r_mmu_ibvar = m_ireq.addr; 1781 1765 r_vci_rsp_ins_error = false; 1782 irsp.valid = true;1783 irsp.error = true;1766 m_irsp.valid = true; 1767 m_irsp.error = true; 1784 1768 r_icache_fsm = ICACHE_IDLE; 1785 1769 } … … 1788 1772 vci_rsp_fifo_icache_get = true; 1789 1773 r_icache_fsm = ICACHE_IDLE; 1790 if ( ireq.valid and (ireq.addr == r_icache_vaddr_save.read()) ) // request not modified1791 { 1792 irsp.valid = true;1793 irsp.instruction = r_vci_rsp_fifo_icache.read();1774 if ( m_ireq.valid and (m_ireq.addr == r_icache_vaddr_save.read()) ) // request not modified 1775 { 1776 m_irsp.valid = true; 1777 m_irsp.instruction = r_vci_rsp_fifo_icache.read(); 1794 1778 } 1795 1779 } … … 1887 1871 1888 1872 } // end switch r_icache_fsm 1889 1890 // save the IREQ and IRSP fields for the print_trace() function1891 m_ireq_valid = ireq.valid;1892 m_ireq_addr = ireq.addr;1893 m_ireq_mode = ireq.mode;1894 1895 m_irsp_valid = irsp.valid;1896 m_irsp_instruction = irsp.instruction;1897 m_irsp_error = irsp.error;1898 1873 1899 1874 //////////////////////////////////////////////////////////////////////////////////// … … 1911 1886 // pre-empted state. 1912 1887 // 1913 // 2/ processor requests : 1888 // 2/ TLB miss 1889 // The page tables can be cacheable. 1890 // In case of miss in itlb or dtlb, the tlb miss is handled by a dedicated 1891 // sub-fsm (DCACHE_TLB_MISS state), that handle possible miss in DCACHE, 1892 // this sub-fsm implement the table-walk... 1893 // 1894 // 3/ processor requests : 1914 1895 // Processor READ, WRITE, LL or SC requests are taken in IDLE state only. 1915 1896 // The IDLE state implements a three stages pipe-line to handle write bursts: … … 1917 1898 // - The registration in wbuf and the dcache hit are computed in stage P1. 1918 1899 // - The dcache update is done in stage P2. 1919 // A write operation can require a fourth stage if the dirty bit must be updated, 1920 // or if the TLBs must be cleared, but these "long write" operation requires 1921 // to exit the IDLE stage 1900 // A write operation can require a "long write" operation (if the PTE dirty bit 1901 // must be updated) handled by a dedicated sub-fsm (DCACHE_DIRTY_TLB_SET state). 1902 // If a PTE is modified, the both te itlb and dtlb are selectively, but sequencially 1903 // cleared by a dedicated sub_fsm (DCACHE_INVAL_TLB_SCAN state). 
1922 1904 // If there is no write in the pipe, dcache and dtlb are accessed in parallel, 1923 1905 // (virtual address for itlb, and speculative physical address computed during 1924 1906 // previous cycle for dcache) in order to return the data in one cycle for a read. 1925 // We just pay an extra cycle when the speculative access is illegal.1907 // We just pay an extra cycle when the speculative access is failing. 1926 1908 // 1927 // 3/ Atomic instructions LL/SC1909 // 4/ Atomic instructions LL/SC 1928 1910 // The LL/SC address can be cacheable or non cacheable. 1929 1911 // The reservation registers (r_dcache_ll_valid, r_dcache_ll_vaddr and … … 1933 1915 // READ transactions (one word / one line, depending on the cacheability). 1934 1916 // - SC requests from the processor are systematically transmitted to the 1935 // memory cache as C OMPARE&swap requests (both the data value stored in the1917 // memory cache as Compare&swap requests (both the data value stored in the 1936 1918 // r_dcache_ll_data register and the new value). 1919 // The cache is not updated, as this is done in case of success by the 1920 // coherence transaction. 1921 // If rqired, the dirty bit is updated in PTE by a "long write". 1937 1922 // 1938 // 4/ Non cacheable access:1923 // 5/ Non cacheable access: 1939 1924 // This component implement a strong order between non cacheable access 1940 1925 // (read or write) : A new non cacheable VCI transaction starts only when … … 1946 1931 // pending non cacheable write transaction completes). 1947 1932 // 1948 // 5/ Error handling:1933 // 6/ Error handling: 1949 1934 // When the MMU is not activated, Read Bus Errors are synchronous events, 1950 1935 // but Write Bus Errors are asynchronous events (processor is not frozen). … … 1959 1944 //////////////////////////////////////////////////////////////////////////////////////// 1960 1945 1961 // The default value for drsp.valid is false1962 typename iss_t::DataResponse drsp = ISS_DRSP_INITIALIZER;1946 // default value for m_drsp.valid 1947 m_drsp.valid = false; 1963 1948 1964 1949 switch ( r_dcache_fsm.read() ) 1965 1950 { 1966 1951 case DCACHE_IDLE: // There is 8 conditions to exit the IDLE state : 1967 // 1) Long write request (DCACHE FSM) => DCACHE_ WRITE_***1952 // 1) Long write request (DCACHE FSM) => DCACHE_DIRTY_*** 1968 1953 // 2) Coherence request (TGT FSM) => DCACHE_CC_CHECK 1969 1954 // 3) ITLB miss request (ICACHE FSM) => DCACHE_TLB_MISS … … 1992 1977 // DCACHE_INVAL_TLB 1993 1978 // - If the PTE dirty bit must be updated, we start a "long write", that is 1994 // blocking for the processor, because we switch to the DCACHE_ WRITE_SET_DIRTY1995 1996 bool long_write_set_dirty = false; 1997 bool tlb_inval_required = false; 1979 // blocking for the processor, because we switch to the DCACHE_DIRTY_SET_DIRTY 1980 1981 bool long_write_set_dirty = false; // default value 1982 bool tlb_inval_required = false; // default value 1998 1983 1999 1984 if ( r_dcache_p1_valid.read() ) // P2 stage activated … … 2045 2030 { 2046 2031 long_write_set_dirty = true; 2047 r_dcache_p2_vaddr = r_dcache_p1_vaddr.read(); 2048 r_dcache_p2_set_dirty = r_dcache_p1_set_dirty.read(); 2049 r_dcache_p2_tlb_way = r_dcache_p1_tlb_way.read(); 2050 r_dcache_p2_tlb_set = r_dcache_p1_tlb_set.read(); 2032 r_dcache_p2_way = r_dcache_p1_tlb_way.read(); 2033 r_dcache_p2_set = r_dcache_p1_tlb_set.read(); 2051 2034 // The PTE physical address is the concatenation of the nline value (from dtlb), 2052 2035 // with the word index (obtained from the proper bits of 
the virtual address) … … 2059 2042 { 2060 2043 r_dcache_p2_pte_paddr = (paddr_t)(r_dcache_p1_tlb_nline.read()*(m_dcache_words<<2)) | 2061 (paddr_t)((r_dcache_p1_vaddr.read()>> 10) & 0x3c);2044 (paddr_t)((r_dcache_p1_vaddr.read()>>9) & 0x38); 2062 2045 } 2063 2046 } … … 2171 2154 ///////////////////////////////////////////////////////////////////////////// 2172 2155 // handling P0 pipe-line stage 2173 // This stage is controlling the DCACHE FSM state register: 2156 // This stage is controlling r_dcache_fsm and r_dcache_p0_* registers. 2157 // The r_dcache_p0_valid flip-flop is only set in case of WRITE request. 2174 2158 // - the TLB invalidate requests have the highest priority, 2175 2159 // - then the long write requests, … … 2177 2161 // - then the itlb miss requests, 2178 2162 // - and finally the processor requests. 2179 // A processor read request generate a dcache access using speculative PPN 2180 // only if the write pipe-line is empty. There is an unconditionnal access 2181 // to the dtlb, using virtual address from processor. 2182 // The r_dcache_p0_valid value must be computed at all cycles. 2183 2184 bool p0_valid = false; // default value 2163 // If dtlb is activated, there is an unconditionnal access to dtlb, 2164 // for address translation. 2165 // 1) A processor WRITE request enters the three stage pipe-line (handled 2166 // by the IDLE state), and can be completed by a "long write" if the 2167 // PTE dirty bit must be updated in dtb, dcache and RAM. 2168 // 2) A processor READ request generate a simultaneouss access to 2169 // both dcache data and dcache directoty, using speculative PPN, but 2170 // is delayed if the write pipe-line is not empty. 2171 // In case of miss, we wait the VCI response in DCACHE_UNC_WAIT or 2172 // DCACHE_MISS_WAIT states. 2173 // 3) A processor LL request is handled as a READ request. 2174 // 4) A processor SC request is delayed until the write pipe-line is empty. 2175 // A VCI SC transaction is launched, and we wait the VCI response in 2176 // DCACHE_SC_WAIT state. It can be completed by a "long write" if the 2177 // PTE dirty bit must be updated in dtlb, dcache, and RAM. 2178 // The data is not modified in dcache, as it will be done by the 2179 // coherence transaction. 
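A side note on the LL/SC policy described in the comment above: the reservation taken by LL is what allows an SC to be forwarded to the memory cache as a compare-and-swap carrying both the value read by LL and the new value. A minimal sketch of that bookkeeping follows; the names and the std::optional wrapper are illustrative, not the component's r_dcache_ll_* registers.

    #include <cstdint>
    #include <optional>

    struct Reservation { uint32_t vaddr; uint32_t data; };            // set by LL
    struct ScCommand   { uint32_t old_value; uint32_t new_value; };   // sent as compare&swap

    std::optional<Reservation> ll_reservation;

    void on_ll(uint32_t vaddr, uint32_t read_data)
    {
        ll_reservation = Reservation{ vaddr, read_data };             // record the reservation
    }

    // Returns the command to issue, or nothing if the SC fails immediately (rdata = 1).
    std::optional<ScCommand> on_sc(uint32_t vaddr, uint32_t wdata)
    {
        if (ll_reservation && ll_reservation->vaddr == vaddr)
        {
            // succeed only if memory still holds the value read by LL
            return ScCommand{ ll_reservation->data, wdata };
        }
        ll_reservation.reset();                                       // no matching reservation
        return std::nullopt;
    }

    int main()
    {
        on_ll(0x2000, 5);
        return on_sc(0x2000, 7) ? 0 : 1;                              // 0 : SC command issued
    }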
2185 2180 2186 2181 // TLB inval required … … 2189 2184 r_dcache_fsm_save = DCACHE_IDLE; 2190 2185 r_dcache_fsm = DCACHE_INVAL_TLB_SCAN; 2186 r_dcache_p0_valid = false; 2191 2187 } 2192 2188 … … 2194 2190 else if ( long_write_set_dirty ) 2195 2191 { 2196 r_dcache_fsm = DCACHE_WRITE_TLB_DIRTY; 2192 r_dcache_fsm = DCACHE_DIRTY_TLB_SET; 2193 r_dcache_p0_valid = false; 2197 2194 } 2198 2195 … … 2202 2199 r_dcache_fsm_save = DCACHE_IDLE; 2203 2200 r_dcache_fsm = DCACHE_CC_CHECK; 2201 r_dcache_p0_valid = false; 2204 2202 } 2205 2203 … … 2210 2208 r_dcache_tlb_vaddr = r_icache_vaddr_save.read(); 2211 2209 r_dcache_fsm = DCACHE_TLB_MISS; 2210 r_dcache_p0_valid = false; 2212 2211 } 2213 2212 2214 2213 // processor request 2215 else if ( dreq.valid )2214 else if ( m_dreq.valid ) 2216 2215 { 2217 2216 // dcache access using speculative PPN only if pipe-line empty … … 2228 2227 { 2229 2228 cache_paddr = (r_dcache_p0_paddr.read() & ~PAGE_K_MASK) | 2230 ((paddr_t) dreq.addr & PAGE_K_MASK);2229 ((paddr_t)m_dreq.addr & PAGE_K_MASK); 2231 2230 2232 2231 cache_hit = r_dcache.read( cache_paddr, … … 2256 2255 if ( r_mmu_mode.read() & DATA_TLB_MASK ) // TLB activated 2257 2256 { 2258 tlb_hit = r_dtlb.translate( dreq.addr,2257 tlb_hit = r_dtlb.translate( m_dreq.addr, 2259 2258 &tlb_paddr, 2260 2259 &tlb_flags, … … 2278 2277 2279 2278 // register the processor request 2280 r_dcache_p0_vaddr = dreq.addr;2281 r_dcache_p0_be = dreq.be;2282 r_dcache_p0_wdata = dreq.wdata;2279 r_dcache_p0_vaddr = m_dreq.addr; 2280 r_dcache_p0_be = m_dreq.be; 2281 r_dcache_p0_wdata = m_dreq.wdata; 2283 2282 2284 2283 // Handling READ XTN requests from processor 2285 2284 // They are executed in this DCACHE_IDLE state. 2286 2285 // The processor must not be in user mode 2287 if ( dreq.type == iss_t::XTN_READ)2288 { 2289 int xtn_opcode = (int) dreq.addr/4;2286 if (m_dreq.type == iss_t::XTN_READ) 2287 { 2288 int xtn_opcode = (int)m_dreq.addr/4; 2290 2289 2291 2290 // checking processor mode: 2292 if ( dreq.mode == iss_t::MODE_USER)2291 if (m_dreq.mode == iss_t::MODE_USER) 2293 2292 { 2294 2293 r_mmu_detr = MMU_READ_PRIVILEGE_VIOLATION; 2295 r_mmu_dbvar = dreq.addr;2296 drsp.valid = true;2297 drsp.error = true;2294 r_mmu_dbvar = m_dreq.addr; 2295 m_drsp.valid = true; 2296 m_drsp.error = true; 2298 2297 r_dcache_fsm = DCACHE_IDLE; 2299 2298 } … … 2303 2302 { 2304 2303 case iss_t::XTN_INS_ERROR_TYPE: 2305 drsp.rdata = r_mmu_ietr.read();2306 drsp.valid = true;2304 m_drsp.rdata = r_mmu_ietr.read(); 2305 m_drsp.valid = true; 2307 2306 break; 2308 2307 2309 2308 case iss_t::XTN_DATA_ERROR_TYPE: 2310 drsp.rdata = r_mmu_detr.read();2311 drsp.valid = true;2309 m_drsp.rdata = r_mmu_detr.read(); 2310 m_drsp.valid = true; 2312 2311 break; 2313 2312 2314 2313 case iss_t::XTN_INS_BAD_VADDR: 2315 drsp.rdata = r_mmu_ibvar.read();2316 drsp.valid = true;2314 m_drsp.rdata = r_mmu_ibvar.read(); 2315 m_drsp.valid = true; 2317 2316 break; 2318 2317 2319 2318 case iss_t::XTN_DATA_BAD_VADDR: 2320 drsp.rdata = r_mmu_dbvar.read();2321 drsp.valid = true;2319 m_drsp.rdata = r_mmu_dbvar.read(); 2320 m_drsp.valid = true; 2322 2321 break; 2323 2322 2324 2323 case iss_t::XTN_PTPR: 2325 drsp.rdata = r_mmu_ptpr.read();2326 drsp.valid = true;2324 m_drsp.rdata = r_mmu_ptpr.read(); 2325 m_drsp.valid = true; 2327 2326 break; 2328 2327 2329 2328 case iss_t::XTN_TLB_MODE: 2330 drsp.rdata = r_mmu_mode.read();2331 drsp.valid = true;2329 m_drsp.rdata = r_mmu_mode.read(); 2330 m_drsp.valid = true; 2332 2331 break; 2333 2332 2334 2333 case iss_t::XTN_MMU_PARAMS: 2335 drsp.rdata = 
r_mmu_params;2336 drsp.valid = true;2334 m_drsp.rdata = r_mmu_params; 2335 m_drsp.valid = true; 2337 2336 break; 2338 2337 2339 2338 case iss_t::XTN_MMU_RELEASE: 2340 drsp.rdata = r_mmu_release;2341 drsp.valid = true;2339 m_drsp.rdata = r_mmu_release; 2340 m_drsp.valid = true; 2342 2341 break; 2343 2342 2344 2343 case iss_t::XTN_MMU_WORD_LO: 2345 drsp.rdata = r_mmu_word_lo.read();2346 drsp.valid = true;2344 m_drsp.rdata = r_mmu_word_lo.read(); 2345 m_drsp.valid = true; 2347 2346 break; 2348 2347 2349 2348 case iss_t::XTN_MMU_WORD_HI: 2350 drsp.rdata = r_mmu_word_hi.read();2351 drsp.valid = true;2349 m_drsp.rdata = r_mmu_word_hi.read(); 2350 m_drsp.valid = true; 2352 2351 break; 2353 2352 2354 2353 default: 2355 2354 r_mmu_detr = MMU_READ_UNDEFINED_XTN; 2356 r_mmu_dbvar = dreq.addr;2357 drsp.valid = true;2358 drsp.error = true;2355 r_mmu_dbvar = m_dreq.addr; 2356 m_drsp.valid = true; 2357 m_drsp.error = true; 2359 2358 break; 2360 2359 } // end switch xtn_opcode 2361 2360 } // end else 2361 r_dcache_p0_valid = false; 2362 2362 } // end if XTN_READ 2363 2363 … … 2368 2368 // Caches can be invalidated or flushed in user mode, 2369 2369 // and the sync instruction can be executed in user mode 2370 else if ( dreq.type == iss_t::XTN_WRITE)2371 { 2372 int xtn_opcode = (int) dreq.addr/4;2370 else if (m_dreq.type == iss_t::XTN_WRITE) 2371 { 2372 int xtn_opcode = (int)m_dreq.addr/4; 2373 2373 r_dcache_xtn_opcode = xtn_opcode; 2374 2374 2375 2375 // checking processor mode: 2376 if ( ( dreq.mode == iss_t::MODE_USER) &&2376 if ( (m_dreq.mode == iss_t::MODE_USER) && 2377 2377 (xtn_opcode != iss_t:: XTN_SYNC) && 2378 2378 (xtn_opcode != iss_t::XTN_DCACHE_INVAL) && … … 2382 2382 { 2383 2383 r_mmu_detr = MMU_WRITE_PRIVILEGE_VIOLATION; 2384 r_mmu_dbvar = dreq.addr;2385 drsp.valid = true;2386 drsp.error = true;2384 r_mmu_dbvar = m_dreq.addr; 2385 m_drsp.valid = true; 2386 m_drsp.error = true; 2387 2387 r_dcache_fsm = DCACHE_IDLE; 2388 2388 } … … 2392 2392 { 2393 2393 case iss_t::XTN_PTPR: // itlb & dtlb must be flushed 2394 r_mmu_ptpr = dreq.wdata;2394 r_mmu_ptpr = m_dreq.wdata; 2395 2395 r_dcache_xtn_req = true; 2396 2396 r_dcache_fsm = DCACHE_XTN_SWITCH; … … 2398 2398 2399 2399 case iss_t::XTN_TLB_MODE: // no cache or tlb access 2400 r_mmu_mode = dreq.wdata;2401 drsp.valid = true;2400 r_mmu_mode = m_dreq.wdata; 2401 m_drsp.valid = true; 2402 2402 r_dcache_fsm = DCACHE_IDLE; 2403 2403 break; … … 2447 2447 2448 2448 case iss_t::XTN_MMU_WORD_LO: // no cache or tlb access 2449 r_mmu_word_lo = dreq.wdata;2450 drsp.valid = true;2449 r_mmu_word_lo = m_dreq.wdata; 2450 m_drsp.valid = true; 2451 2451 r_dcache_fsm = DCACHE_IDLE; 2452 2452 break; 2453 2453 2454 2454 case iss_t::XTN_MMU_WORD_HI: // no cache or tlb access 2455 r_mmu_word_hi = dreq.wdata;2456 drsp.valid = true;2455 r_mmu_word_hi = m_dreq.wdata; 2456 m_drsp.valid = true; 2457 2457 r_dcache_fsm = DCACHE_IDLE; 2458 2458 break; … … 2460 2460 case iss_t::XTN_ICACHE_PREFETCH: // not implemented : no action 2461 2461 case iss_t::XTN_DCACHE_PREFETCH: // not implemented : no action 2462 drsp.valid = true;2462 m_drsp.valid = true; 2463 2463 r_dcache_fsm = DCACHE_IDLE; 2464 2464 break; … … 2466 2466 default: 2467 2467 r_mmu_detr = MMU_WRITE_UNDEFINED_XTN; 2468 r_mmu_dbvar = dreq.addr;2469 drsp.valid = true;2470 drsp.error = true;2468 r_mmu_dbvar = m_dreq.addr; 2469 m_drsp.valid = true; 2470 m_drsp.error = true; 2471 2471 r_dcache_fsm = DCACHE_IDLE; 2472 2472 break; 2473 2473 } // end switch xtn_opcode 2474 2474 } // end else 2475 r_dcache_p0_valid = false; 2475 
2476 } // end if XTN_WRITE 2476 2477 2477 // Handling read/write processor requests.2478 // Handling read/write/ll/sc processor requests. 2478 2479 // The dtlb and dcache can be activated or not. 2479 2480 // We compute the physical address, the cacheability, and check processor request. … … 2498 2499 // cacheability 2499 2500 if ( not (r_mmu_mode.read() & DATA_CACHE_MASK) ) cacheable = false; 2500 else cacheable = m_cacheability_table[ dreq.addr];2501 else cacheable = m_cacheability_table[m_dreq.addr]; 2501 2502 2502 2503 // physical address 2503 paddr = (paddr_t) dreq.addr;2504 paddr = (paddr_t)m_dreq.addr; 2504 2505 } 2505 2506 else // dtlb activated … … 2512 2513 2513 2514 // access rights checking 2514 if ( not tlb_flags.u and ( dreq.mode == iss_t::MODE_USER))2515 if ( not tlb_flags.u and (m_dreq.mode == iss_t::MODE_USER)) 2515 2516 { 2516 if ( ( dreq.type == iss_t::DATA_READ) or (dreq.type == iss_t::DATA_LL) )2517 if ( (m_dreq.type == iss_t::DATA_READ) or (m_dreq.type == iss_t::DATA_LL) ) 2517 2518 r_mmu_detr = MMU_READ_PRIVILEGE_VIOLATION; 2518 2519 else 2519 2520 r_mmu_detr = MMU_WRITE_PRIVILEGE_VIOLATION; 2520 2521 2521 r_mmu_dbvar = dreq.addr;2522 drsp.valid = true;2523 drsp.error = true;2524 drsp.rdata = 0;2522 r_mmu_dbvar = m_dreq.addr; 2523 m_drsp.valid = true; 2524 m_drsp.error = true; 2525 m_drsp.rdata = 0; 2525 2526 } 2526 2527 else if ( not tlb_flags.w and 2527 (( dreq.type == iss_t::DATA_WRITE) or2528 ( dreq.type == iss_t::DATA_SC)) )2528 ((m_dreq.type == iss_t::DATA_WRITE) or 2529 (m_dreq.type == iss_t::DATA_SC)) ) 2529 2530 { 2530 2531 r_mmu_detr = MMU_WRITE_ACCES_VIOLATION; 2531 r_mmu_dbvar = dreq.addr;2532 drsp.valid = true;2533 drsp.error = true;2534 drsp.rdata = 0;2532 r_mmu_dbvar = m_dreq.addr; 2533 m_drsp.valid = true; 2534 m_drsp.error = true; 2535 m_drsp.rdata = 0; 2535 2536 } 2536 2537 else … … 2544 2545 else // tlb miss 2545 2546 { 2546 r_dcache_tlb_vaddr = dreq.addr;2547 r_dcache_tlb_vaddr = m_dreq.addr; 2547 2548 r_dcache_tlb_ins = false; 2548 2549 r_dcache_fsm = DCACHE_TLB_MISS; … … 2550 2551 } // end DTLB activated 2551 2552 2552 if ( valid_req ) // processor request is valid 2553 if ( valid_req ) // processor request is valid after TLB check 2553 2554 { 2554 2555 // physical address and cacheability registration … … 2562 2563 // If dcache miss, we go to DCACHE_MISS_VICTIM state. 2563 2564 // If uncacheable, we go to DCACHE_UNC_WAIT state. 
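The speculative dcache access mentioned above reuses the physical page number obtained at the previous cycle and only substitutes the page offset, which is correct whenever two consecutive accesses fall in the same 4-Kbyte page. A small sketch of that address recombination (PAGE_K_MASK assumed to be the 4-Kbyte offset mask):

    #include <cstdint>
    #include <cassert>

    typedef uint64_t paddr_t;
    static const paddr_t PAGE_K_MASK = 0xFFF;   // assumed: 4 Kbyte page offset

    // Speculative physical address: previous cycle's PPN + current virtual offset.
    paddr_t speculative_paddr(paddr_t previous_paddr, uint32_t vaddr)
    {
        return (previous_paddr & ~PAGE_K_MASK) | ((paddr_t)vaddr & PAGE_K_MASK);
    }

    int main()
    {
        paddr_t  prev = 0x0ABCD123;             // translated at the previous cycle
        uint32_t va   = 0x40001456;             // same physical page, different offset
        assert(speculative_paddr(prev, va) == 0x0ABCD456);
        return 0;
    }

When the guess is wrong (the access falls in another page), the dtlb result computed in the same cycle detects it and the access is simply replayed, which is the extra cycle the comment refers to.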
2564 if ( (( dreq.type == iss_t::DATA_READ) or (dreq.type == iss_t::DATA_LL)) and2565 not r_dcache_p0_valid.read() and not r_dcache_p1_valid.read() )2565 if ( ((m_dreq.type == iss_t::DATA_READ) or (m_dreq.type == iss_t::DATA_LL)) 2566 and not r_dcache_p0_valid.read() and not r_dcache_p1_valid.read() ) 2566 2567 { 2567 2568 if ( cacheable ) // cacheable read … … 2596 2597 m_cpt_data_read++; 2597 2598 #endif 2598 drsp.valid = true;2599 drsp.rdata = cache_rdata;2599 m_drsp.valid = true; 2600 m_drsp.rdata = cache_rdata; 2600 2601 } 2601 2602 } … … 2603 2604 { 2604 2605 r_dcache_vci_paddr = paddr; 2605 r_dcache_vci_unc_be = dreq.be;2606 r_dcache_vci_unc_be = m_dreq.be; 2606 2607 r_dcache_vci_unc_req = true; 2607 2608 r_dcache_fsm = DCACHE_UNC_WAIT; … … 2609 2610 2610 2611 // makes reservation in case of LL 2611 if ( dreq.type == iss_t::DATA_LL )2612 if ( m_dreq.type == iss_t::DATA_LL ) 2612 2613 { 2613 2614 r_dcache_ll_valid = true; 2614 2615 r_dcache_ll_data = cache_rdata; 2615 r_dcache_ll_vaddr = dreq.addr;2616 r_dcache_ll_vaddr = m_dreq.addr; 2616 2617 } 2618 r_dcache_p0_valid = false; 2617 2619 } // end READ or LL 2618 2620 2619 2621 // WRITE request: 2620 // The write request arguments have been registered .2622 // The write request arguments have been registered in r_dcache_p0 registers. 2621 2623 // The physical address has been computed and registered. 2622 2624 // We acknowledge the processor request and activate the P1 pipeline stage. 2623 else if ( dreq.type == iss_t::DATA_WRITE )2625 else if ( m_dreq.type == iss_t::DATA_WRITE ) 2624 2626 { 2625 2627 … … 2627 2629 m_cpt_data_write++; 2628 2630 #endif 2629 p0_valid= true;2630 drsp.valid = true;2631 drsp.rdata = 0;2631 m_drsp.valid = true; 2632 m_drsp.rdata = 0; 2633 r_dcache_p0_valid = true; 2632 2634 } // end WRITE 2633 2635 2634 2636 // SC request: 2637 // The SC requests are taken only if the write pipe-line is empty. 2635 2638 // - if a valid LL reservation (with the same address) is registered, 2636 // we request a SC transaction to CMD FSM and go to the DCACHE_UNC_WAIT state 2637 // that will directly return the response to the processor, and invalidate 2638 // the LL reservation. We don't check a possible write hit in dcache, 2639 // as the cache update is done by the coherence transaction... 2640 // - if there is no registerd LL, we just stay in IDLE state, invalidate 2641 // the LL reservation, and return 1 (atomic access failed) 2642 else if ( dreq.type == iss_t::DATA_SC ) 2639 // we request a SC transaction to CMD FSM and go to the DCACHE_SC_WAIT state 2640 // that will directly return the response to the processor, invalidate 2641 // the LL reservation, and set the Dirty bit if required. 2642 // We don't check a possible write hit in dcache, as the cache update 2643 // is done by the coherence transaction... 
2644 // - if there is no valid registered LL, we just stay in IDLE state, 2645 // and return 1 (atomic access failed) 2646 else if ( ( m_dreq.type == iss_t::DATA_SC ) 2647 and not r_dcache_p0_valid.read() and not r_dcache_p1_valid.read() ) 2643 2648 { 2649 2644 2650 #ifdef INSTRUMENTATION 2645 2651 m_cpt_data_sc++; 2646 2652 #endif 2647 2653 // test if valid registered LL 2648 if ( r_dcache_ll_valid.read() and (r_dcache_ll_vaddr.read() == dreq.addr))2654 if ( r_dcache_ll_valid.read() and (r_dcache_ll_vaddr.read() == m_dreq.addr)) 2649 2655 { 2650 r_dcache_vci_paddr 2651 r_dcache_vci_sc_req 2652 r_dcache_vci_sc_old 2653 r_dcache_vci_sc_new =dreq.wdata;2654 r_dcache_fsm = DCACHE_UNC_WAIT;2656 r_dcache_vci_paddr = paddr; 2657 r_dcache_vci_sc_req = true; 2658 r_dcache_vci_sc_old = r_dcache_ll_data.read(); 2659 r_dcache_vci_sc_new = m_dreq.wdata; 2660 r_dcache_fsm = DCACHE_SC_WAIT; 2655 2661 } 2656 2662 else // no registered LL 2657 2663 { 2658 drsp.valid = true; 2659 drsp.rdata = 1; 2660 r_dcache_ll_valid = false; 2664 2665 m_drsp.valid = true; 2666 m_drsp.rdata = 1; 2667 r_dcache_ll_valid = false; 2661 2668 } 2669 r_dcache_p0_valid = false; 2662 2670 } // end SC 2671 else 2672 { 2673 r_dcache_p0_valid = false; 2674 } 2663 2675 } // end valid_req 2664 } // end if read/write request 2676 else 2677 { 2678 r_dcache_p0_valid = false; 2679 } 2680 } // end if read/write/ll/sc request 2681 } // end dreq.valid 2682 else 2683 { 2684 r_dcache_p0_valid = false; 2665 2685 } // end P0 pipe stage 2666 2667 r_dcache_p0_valid = p0_valid;2668 2686 break; 2669 2687 } 2670 2688 ///////////////////// 2671 2689 case DCACHE_TLB_MISS: // This is the entry point for the sub-fsm handling all tlb miss. 2672 // - Input arguments are r_dcache_tlb_vaddr & r_dcache_tlb_ins 2673 // - The sub-fsm access the dcache to find the missing TLB entry, 2674 // and activates the cache miss procedure in case of miss. 2675 // - It bypass the first level page table access if possible. 2676 // - It uses atomic access to update the R/L access bits 2677 // in the page table if required. 2678 // - It directly updates the itlb or dtlb, and writes into the 2679 // r_mmu_ins_* or r_mmu_data* error reporting registers. 2690 // Input arguments are: 2691 // - r_dcache_tlb_vaddr 2692 // - r_dcache_tlb_ins (true when itlb miss) 2693 // The sub-fsm access the dcache to find the missing TLB entry, 2694 // and activates the cache miss procedure in case of miss. 2695 // It bypass the first level page table access if possible. 2696 // It uses atomic access to update the R/L access bits 2697 // in the page table if required. 2698 // It directly updates the itlb or dtlb, and writes into the 2699 // r_mmu_ins_* or r_mmu_data* error reporting registers. 
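To make the table-walk address arithmetic used by this sub-fsm concrete, the sketch below reproduces the two computations appearing in the DCACHE_TLB_MISS code (PT1 entry address from the PTPR, PT2 entry address from the PTD base), with assumed generic constants: 11-bit IX1, 9-bit IX2, 4-Kbyte small pages, 2-Mbyte big pages and 8-byte PTE2 entries. These constants are not taken from this changeset and may differ from the actual vcache parameters.

    #include <cstdint>
    #include <cassert>

    typedef uint64_t paddr_t;

    // Assumed generic MMU geometry (not taken from this changeset):
    static const int      INDEX1_NBITS = 11;          // vaddr bits [31:21] : IX1
    static const int      PAGE_M_NBITS = 21;          // 2 Mbyte big page offset
    static const int      PAGE_K_NBITS = 12;          // 4 Kbyte small page offset
    static const uint32_t PTD_ID2_MASK = 0x001FF000;  // vaddr bits [20:12] : IX2

    // PT1 entry address: PT1 base (ptpr) + 4 bytes * IX1
    paddr_t pte1_paddr(uint32_t ptpr, uint32_t vaddr)
    {
        return ((paddr_t)ptpr << (INDEX1_NBITS + 2)) |
               ((paddr_t)(vaddr >> PAGE_M_NBITS) << 2);
    }

    // PT2 entry address: PT2 base (ptba) + 8 bytes * IX2  (a PTE2 is two 32-bit words)
    paddr_t pte2_paddr(uint32_t ptba, uint32_t vaddr)
    {
        return ((paddr_t)ptba << PAGE_K_NBITS) |
               ((paddr_t)(vaddr & PTD_ID2_MASK) >> (PAGE_K_NBITS - 3));
    }

    int main()
    {
        uint32_t vaddr = 0x7FD43ABC;                  // IX1 = 0x3FE, IX2 = 0x143
        assert(pte1_paddr(0x1, vaddr) == (0x2000u | (0x3FEu << 2)));
        assert(pte2_paddr(0x2, vaddr) == (0x2000u | (0x143u << 3)));
        return 0;
    }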
2680 2700 { 2681 2701 uint32_t ptba = 0; 2682 2702 bool bypass; 2683 paddr_t p addr;2703 paddr_t pte_paddr; 2684 2704 2685 2705 // evaluate bypass in order to skip first level page table access … … 2693 2713 } 2694 2714 2695 if ( not bypass ) // Try to read thePTE1/PTD1 in dcache2696 { 2697 p addr = (paddr_t)r_mmu_ptpr.read() << (INDEX1_NBITS+2) |2698 (paddr_t)((r_dcache_tlb_vaddr.read() >> PAGE_M_NBITS) << 2);2699 r_dcache_tlb_paddr = p addr;2715 if ( not bypass ) // Try to read PTE1/PTD1 in dcache 2716 { 2717 pte_paddr = (paddr_t)r_mmu_ptpr.read() << (INDEX1_NBITS+2) | 2718 (paddr_t)((r_dcache_tlb_vaddr.read() >> PAGE_M_NBITS) << 2); 2719 r_dcache_tlb_paddr = pte_paddr; 2700 2720 r_dcache_fsm = DCACHE_TLB_PTE1_GET; 2701 2721 } 2702 else // Try to read thePTE2 in dcache2703 { 2704 p addr = (paddr_t)ptba << PAGE_K_NBITS |2705 (paddr_t)(r_dcache_tlb_vaddr.read()&PTD_ID2_MASK)>>(PAGE_K_NBITS-3);2706 r_dcache_tlb_paddr = p addr;2722 else // Try to read PTE2 in dcache 2723 { 2724 pte_paddr = (paddr_t)ptba << PAGE_K_NBITS | 2725 (paddr_t)(r_dcache_tlb_vaddr.read()&PTD_ID2_MASK)>>(PAGE_K_NBITS-3); 2726 r_dcache_tlb_paddr = pte_paddr; 2707 2727 r_dcache_fsm = DCACHE_TLB_PTE2_GET; 2708 2728 } … … 2712 2732 { 2713 2733 if ( r_dcache_tlb_ins.read() ) 2714 std::cout << " <PROC.DCACHE_TLB_MISS> ITLB miss request:"; 2715 else 2716 std::cout << " <PROC.DCACHE_TLB_MISS> DTLB miss request:"; 2717 std::cout << " vaddr = " << std::hex << r_dcache_tlb_vaddr.read() 2718 << " / bypass = " << bypass 2719 << " / PTE address = " << paddr << std::endl; 2734 { 2735 std::cout << " <PROC.DCACHE_TLB_MISS> ITLB miss"; 2736 } 2737 else 2738 { 2739 std::cout << " <PROC.DCACHE_TLB_MISS> DTLB miss"; 2740 } 2741 std::cout << " / VADDR = " << std::hex << r_dcache_tlb_vaddr.read() 2742 << " / BYPASS = " << bypass 2743 << " / PTE_ADR = " << pte_paddr << std::endl; 2720 2744 } 2721 2745 #endif … … 2755 2779 r_mmu_detr = MMU_READ_PT1_UNMAPPED; 2756 2780 r_mmu_dbvar = r_dcache_tlb_vaddr.read(); 2757 drsp.valid = true;2758 drsp.error = true;2781 m_drsp.valid = true; 2782 m_drsp.error = true; 2759 2783 } 2760 2784 r_dcache_fsm = DCACHE_IDLE; … … 2829 2853 else // we must load the missing cache line in dcache 2830 2854 { 2855 r_dcache_vci_miss_req = true; 2831 2856 r_dcache_vci_paddr = r_dcache_tlb_paddr.read(); 2832 2857 r_dcache_miss_type = PTE1_MISS; 2833 2858 r_dcache_fsm = DCACHE_MISS_VICTIM; 2834 r_dcache_vci_miss_req = true;2835 2859 2836 2860 #if DEBUG_DCACHE … … 2895 2919 uint32_t pte = r_dcache_tlb_pte_flags.read(); 2896 2920 bool updt = false; 2897 2898 // test the access bits L/R, depending on the physical address locality 2899 // we must use the 10 MSB bits of the 19 bits PPN1 to obtain the target index2900 // we must use the 10 MSB bits of the SRCID to obtain the local index2901 // set the r_dcache_vci_sc_old and r_dcache_vci_sc_new registers if SC required2902 2903 uint32_t target = (pte >> 9) & 0x3FF;2904 uint32_t local = m_srcid_d >> 4;2905 2906 if ( local == target ) // local_address2921 bool local = true; 2922 2923 // We should compute the access locality: 2924 // The PPN MSB bits define the destination cluster index. 2925 // The m_srcid_d MSB bits define the source cluster index. 2926 // The number of bits to compare depends on the number of clusters, 2927 // and can be obtained in the mapping table. 2928 // As long as this computation is not done, all access are local. 
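The locality computation that the comment above leaves as future work could look like the sketch below: compare the cluster index held in the PPN MSB bits with the cluster index held in the VCI source id, over a width derived from the number of clusters. All widths and names here are assumptions for illustration; the changeset itself simply forces local = true.

    #include <cstdint>

    // Hypothetical geometry: the cluster index sits in the MSB bits of both fields.
    struct ClusterGeometry
    {
        unsigned nb_cluster_bits;   // log2(number of clusters), e.g. from the mapping table
        unsigned ppn_width;         // width of the PPN field (19 for PTE1, 28 for PTE2)
        unsigned srcid_width;       // width of the VCI SRCID
    };

    // True when the physical page and the initiator live in the same cluster.
    bool is_local_access(uint32_t ppn, uint32_t srcid, const ClusterGeometry& g)
    {
        uint32_t target_cluster = ppn   >> (g.ppn_width   - g.nb_cluster_bits);
        uint32_t source_cluster = srcid >> (g.srcid_width - g.nb_cluster_bits);
        return target_cluster == source_cluster;
    }

    int main()
    {
        ClusterGeometry g = { 4, 28, 14 };                  // 16 clusters (assumed values)
        bool local = is_local_access(0x3000000, 0x0C00, g); // both in cluster 3
        return local ? 0 : 1;
    }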
2929 2930 if ( local ) // local access 2907 2931 { 2908 2932 if ( not ((pte & PTE_L_MASK) == PTE_L_MASK) ) // we must set the L bit 2909 2933 { 2910 updt = true; 2911 r_dcache_vci_sc_old = r_dcache_tlb_pte_flags.read(); 2912 r_dcache_vci_sc_new = r_dcache_tlb_pte_flags.read() | PTE_L_MASK; 2913 } 2914 } 2915 else // remote address 2934 updt = true; 2935 r_dcache_vci_sc_old = pte; 2936 r_dcache_vci_sc_new = pte | PTE_L_MASK; 2937 pte = pte | PTE_L_MASK; 2938 } 2939 } 2940 else // remote access 2916 2941 { 2917 2942 if ( not ((pte & PTE_R_MASK) == PTE_R_MASK) ) // we must set the R bit 2918 2943 { 2919 2944 updt = true; 2920 r_dcache_vci_sc_old = r_dcache_tlb_pte_flags.read(); 2921 r_dcache_vci_sc_new = r_dcache_tlb_pte_flags.read() | PTE_R_MASK; 2945 r_dcache_vci_sc_old = pte; 2946 r_dcache_vci_sc_new = pte | PTE_R_MASK; 2947 pte = pte | PTE_R_MASK; 2922 2948 } 2923 2949 } … … 2951 2977 } 2952 2978 // next state 2953 if ( updt ) r_dcache_fsm = DCACHE_TLB_ SC_UPDT; // dcache and page table update2979 if ( updt ) r_dcache_fsm = DCACHE_TLB_LR_UPDT; // dcache and page table update 2954 2980 else r_dcache_fsm = DCACHE_TLB_RETURN; // exit sub-fsm 2955 2981 … … 2959 2985 if ( r_dcache_tlb_ins.read() ) 2960 2986 { 2961 std::cout << " <PROC.DCACHE_TLB_PTE1_UPDT> write PTE1 in ITLB :";2962 std::cout << " way = " << std::dec << r_dcache_tlb_way.read()2963 << " / set = " << r_dcache_tlb_set.read() << std::endl;2964 r_itlb.print ();2987 std::cout << " <PROC.DCACHE_TLB_PTE1_UPDT> write PTE1 in ITLB"; 2988 std::cout << " / set = " << std::dec << r_dcache_tlb_set.read() 2989 << " / way = " << r_dcache_tlb_way.read() << std::endl; 2990 r_itlb.printTrace(); 2965 2991 } 2966 2992 else 2967 2993 { 2968 std::cout << " <PROC.DCACHE_TLB_PTE1_UPDT> write PTE1 in DTLB :";2969 std::cout << " way = " << std::dec << r_dcache_tlb_way.read()2970 << " / set = " << r_dcache_tlb_set.read() << std::endl;2971 r_dtlb.print ();2994 std::cout << " <PROC.DCACHE_TLB_PTE1_UPDT> write PTE1 in DTLB"; 2995 std::cout << " / set = " << std::dec << r_dcache_tlb_set.read() 2996 << " / way = " << r_dcache_tlb_way.read() << std::endl; 2997 r_dtlb.printTrace(); 2972 2998 } 2973 2999 … … 3010 3036 r_mmu_detr = MMU_READ_PT2_UNMAPPED; 3011 3037 r_mmu_dbvar = r_dcache_tlb_vaddr.read(); 3012 drsp.valid = true;3013 drsp.error = true;3038 m_drsp.valid = true; 3039 m_drsp.error = true; 3014 3040 } 3015 3041 r_dcache_fsm = DCACHE_IDLE; … … 3047 3073 else // we must load the missing cache line in dcache 3048 3074 { 3075 r_dcache_fsm = DCACHE_MISS_VICTIM; 3076 r_dcache_vci_miss_req = true; 3049 3077 r_dcache_vci_paddr = r_dcache_tlb_paddr.read(); 3050 3078 r_dcache_miss_type = PTE2_MISS; 3051 r_dcache_fsm = DCACHE_MISS_VICTIM;3052 r_dcache_vci_miss_req = true;3053 3079 3054 3080 #if DEBUG_DCACHE … … 3088 3114 #endif 3089 3115 } 3116 3117 #if DEBUG_DCACHE 3118 if ( m_debug_dcache_fsm ) 3119 { 3120 if ( r_dcache_tlb_ins.read() ) 3121 std::cout << " <PROC.DCACHE_TLB_PTE2_SELECT> Select a slot in ITLB:"; 3122 else 3123 std::cout << " <PROC.DCACHE_TLB_PTE2_SELECT> Select a slot in DTLB:"; 3124 std::cout << " way = " << std::dec << way 3125 << " / set = " << set << std::endl; 3126 } 3127 #endif 3090 3128 r_dcache_tlb_way = way; 3091 3129 r_dcache_tlb_set = set; … … 3095 3133 ////////////////////////// 3096 3134 case DCACHE_TLB_PTE2_UPDT: // write a new PTE2 in tlb after testing the L/R bit 3097 // if L/R bit already set exit the sub-fsm3135 // if L/R bit already set, exit the sub-fsm 3098 3136 // if not, the page table must be updated by an atomic access 
3099 3137 { 3100 paddr_t nline = r_dcache_p0_paddr.read() >> (uint32_log2(m_dcache_words)+2);3138 paddr_t nline = r_dcache_tlb_paddr.read() >> (uint32_log2(m_dcache_words)+2); 3101 3139 uint32_t pte_flags = r_dcache_tlb_pte_flags.read(); 3102 3140 uint32_t pte_ppn = r_dcache_tlb_pte_ppn.read(); 3103 bool updt = false; // page table update required3104 3105 // test the access bit L/R, depending on the physical address locality 3106 // we must use the 10 MSB bits of the 28 bits PPN2 to obtain the target cluster index3107 // we must use the 10 MSB bits of the SRCID to obtain the local cluster index3108 // set the r_dcache_vci_sc_old and r_dcache_vci_sc_new registers if SC required.3109 3110 uint32_t target = (pte_ppn >> 18) & 0x3FF;3111 uint32_t local = m_srcid_d >> 4;3112 3113 if ( local == target ) // local address3141 bool updt = false; 3142 bool local = true; 3143 3144 // We should compute the access locality: 3145 // The PPN MSB bits define the destination cluster index. 3146 // The m_srcid_d MSB bits define the source cluster index. 3147 // The number of bits to compare depends on the number of clusters, 3148 // and can be obtained in the mapping table. 3149 // As long as this computation is not done, all access are local. 3150 3151 if ( local ) // local access 3114 3152 { 3115 3153 if ( not ((pte_flags & PTE_L_MASK) == PTE_L_MASK) ) // we must set the L bit 3116 3154 { 3155 updt = true; 3156 r_dcache_vci_sc_old = pte_flags; 3157 r_dcache_vci_sc_new = pte_flags | PTE_L_MASK; 3158 pte_flags = pte_flags | PTE_L_MASK; 3159 } 3160 } 3161 else // remote access 3162 { 3163 if ( not ((pte_flags & PTE_R_MASK) == PTE_R_MASK) ) // we must set the R bit 3164 { 3117 3165 updt = true; 3118 r_dcache_vci_sc_old = r_dcache_tlb_pte_flags.read(); 3119 r_dcache_vci_sc_new = r_dcache_tlb_pte_flags.read() | PTE_L_MASK; 3120 } 3121 } 3122 else // remote address 3123 { 3124 if ( not ((pte_flags & PTE_R_MASK) == PTE_R_MASK) ) // we must set the R bit 3125 { 3126 updt = true; 3127 r_dcache_vci_sc_old = r_dcache_tlb_pte_flags.read(); 3128 r_dcache_vci_sc_new = r_dcache_tlb_pte_flags.read() | PTE_R_MASK; 3166 r_dcache_vci_sc_old = pte_flags; 3167 r_dcache_vci_sc_new = pte_flags | PTE_R_MASK; 3168 pte_flags = pte_flags | PTE_R_MASK; 3129 3169 } 3130 3170 } … … 3133 3173 if ( r_dcache_tlb_ins.read() ) 3134 3174 { 3135 r_itlb.write( false, // 2K page3175 r_itlb.write( false, // 4K page 3136 3176 pte_flags, 3137 3177 pte_ppn, … … 3146 3186 else 3147 3187 { 3148 r_dtlb.write( false, // 2K page3188 r_dtlb.write( false, // 4K page 3149 3189 pte_flags, 3150 3190 pte_ppn, … … 3163 3203 if ( r_dcache_tlb_ins.read() ) 3164 3204 { 3165 std::cout << " <PROC.DCACHE_TLB_PTE2_UPDT> write PTE2 in ITLB :";3166 std::cout << " way = " << std::dec << r_dcache_tlb_way.read()3167 << " / set = " << r_dcache_tlb_set.read() << std::endl;3168 r_itlb.print ();3205 std::cout << " <PROC.DCACHE_TLB_PTE2_UPDT> write PTE2 in ITLB"; 3206 std::cout << " / set = " << std::dec << r_dcache_tlb_set.read() 3207 << " / way = " << r_dcache_tlb_way.read() << std::endl; 3208 r_itlb.printTrace(); 3169 3209 } 3170 3210 else 3171 3211 { 3172 std::cout << " <PROC.DCACHE_TLB_PTE2_UPDT> write PTE2 in DTLB :";3173 std::cout << " way = " << std::dec << r_dcache_tlb_way.read()3174 << " / set = " << r_dcache_tlb_set.read() << std::endl;3175 r_dtlb.print ();3212 std::cout << " <PROC.DCACHE_TLB_PTE2_UPDT> write PTE2 in DTLB"; 3213 std::cout << " / set = " << std::dec << r_dcache_tlb_set.read() 3214 << " / way = " << r_dcache_tlb_way.read() << std::endl; 3215 
r_dtlb.printTrace(); 3176 3216 } 3177 3217 } 3178 3218 #endif 3179 3219 // next state 3180 if ( updt ) r_dcache_fsm = DCACHE_TLB_ SC_UPDT; // dcache and page table update3220 if ( updt ) r_dcache_fsm = DCACHE_TLB_LR_UPDT; // dcache and page table update 3181 3221 else r_dcache_fsm = DCACHE_TLB_RETURN; // exit sub-fsm 3182 3222 break; 3183 3223 } 3184 3224 //////////////////////// 3185 case DCACHE_TLB_ SC_UPDT: // update the dcache after a tlb miss (L/R bit),3225 case DCACHE_TLB_LR_UPDT: // update the dcache after a tlb miss (L/R bit), 3186 3226 // request a SC transaction to CMD FSM 3187 3227 { … … 3189 3229 if ( m_debug_dcache_fsm ) 3190 3230 { 3191 std::cout << " <PROC.DCACHE_TLB_ SC_UPDT> Update dcache: (L/R) bit" << std::endl;3231 std::cout << " <PROC.DCACHE_TLB_LR_UPDT> Update dcache: (L/R) bit" << std::endl; 3192 3232 } 3193 3233 #endif … … 3200 3240 #endif 3201 3241 // r_dcache_vci_sc_old & r_dcache_vci_sc_new registers are already set 3242 r_dcache_vci_paddr = r_dcache_tlb_paddr.read(); 3202 3243 r_dcache_vci_sc_req = true; 3203 r_dcache_fsm = DCACHE_TLB_ SC_WAIT;3244 r_dcache_fsm = DCACHE_TLB_LR_WAIT; 3204 3245 break; 3205 3246 } 3206 3247 //////////////////////// 3207 case DCACHE_TLB_ SC_WAIT: // Waiting a response to SC transaction.3248 case DCACHE_TLB_LR_WAIT: // Waiting a response to SC transaction. 3208 3249 // We consume the response in rsp FIFO, 3209 3250 // and exit the sub-fsm, but we don't … … 3224 3265 if ( r_vci_rsp_data_error.read() ) // bus error 3225 3266 { 3226 std::cout << "BUS ERROR in DCACHE_TLB_ SC_WAIT state" << std::endl;3267 std::cout << "BUS ERROR in DCACHE_TLB_LR_WAIT state" << std::endl; 3227 3268 std::cout << "This should not happen in this state" << std::endl; 3228 3269 exit(0); … … 3233 3274 if ( m_debug_dcache_fsm ) 3234 3275 { 3235 std::cout << " <PROC.DCACHE_TLB_ SC_WAIT> SC response received" << std::endl;3276 std::cout << " <PROC.DCACHE_TLB_LR_WAIT> SC response received" << std::endl; 3236 3277 } 3237 3278 #endif … … 3261 3302 r_dtlb.flush(); 3262 3303 r_dcache_fsm = DCACHE_IDLE; 3263 drsp.valid = true;3304 m_drsp.valid = true; 3264 3305 } 3265 3306 break; … … 3279 3320 if ( r_wbuf.empty() ) 3280 3321 { 3281 drsp.valid = true;3322 m_drsp.valid = true; 3282 3323 r_dcache_fsm = DCACHE_IDLE; 3283 3324 } … … 3311 3352 { 3312 3353 r_dcache_fsm = DCACHE_IDLE; 3313 drsp.valid = true;3354 m_drsp.valid = true; 3314 3355 } 3315 3356 break; … … 3351 3392 } 3352 3393 r_dcache_fsm = DCACHE_IDLE; 3353 drsp.valid = true;3394 m_drsp.valid = true; 3354 3395 } 3355 3396 } … … 3361 3402 r_dtlb.inval(r_dcache_p0_wdata.read()); 3362 3403 r_dcache_fsm = DCACHE_IDLE; 3363 drsp.valid = true;3404 m_drsp.valid = true; 3364 3405 break; 3365 3406 } … … 3429 3470 { 3430 3471 r_dcache_fsm = DCACHE_IDLE; 3431 drsp.valid = true;3472 m_drsp.valid = true; 3432 3473 } 3433 3474 break; … … 3480 3521 { 3481 3522 r_dcache_fsm = DCACHE_IDLE; 3482 drsp.valid = true;3523 m_drsp.valid = true; 3483 3524 } 3484 3525 } … … 3489 3530 { 3490 3531 r_dcache_fsm = DCACHE_IDLE; 3491 drsp.valid = true;3532 m_drsp.valid = true; 3492 3533 break; 3493 3534 } … … 3593 3634 r_mmu_detr = MMU_READ_DATA_ILLEGAL_ACCESS; 3594 3635 r_mmu_dbvar = r_dcache_p0_vaddr.read(); 3595 drsp.valid = true;3596 drsp.error = true;3636 m_drsp.valid = true; 3637 m_drsp.error = true; 3597 3638 r_dcache_fsm = DCACHE_IDLE; 3598 3639 break; … … 3611 3652 r_mmu_detr = MMU_READ_PT1_ILLEGAL_ACCESS; 3612 3653 r_mmu_dbvar = r_dcache_tlb_vaddr.read(); 3613 drsp.valid = true;3614 drsp.error = true;3654 m_drsp.valid = true; 3655 
m_drsp.error = true; 3615 3656 } 3616 3657 r_dcache_fsm = DCACHE_IDLE; … … 3630 3671 r_mmu_detr = MMU_READ_PT2_ILLEGAL_ACCESS; 3631 3672 r_mmu_dbvar = r_dcache_tlb_vaddr.read(); 3632 drsp.valid = true;3633 drsp.error = true;3673 m_drsp.valid = true; 3674 m_drsp.error = true; 3634 3675 } 3635 3676 r_dcache_fsm = DCACHE_IDLE; … … 3755 3796 { 3756 3797 r_mmu_detr = MMU_READ_DATA_ILLEGAL_ACCESS; 3757 r_mmu_dbvar = dreq.addr;3798 r_mmu_dbvar = m_dreq.addr; 3758 3799 r_vci_rsp_data_error = false; 3759 drsp.error = true;3760 drsp.valid = true;3800 m_drsp.error = true; 3801 m_drsp.valid = true; 3761 3802 r_dcache_fsm = DCACHE_IDLE; 3762 3803 break; … … 3767 3808 r_dcache_fsm = DCACHE_IDLE; 3768 3809 // we acknowledge the processor request if it has not been modified 3769 if ( dreq.valid and (dreq.addr == r_dcache_p0_vaddr.read()) )3770 { 3771 drsp.valid = true;3772 drsp.rdata = r_vci_rsp_fifo_dcache.read();3810 if ( m_dreq.valid and (m_dreq.addr == r_dcache_p0_vaddr.read()) ) 3811 { 3812 m_drsp.valid = true; 3813 m_drsp.rdata = r_vci_rsp_fifo_dcache.read(); 3773 3814 } 3774 3815 } 3775 3816 break; 3776 3817 } 3777 //////////////////////////// 3778 case DCACHE_WRITE_TLB_DIRTY: // set PTE dirty bit in dtlb 3818 //////////////////// 3819 case DCACHE_SC_WAIT: // waiting VCI response after a processor SC request 3820 // a long write is launched if dirty bit must be set 3821 { 3822 // external coherence request 3823 if ( r_tgt_dcache_req.read() ) 3824 { 3825 r_dcache_fsm = DCACHE_CC_CHECK; 3826 r_dcache_fsm_save = r_dcache_fsm; 3827 break; 3828 } 3829 3830 if ( r_vci_rsp_data_error.read() ) // bus error 3831 { 3832 r_mmu_detr = MMU_READ_DATA_ILLEGAL_ACCESS; 3833 r_mmu_dbvar = m_dreq.addr; 3834 r_vci_rsp_data_error = false; 3835 m_drsp.error = true; 3836 m_drsp.valid = true; 3837 r_dcache_fsm = DCACHE_IDLE; 3838 break; 3839 } 3840 else if ( r_vci_rsp_fifo_dcache.rok() ) // response available 3841 { 3842 bool sc_success = (r_vci_rsp_fifo_dcache.read() == 0); 3843 vci_rsp_fifo_dcache_get = true; 3844 3845 if ( sc_success and not r_dcache_p0_tlb_dirty.read() ) // Dirty bit must be set 3846 { 3847 // The PTE physical address is the concatenation of the nline value (from dtlb), 3848 // with the word index (obtained from the proper bits of the virtual address) 3849 if ( r_dcache_p0_tlb_big.read() ) // PTE1 3850 { 3851 r_dcache_p2_pte_paddr = (paddr_t)(r_dcache_p0_tlb_nline.read()*(m_dcache_words<<2)) | 3852 (paddr_t)((r_dcache_p0_vaddr.read()>>19) & 0x3c); 3853 } 3854 else // PTE2 3855 { 3856 r_dcache_p2_pte_paddr = (paddr_t)(r_dcache_p0_tlb_nline.read()*(m_dcache_words<<2)) | 3857 (paddr_t)((r_dcache_p0_vaddr.read()>>9) & 0x38); 3858 } 3859 r_dcache_p2_sc_success = sc_success; 3860 r_dcache_p2_way = r_dcache_p0_tlb_way.read(); 3861 r_dcache_p2_set = r_dcache_p0_tlb_set.read(); 3862 r_dcache_fsm = DCACHE_DIRTY_TLB_SET; 3863 3864 } 3865 else 3866 { 3867 m_drsp.valid = true; 3868 m_drsp.rdata = r_vci_rsp_fifo_dcache.read(); 3869 r_dcache_fsm = DCACHE_IDLE; 3870 } 3871 } 3872 break; 3873 } 3874 ////////////////////////// 3875 case DCACHE_DIRTY_TLB_SET: // Enter this sub_fsm in case of long write: 3876 // - in case of WRITE request (r_dcache_p2_type_sc == false) 3877 // - in case of SC request (r_dcache_p2_type_sc == true) 3878 // Inputs arguments are: 3879 // - r_dcache_p2_way, 3880 // - r_dcache_p2_set, 3881 // - r_dcache_p2_pte_paddr, 3882 // - r_dcache_p2_type_sc, 3883 // - r_dcache_p2_sc_success, 3884 // In this first state, we set PTE dirty bit in dtlb 3779 3885 // and get PTE in dcache 3780 3886 { 3781 3887 
// set dirty bit in dtlb 3782 r_dtlb.set_dirty( r_dcache_p2_ tlb_way.read(),3783 r_dcache_p2_ tlb_set.read() );3888 r_dtlb.set_dirty( r_dcache_p2_way.read(), 3889 r_dcache_p2_set.read() ); 3784 3890 3785 3891 // get PTE in dcache … … 3801 3907 if ( m_debug_dcache_fsm ) 3802 3908 { 3803 std::cout << " <PROC.DCACHE_ WRITE_TLB_DIRTY> Set dirty bit in dtlb:" << std::dec3804 << " / tlb_ way = " << r_dcache_p2_tlb_way.read()3805 << " / tlb_ set = " << r_dcache_p2_tlb_set.read() << std::endl;3806 r_dtlb.print ();3807 std::cout << " Get PTE in dcache:" << std::hex3808 << " paddr= " << r_dcache_p2_pte_paddr.read()3909 std::cout << " <PROC.DCACHE_DIRTY_TLB_SET> Set dirty bit in dtlb:" << std::dec 3910 << " / tlb_set = " << r_dcache_p2_set.read() 3911 << " / tlb_way = " << r_dcache_p2_way.read() << std::endl; 3912 r_dtlb.printTrace(); 3913 std::cout << " and get PTE in dcache" << std::hex 3914 << " / PADDR = " << r_dcache_p2_pte_paddr.read() 3809 3915 << " / PTE = " << pte << std::dec 3810 << " / dcache_way = " << way3811 << " / dcache_set = " << set<< std::endl;3916 << " / set = " << set 3917 << " / way = " << way << std::endl; 3812 3918 } 3813 3919 #endif 3814 assert( hit and "error in DCACHE_ WRITE_TLB_DIRTY: the PTE should be in dcache" );3815 3816 r_dcache_p2_ pte_way= way; // register pte way in dcache3817 r_dcache_p2_ pte_set= set; // register pte set in dcache;3818 r_dcache_p2_ pte_word= word; // register pte word in dcache;3819 r_dcache_p2_pte 3820 r_dcache_fsm = DCACHE_ WRITE_CACHE_DIRTY;3821 break; 3822 } 3823 //////////////////////////// //3824 case DCACHE_ WRITE_CACHE_DIRTY: // set PTE dirty bit in dcache3920 assert( hit and "error in DCACHE_DIRTY_TLB_SET: the PTE should be in dcache" ); 3921 3922 r_dcache_p2_way = way; // register pte way in dcache 3923 r_dcache_p2_set = set; // register pte set in dcache; 3924 r_dcache_p2_word = word; // register pte word in dcache; 3925 r_dcache_p2_pte_value = pte; // register pte value 3926 r_dcache_fsm = DCACHE_DIRTY_CACHE_SET; 3927 break; 3928 } 3929 //////////////////////////// 3930 case DCACHE_DIRTY_CACHE_SET: // set PTE dirty bit in dcache 3825 3931 // request SC tranansaction to CMD FSM 3826 3932 { 3827 3933 // set PTE dirty bit in dcache 3828 r_dcache.write( r_dcache_p2_ pte_way.read(),3829 r_dcache_p2_ pte_set.read(),3830 r_dcache_p2_ pte_word.read(),3831 r_dcache_p2_pte .read() | PTE_D_MASK,3934 r_dcache.write( r_dcache_p2_way.read(), 3935 r_dcache_p2_set.read(), 3936 r_dcache_p2_word.read(), 3937 r_dcache_p2_pte_value.read() | PTE_D_MASK, 3832 3938 0xF ); 3833 3939 … … 3835 3941 m_cpt_dcache_data_write++; 3836 3942 #endif 3837 3943 // request sc transaction to CMD_FSM 3944 r_dcache_vci_sc_req = true; 3945 r_dcache_vci_paddr = r_dcache_p2_pte_paddr.read(); 3946 r_dcache_vci_sc_old = r_dcache_p2_pte_value.read(); 3947 r_dcache_vci_sc_new = r_dcache_p2_pte_value.read() | PTE_D_MASK; 3948 r_dcache_fsm = DCACHE_DIRTY_SC_WAIT; 3949 3838 3950 #if DEBUG_DCACHE 3839 3951 if ( m_debug_dcache_fsm ) 3840 3952 { 3841 std::cout << " <PROC.DCACHE_ WRITE_CACHE_DIRTY> Set PTE dirty bit in dcache"3842 << " / way = " << r_dcache_p2_ pte_way.read()3843 << " / set = " << r_dcache_p2_ pte_set.read()3844 << " / word = " << r_dcache_p2_ pte_word.read() << std::endl;3845 std::cout << " Request SC transaction"3846 << " / address = " << "bloup"3847 << " / old = " << r_dcache_p2_pte .read()3848 << " / new = " << (r_dcache_p2_pte .read() | PTE_D_MASK) << std::endl;3953 std::cout << " <PROC.DCACHE_DIRTY_CACHE_SET> Set PTE dirty bit in dcache" 3954 << " / way = " << 
r_dcache_p2_way.read() 3955 << " / set = " << r_dcache_p2_set.read() 3956 << " / word = " << r_dcache_p2_word.read() << std::endl; 3957 std::cout << " and request SC transaction for dirty bit update" 3958 << " / address = " << r_dcache_p2_pte_paddr.read() 3959 << " / old = " << r_dcache_p2_pte_value.read() 3960 << " / new = " << (r_dcache_p2_pte_value.read() | PTE_D_MASK) << std::endl; 3849 3961 } 3850 3962 #endif 3851 // request sc transaction to CMD_FSM3852 r_dcache_vci_sc_req = true;3853 r_dcache_vci_sc_old = r_dcache_p2_pte.read();3854 r_dcache_vci_sc_new = r_dcache_p2_pte.read() | PTE_D_MASK;3855 r_dcache_fsm = DCACHE_WRITE_SC_WAIT;3856 3963 break; 3857 3964 } 3858 3965 ////////////////////////// 3859 case DCACHE_ WRITE_SC_WAIT: // wait completion of SC3966 case DCACHE_DIRTY_SC_WAIT: // wait completion of SC for PTE Dirty bit 3860 3967 // if atomic, write completed : return to IDLE state 3861 // else, makes an uncacheable readto retry the SC3968 // else, read the mofified PTE to retry the SC 3862 3969 { 3863 3970 // external coherence request … … 3871 3978 if ( r_vci_rsp_data_error.read() ) // bus error 3872 3979 { 3873 r_mmu_detr = MMU_WRITE_PT2_ILLEGAL_ACCESS; 3874 r_mmu_dbvar = r_dcache_p2_vaddr; 3875 drsp.valid = true; 3876 drsp.error = true; 3877 r_dcache_fsm = DCACHE_IDLE; 3878 break; 3980 std::cout << "BUS ERROR in DCACHE_DIRTY_SC_WAIT state" << std::endl; 3981 std::cout << "This should not happen in this state" << std::endl; 3982 exit(0); 3879 3983 } 3880 3984 else if ( r_vci_rsp_fifo_dcache.rok() ) // response available 3881 3985 { 3882 if ( r_vci_rsp_fifo_dcache.read() == 0 ) // atomic 3883 { 3884 drsp.valid = true; // acknowledge the initial write 3885 r_dcache_fsm = DCACHE_IDLE; 3886 } 3887 else 3986 vci_rsp_fifo_dcache_get = true; 3987 if ( r_vci_rsp_fifo_dcache.read() == 0 ) // exit if dirty bit update atomic 3988 { 3989 if ( r_dcache_p2_type_sc.read() ) // long write for SC request 3990 { 3991 m_drsp.valid = true; 3992 m_drsp.rdata = ( r_dcache_p2_sc_success.read() ? 
0 : 1 ); 3993 r_dcache_fsm = DCACHE_IDLE; 3994 } 3995 else // long write for WRITE request 3996 { 3997 r_dcache_fsm = DCACHE_IDLE; 3998 } 3999 } 4000 else // retry if dirty bit update failed 3888 4001 { 3889 4002 r_dcache_vci_paddr = r_dcache_p2_pte_paddr; 3890 4003 r_dcache_vci_unc_req = true; 3891 4004 r_dcache_vci_unc_be = 0xF; 3892 r_dcache_fsm = DCACHE_ WRITE_UNC_WAIT;4005 r_dcache_fsm = DCACHE_DIRTY_UNC_WAIT; 3893 4006 } 3894 4007 } … … 3896 4009 } 3897 4010 /////////////////////////// 3898 case DCACHE_ WRITE_UNC_WAIT: // wait completion of uncacheableread3899 // in case of success weretry a SC request to4011 case DCACHE_DIRTY_UNC_WAIT: // wait completion of PTE read 4012 // and retry a SC request to 3900 4013 // set the dirty bit in the PTE 3901 4014 { … … 3910 4023 if ( r_vci_rsp_data_error.read() ) // bus error 3911 4024 { 3912 r_mmu_detr = MMU_READ_PT2_ILLEGAL_ACCESS; 3913 r_mmu_dbvar = r_dcache_p2_vaddr; 3914 drsp.valid = true; 3915 drsp.error = true; 3916 r_dcache_fsm = DCACHE_IDLE; 3917 break; 4025 std::cout << "BUS ERROR in DCACHE_DIRTY_UNC_WAIT state" << std::endl; 4026 std::cout << "This should not happen in this state" << std::endl; 4027 exit(0); 3918 4028 } 3919 4029 if ( r_vci_rsp_fifo_dcache.rok() ) // PTE available … … 3922 4032 r_dcache_vci_sc_old = r_vci_rsp_fifo_dcache.read(); 3923 4033 r_dcache_vci_sc_new = r_vci_rsp_fifo_dcache.read() | PTE_D_MASK; 3924 r_dcache_fsm = DCACHE_ WRITE_SC_WAIT;4034 r_dcache_fsm = DCACHE_DIRTY_SC_WAIT; 3925 4035 } 3926 4036 break; … … 4169 4279 << " / set = " << set 4170 4280 << " / way = " << way << std::endl; 4171 r_itlb.print ();4281 r_itlb.printTrace(); 4172 4282 } 4173 4283 #endif … … 4186 4296 << " / set = " << set 4187 4297 << " / way = " << way << std::endl; 4188 r_dtlb.print ();4298 r_dtlb.printTrace(); 4189 4299 } 4190 4300 #endif … … 4207 4317 } // end switch r_dcache_fsm 4208 4318 4209 4210 //////////////////// save DREQ and DRSP fields for print_trace() ////////////////4211 m_dreq_valid = dreq.valid;4212 m_dreq_addr = dreq.addr;4213 m_dreq_mode = dreq.mode;4214 m_dreq_type = dreq.type;4215 m_dreq_wdata = dreq.wdata;4216 m_dreq_be = dreq.be;4217 4218 m_drsp_valid = drsp.valid;4219 m_drsp_rdata = drsp.rdata;4220 m_drsp_error = drsp.error;4221 4222 4319 ///////////////// wbuf update ////////////////////////////////////////////////////// 4223 4320 r_wbuf.update(); 4224 4321 4225 /////////// test processor frozen /////////////////////////////////////////////4322 //////////////// test processor frozen ///////////////////////////////////////////// 4226 4323 // The simulation exit if the number of consecutive frozen cycles 4227 4324 // is larger than the m_max_frozen_cycles (constructor parameter) 4228 if ( ( ireq.valid and not irsp.valid) or (dreq.valid and notdrsp.valid) )4325 if ( (m_ireq.valid and not m_irsp.valid) or (m_dreq.valid and not m_drsp.valid) ) 4229 4326 { 4230 4327 m_cpt_frz_cycles++; // used for instrumentation … … 4248 4345 uint32_t it = 0; 4249 4346 for (size_t i=0; i<(size_t)iss_t::n_irq; i++) if(p_irq[i].read()) it |= (1<<i); 4250 r_iss.executeNCycles(1, irsp,drsp, it);4347 r_iss.executeNCycles(1, m_irsp, m_drsp, it); 4251 4348 } 4252 4349
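Finally, a compact view of the dirty-bit update protocol implemented by the new DCACHE_DIRTY_* states: the dirty bit is first set locally in the dtlb and in the dcache copy of the PTE, then committed to memory with a compare-and-swap; if the compare-and-swap is not atomic (another writer modified the PTE in the meantime), the PTE is re-read uncached and the compare-and-swap is retried. The sequential sketch below flattens those states; the std::atomic variable stands in for the memory-side PTE reached through the VCI FSMs, and the PTE_D_MASK value is an assumption.

    #include <cstdint>
    #include <atomic>

    static const uint32_t PTE_D_MASK = 0x00800000;   // assumed position of the dirty bit

    // Stand-in for the memory-side PTE (the real component goes through VCI SC / READ).
    std::atomic<uint32_t> pte_in_memory;

    // Returns true if memory still contained 'old_value' (the SC was atomic).
    bool vci_sc(uint32_t& old_value, uint32_t new_value)
    {
        return pte_in_memory.compare_exchange_strong(old_value, new_value);
    }

    uint32_t vci_uncached_read() { return pte_in_memory.load(); }

    // DCACHE_DIRTY_CACHE_SET / DCACHE_DIRTY_SC_WAIT / DCACHE_DIRTY_UNC_WAIT, flattened:
    void set_dirty_bit(uint32_t pte_copy_in_dcache)
    {
        uint32_t old_pte = pte_copy_in_dcache;
        while (!vci_sc(old_pte, old_pte | PTE_D_MASK))   // retry until the update is atomic
        {
            old_pte = vci_uncached_read();               // DCACHE_DIRTY_UNC_WAIT: reload the PTE
        }
    }

    int main()
    {
        pte_in_memory = 0x00000043;                      // some mapped PTE, dirty bit not set
        set_dirty_bit(0x00000043);
        return (pte_in_memory.load() & PTE_D_MASK) ? 0 : 1;
    }

For a WRITE request the long write ends silently; for an SC request the processor answer (0 on success, 1 on failure) is taken from r_dcache_p2_sc_success once the dirty-bit update has completed, as the new DCACHE_DIRTY_SC_WAIT code above shows.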