Changeset 468 for trunk/modules
- Timestamp: Jul 24, 2013, 8:47:40 AM
- Location: trunk/modules
- Files: 8 edited
trunk/modules/vci_cc_vcache_wrapper
- Property svn:mergeinfo set to /branches/v5/modules/vci_cc_vcache_wrapper merged eligible
trunk/modules/vci_cc_vcache_wrapper/caba/metadata/vci_cc_vcache_wrapper.sd
r421 → r468

     ports = [
         Port('caba:vci_initiator', 'p_vci'),
-        Port('caba:dspin_input', 'p_dspin_in',
+        Port('caba:dspin_input', 'p_dspin_m2p',
             dspin_data_size = parameter.Reference('dspin_in_width')),
-        Port('caba:dspin_output', 'p_dspin_out',
+        Port('caba:dspin_output', 'p_dspin_p2m',
             dspin_data_size = parameter.Reference('dspin_out_width')),
+        Port('caba:dspin_input', 'p_dspin_clack',
+            dspin_data_size = parameter.Reference('dspin_in_width')),
         Port('caba:bit_in','p_irq', parameter.Constant('n_irq')),
         Port('caba:bit_in', 'p_resetn', auto = 'resetn'),
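The port-list change replaces the generic p_dspin_in / p_dspin_out pair with explicitly named M2P and P2M channels and adds a dedicated cleanup-acknowledge input, so a top-level netlist now needs a third DSPIN channel. The fragment below is a minimal binding sketch, assuming SoCLib-style VciSignals / DspinSignals channel types and a "proc" instance of the wrapper; the signal names and surrounding netlist code are illustrative assumptions, not part of this changeset.

// Hypothetical SystemC top-level fragment -- illustration only.
// Assumed: "proc" is an instance of the wrapper built from r468, and
// vci_param / dspin_in_width / dspin_out_width match its template parameters.
soclib::caba::VciSignals<vci_param>          signal_vci_proc   ("signal_vci_proc");
soclib::caba::DspinSignals<dspin_in_width>   signal_dspin_m2p  ("signal_dspin_m2p");   // inval / update / broadcast
soclib::caba::DspinSignals<dspin_out_width>  signal_dspin_p2m  ("signal_dspin_p2m");   // cleanup / multi_ack
soclib::caba::DspinSignals<dspin_in_width>   signal_dspin_clack("signal_dspin_clack"); // cleanup acknowledge

proc.p_vci         (signal_vci_proc);
proc.p_dspin_m2p   (signal_dspin_m2p);    // was p_dspin_in before r468
proc.p_dspin_p2m   (signal_dspin_p2m);    // was p_dspin_out before r468
proc.p_dspin_clack (signal_dspin_clack);  // new port in r468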
trunk/modules/vci_cc_vcache_wrapper/caba/source/include/vci_cc_vcache_wrapper.h
r432 → r468

     // handling coherence requests
     ICACHE_CC_CHECK,
+    ICACHE_CC_UPDT,
     ICACHE_CC_INVAL,
-    ICACHE_CC_UPDT,
-    ICACHE_CC_BROADCAST,
-    ICACHE_CC_SEND_WAIT,
 };

…

     // handling coherence requests
     DCACHE_CC_CHECK,
+    DCACHE_CC_UPDT,
     DCACHE_CC_INVAL,
-    DCACHE_CC_UPDT,
-    DCACHE_CC_BROADCAST,
-    DCACHE_CC_SEND_WAIT,
     // handling TLB inval (after a coherence or XTN request)
     DCACHE_INVAL_TLB_SCAN,

…

 {
     CC_RECEIVE_IDLE,
-    CC_RECEIVE_CLACK,
     CC_RECEIVE_BRDCAST_HEADER,
     CC_RECEIVE_BRDCAST_NLINE,
-    CC_RECEIVE_INVAL_HEADER,
-    CC_RECEIVE_INVAL_NLINE,
-    CC_RECEIVE_UPDT_HEADER,
-    CC_RECEIVE_UPDT_NLINE,
-    CC_RECEIVE_UPDT_DATA,
+    CC_RECEIVE_INS_INVAL_HEADER,
+    CC_RECEIVE_INS_INVAL_NLINE,
+    CC_RECEIVE_INS_UPDT_HEADER,
+    CC_RECEIVE_INS_UPDT_NLINE,
+    CC_RECEIVE_INS_UPDT_DATA,
+    CC_RECEIVE_DATA_INVAL_HEADER,
+    CC_RECEIVE_DATA_INVAL_NLINE,
+    CC_RECEIVE_DATA_UPDT_HEADER,
+    CC_RECEIVE_DATA_UPDT_NLINE,
+    CC_RECEIVE_DATA_UPDT_DATA,
 };

…

 public:
-    sc_in<bool>                                 p_clk;
-    sc_in<bool>                                 p_resetn;
-    sc_in<bool>                                 p_irq[iss_t::n_irq];
-    soclib::caba::VciInitiator<vci_param>       p_vci;
-    soclib::caba::DspinInput <dspin_in_width>   p_dspin_in;
-    soclib::caba::DspinOutput<dspin_out_width>  p_dspin_out;
+    sc_in<bool>                                 p_clk;
+    sc_in<bool>                                 p_resetn;
+    sc_in<bool>                                 p_irq[iss_t::n_irq];
+    soclib::caba::VciInitiator<vci_param>       p_vci;
+    soclib::caba::DspinInput<dspin_in_width>    p_dspin_m2p;
+    soclib::caba::DspinOutput<dspin_out_width>  p_dspin_p2m;
+    soclib::caba::DspinInput<dspin_in_width>    p_dspin_clack;

…

     sc_signal<bool>     r_icache_cc_need_write;   // activate the cache for writing

+    // coherence clack handling
+    sc_signal<bool>     r_icache_clack_req;       // clack request
+    sc_signal<size_t>   r_icache_clack_way;       // clack way
+    sc_signal<size_t>   r_icache_clack_set;       // clack set
+
     // icache flush handling
     sc_signal<size_t>   r_icache_flush_count;     // slot counter used for cache flush

…

     sc_signal<bool>     r_dcache_cc_need_write;   // activate the cache for writing

+    // coherence clack handling
+    sc_signal<bool>     r_dcache_clack_req;       // clack request
+    sc_signal<size_t>   r_dcache_clack_way;       // clack way
+    sc_signal<size_t>   r_dcache_clack_set;       // clack set
+
     // dcache flush handling
     sc_signal<size_t>   r_dcache_flush_count;     // slot counter used for cache flush

…

     sc_signal<paddr_t>  r_cc_receive_dcache_nline;  // cache line physical address

+    ///////////////////////////////////
+    // DSPIN CLACK INTERFACE REGISTER
+    ///////////////////////////////////
+    sc_signal<bool>     r_dspin_clack_req;
+    sc_signal<uint64_t> r_dspin_clack_flit;
+
     //////////////////////////////////////////////////////////////////
     // processor, write buffer, caches , TLBs
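Besides the renamed ports, the header adds per-cache clack registers (r_icache_clack_req/way/set, r_dcache_clack_req/way/set) and a one-flit buffer for the new CLACK interface (r_dspin_clack_req, r_dspin_clack_flit). The plain-C++ sketch below restates that handshake in simplified form: a buffered CLACK flit is handed to the targeted cache FSM only when that FSM's request flag is free, and a new flit is latched from the port only once the buffer has been consumed. The struct, the flit field layout and the helper functions are placeholders for illustration; the real model decodes the flit with DspinDhccpParam and masks way/set to the cache geometry.

// Behavioural sketch of the r468 CLACK path (plain C++, not the SystemC model).
// The flit layout below is a placeholder, NOT the real DSPIN DHCCP encoding.
#include <cstdint>
#include <cstdio>

static bool   clack_is_data(uint64_t f) { return (f >> 63) & 1; }   // placeholder: 1 = DCACHE, 0 = ICACHE
static size_t clack_way(uint64_t f)     { return (f >> 8) & 0xFF; } // placeholder way field
static size_t clack_set(uint64_t f)     { return f & 0xFF; }        // placeholder set field

struct ClackPath {
    bool     buf_valid = false;   // models r_dspin_clack_req
    uint64_t buf_flit  = 0;       // models r_dspin_clack_flit
    bool     icache_req = false, dcache_req = false;   // models r_icache_clack_req / r_dcache_clack_req
    size_t   icache_way = 0, icache_set = 0, dcache_way = 0, dcache_set = 0;

    // one cycle of the interface: dispatch the buffered flit if the target
    // cache FSM is free, then (re)load the buffer once it has been consumed
    void cycle(bool port_write, uint64_t port_data) {
        bool get = not buf_valid;                       // an empty buffer always accepts
        if (buf_valid) {
            if (clack_is_data(buf_flit) and not dcache_req) {
                dcache_req = true;
                dcache_way = clack_way(buf_flit);
                dcache_set = clack_set(buf_flit);
                get = true;                             // flit consumed
            } else if (not clack_is_data(buf_flit) and not icache_req) {
                icache_req = true;
                icache_way = clack_way(buf_flit);
                icache_set = clack_set(buf_flit);
                get = true;                             // flit consumed
            }
            // otherwise stall: keep the flit until the target FSM clears its flag
        }
        if (get) { buf_valid = port_write; buf_flit = port_data; }
    }
};

int main() {
    ClackPath cp;
    cp.dcache_req = true;                               // DCACHE FSM still busy with a previous clack
    cp.cycle(true, (1ULL << 63) | (2 << 8) | 5);        // DCACHE clack (way 2, set 5) gets buffered
    cp.cycle(false, 0);                                 // stalled: dcache_req still set
    cp.dcache_req = false;                              // DCACHE FSM done (as in DCACHE_CC_CHECK)
    cp.cycle(false, 0);                                 // buffered flit is now dispatched
    std::printf("dcache clack: way=%zu set=%zu\n", cp.dcache_way, cp.dcache_set);
    return 0;
}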
trunk/modules/vci_cc_vcache_wrapper/caba/source/src/vci_cc_vcache_wrapper.cpp
r434 r468 62 62 63 63 "ICACHE_CC_CHECK", 64 "ICACHE_CC_UPDT", 64 65 "ICACHE_CC_INVAL", 65 "ICACHE_CC_UPDT",66 "ICACHE_CC_BROADCAST",67 "ICACHE_CC_SEND_WAIT",68 66 }; 69 67 … … 110 108 111 109 "DCACHE_CC_CHECK", 110 "DCACHE_CC_UPDT", 112 111 "DCACHE_CC_INVAL", 113 "DCACHE_CC_UPDT",114 "DCACHE_CC_BROADCAST",115 "DCACHE_CC_SEND_WAIT",116 112 117 113 "DCACHE_INVAL_TLB_SCAN", … … 160 156 const char *cc_receive_fsm_state_str[] = { 161 157 "CC_RECEIVE_IDLE", 162 "CC_RECEIVE_CLACK",163 158 "CC_RECEIVE_BRDCAST_HEADER", 164 159 "CC_RECEIVE_BRDCAST_NLINE", 165 "CC_RECEIVE_INVAL_HEADER", 166 "CC_RECEIVE_INVAL_NLINE", 167 "CC_RECEIVE_UPDT_HEADER", 168 "CC_RECEIVE_UPDT_NLINE", 169 "CC_RECEIVE_UPDT_DATA", 160 "CC_RECEIVE_INS_INVAL_HEADER", 161 "CC_RECEIVE_INS_INVAL_NLINE", 162 "CC_RECEIVE_INS_UPDT_HEADER", 163 "CC_RECEIVE_INS_UPDT_NLINE", 164 "CC_RECEIVE_INS_UPDT_DATA", 165 "CC_RECEIVE_DATA_INVAL_HEADER", 166 "CC_RECEIVE_DATA_INVAL_NLINE", 167 "CC_RECEIVE_DATA_UPDT_HEADER", 168 "CC_RECEIVE_DATA_UPDT_NLINE", 169 "CC_RECEIVE_DATA_UPDT_DATA", 170 170 }; 171 171 … … 216 216 p_resetn("p_resetn"), 217 217 p_vci("p_vci"), 218 p_dspin_in("p_dspin_in"), 219 p_dspin_out("p_dspin_out"), 218 p_dspin_m2p("p_dspin_m2p"), 219 p_dspin_p2m("p_dspin_p2m"), 220 p_dspin_clack("p_dspin_clack"), 220 221 221 222 m_cacheability_table( mtd.getCacheabilityTable() ), … … 755 756 r_icache_cc_send_req = false; 756 757 758 r_icache_clack_req = false; 759 757 760 // No pending write in pipeline 758 761 r_dcache_wbuf_req = false; … … 775 778 r_dcache_cc_send_req = false; 776 779 780 r_dcache_clack_req = false; 781 777 782 // No request from CC_RECEIVE FSM to ICACHE/DCACHE FSMs 778 783 r_cc_receive_icache_req = false; … … 789 794 r_icache_miss_inval = false; 790 795 r_dcache_miss_inval = false; 796 797 r_dspin_clack_req = false; 791 798 792 799 // No signalisation of errors … … 1014 1021 // 5/ uncacheable read miss => ICACHE_UNC_REQ 1015 1022 { 1023 // coherence clack interrupt 1024 if ( r_icache_clack_req.read() ) 1025 { 1026 r_icache_fsm = ICACHE_CC_CHECK; 1027 r_icache_fsm_save = r_icache_fsm.read(); 1028 break; 1029 } 1030 1016 1031 // coherence interrupt 1017 1032 if ( r_cc_receive_icache_req.read() ) … … 1242 1257 // external coherence request are accepted in this state. 
1243 1258 { 1259 // coherence clack interrupt 1260 if ( r_icache_clack_req.read() ) 1261 { 1262 r_icache_fsm = ICACHE_CC_CHECK; 1263 r_icache_fsm_save = r_icache_fsm.read(); 1264 break; 1265 } 1266 1244 1267 // coherence interrupt 1245 1268 if ( r_cc_receive_icache_req.read() ) … … 1286 1309 // A cleanup request is generated for each valid line 1287 1310 { 1311 // coherence clack interrupt 1312 if ( r_icache_clack_req.read() ) 1313 { 1314 r_icache_fsm = ICACHE_CC_CHECK; 1315 r_icache_fsm_save = r_icache_fsm.read(); 1316 break; 1317 } 1318 1288 1319 // coherence request (from CC_RECEIVE FSM) 1289 1320 if ( r_cc_receive_icache_req.read() ) … … 1484 1515 { 1485 1516 if (m_ireq.valid) m_cost_ins_miss_frz++; 1517 1518 // coherence clack interrupt 1519 if ( r_icache_clack_req.read() ) 1520 { 1521 r_icache_fsm = ICACHE_CC_CHECK; 1522 r_icache_fsm_save = r_icache_fsm.read(); 1523 break; 1524 } 1486 1525 1487 1526 // coherence interrupt … … 1574 1613 if (m_ireq.valid) m_cost_ins_miss_frz++; 1575 1614 1615 // coherence clack interrupt 1616 if ( r_icache_clack_req.read() ) 1617 { 1618 r_icache_fsm = ICACHE_CC_CHECK; 1619 r_icache_fsm_save = r_icache_fsm.read(); 1620 break; 1621 } 1622 1576 1623 // coherence interrupt 1577 1624 if ( r_cc_receive_icache_req.read() ) … … 1645 1692 if ( m_ireq.valid ) m_cost_ins_miss_frz++; 1646 1693 1694 // coherence clack interrupt 1695 if ( r_icache_clack_req.read() ) 1696 { 1697 r_icache_fsm = ICACHE_CC_CHECK; 1698 r_icache_fsm_save = r_icache_fsm.read(); 1699 break; 1700 } 1701 1647 1702 // coherence interrupt 1648 1703 if ( r_cc_receive_icache_req.read() ) … … 1716 1771 case ICACHE_UNC_WAIT: // waiting a response to an uncacheable read from VCI_RSP FSM 1717 1772 { 1773 // coherence clack interrupt 1774 if ( r_icache_clack_req.read() ) 1775 { 1776 r_icache_fsm = ICACHE_CC_CHECK; 1777 r_icache_fsm_save = r_icache_fsm.read(); 1778 break; 1779 } 1780 1718 1781 // coherence interrupt 1719 1782 if ( r_cc_receive_icache_req.read() ) … … 1756 1819 paddr_t mask = ~((m_icache_words<<2)-1); 1757 1820 1758 if (r_cc_receive_icache_type.read() == CC_TYPE_CLACK) 1759 // We switch the directory slot to EMPTY state 1760 // and reset r_icache_miss_clack if the cleanup ack 1761 // is matching a pending miss 1762 { 1763 1764 if ( m_ireq.valid ) m_cost_ins_miss_frz++; 1765 1766 #ifdef INSTRUMENTATION 1767 m_cpt_icache_dir_write++; 1768 #endif 1769 r_icache.write_dir( 0, 1770 r_cc_receive_icache_way.read(), 1771 r_cc_receive_icache_set.read(), 1772 CACHE_SLOT_STATE_EMPTY); 1773 1774 if ( (r_icache_miss_set.read() == r_cc_receive_icache_set.read()) and 1775 (r_icache_miss_way.read() == r_cc_receive_icache_way.read()) ) 1776 r_icache_miss_clack = false; 1777 1778 r_icache_fsm = r_icache_fsm_save.read() ; 1779 r_cc_receive_icache_req = false; 1780 1781 #if DEBUG_ICACHE 1782 if ( m_debug_activated ) 1783 { 1784 std::cout << " <PROC " << name() 1785 << " ICACHE_CC_CHECK> CC_TYPE_CLACK slot returns to empty state" 1786 << " set = " << r_cc_receive_icache_set.read() 1787 << " / way = " << r_cc_receive_icache_way.read() << std::endl; 1788 } 1789 #endif 1790 } 1791 else if( ((r_icache_fsm_save.read() == ICACHE_MISS_SELECT) or 1792 (r_icache_fsm_save.read() == ICACHE_MISS_WAIT) or 1793 (r_icache_fsm_save.read() == ICACHE_MISS_DIR_UPDT)) and 1794 ((r_icache_vci_paddr.read() & mask) == (paddr & mask)) ) // matching 1821 1822 // Match between MISS address and CC address 1823 // note: In the same cycle we can handle a CLACK and a MISS match 1824 // because the CLACK access the directory but the MISS 
match dont. 1825 if (r_cc_receive_icache_req.read() and 1826 ((r_icache_fsm_save.read() == ICACHE_MISS_SELECT ) or 1827 (r_icache_fsm_save.read() == ICACHE_MISS_WAIT ) or 1828 (r_icache_fsm_save.read() == ICACHE_MISS_DIR_UPDT)) and 1829 ((r_icache_vci_paddr.read() & mask) == (paddr & mask)) ) // matching 1795 1830 { 1796 1831 // signaling the matching … … 1803 1838 r_icache_fsm = ICACHE_CC_UPDT; 1804 1839 r_icache_cc_word = r_cc_receive_word_idx.read(); 1840 1805 1841 // just pop the fifo , don't write in icache 1806 1842 r_icache_cc_need_write = false; … … 1821 1857 #endif 1822 1858 } 1823 else // no match 1824 { 1825 int state = 0; 1826 size_t way = 0; 1827 size_t set = 0; 1828 size_t word = 0; 1859 1860 // CLACK handler 1861 // We switch the directory slot to EMPTY state 1862 // and reset r_icache_miss_clack if the cleanup ack 1863 // is matching a pending miss. 1864 if ( r_icache_clack_req.read() ) 1865 { 1866 1867 if ( m_ireq.valid ) m_cost_ins_miss_frz++; 1829 1868 1830 1869 #ifdef INSTRUMENTATION 1831 m_cpt_icache_dir_read++; 1832 #endif 1833 r_icache.read_dir(paddr, 1834 &state, 1835 &way, 1836 &set, 1837 &word); 1838 1839 r_icache_cc_way = way; 1840 r_icache_cc_set = set; 1841 1842 if ( state == CACHE_SLOT_STATE_VALID) // hit 1843 { 1844 // need to update the cache state 1845 r_icache_cc_need_write = true; 1846 if (r_cc_receive_icache_type.read() == CC_TYPE_UPDT) // hit update 1847 { 1848 r_icache_fsm = ICACHE_CC_UPDT; 1849 r_icache_cc_word = r_cc_receive_word_idx.read(); 1850 } 1851 else if (r_cc_receive_icache_type.read() == CC_TYPE_INVAL) // hit inval 1852 { 1853 r_icache_fsm = ICACHE_CC_INVAL; 1854 } 1855 else if (r_cc_receive_icache_type.read() == CC_TYPE_BRDCAST) // hit broadcast 1856 { 1857 r_icache_fsm = ICACHE_CC_BROADCAST; 1858 } 1859 } 1860 else // miss 1861 { 1862 // multicast acknowledgement required in case of update 1863 if(r_cc_receive_icache_type.read() == CC_TYPE_UPDT) 1864 { 1865 r_icache_fsm = ICACHE_CC_UPDT; 1866 r_icache_cc_word = r_cc_receive_word_idx.read(); 1867 // just pop the fifo , don't write in icache 1868 r_icache_cc_need_write = false; 1869 } 1870 else // No response needed 1871 { 1872 r_cc_receive_icache_req = false; 1873 r_icache_fsm = r_icache_fsm_save.read(); 1874 } 1875 } 1876 } 1877 break; 1878 } 1879 ///////////////////// 1880 case ICACHE_CC_INVAL: // hit inval : switch slot to EMPTY state 1881 { 1870 m_cpt_icache_dir_write++; 1871 #endif 1872 r_icache.write_dir( 0, 1873 r_icache_clack_way.read(), 1874 r_icache_clack_set.read(), 1875 CACHE_SLOT_STATE_EMPTY); 1876 1877 if ( (r_icache_miss_set.read() == r_icache_clack_set.read()) and 1878 (r_icache_miss_way.read() == r_icache_clack_way.read()) ) 1879 { 1880 r_icache_miss_clack = false; 1881 } 1882 1883 r_icache_clack_req = false; 1884 1885 // return to cc_save state if no pending CC request 1886 if ( not r_cc_receive_icache_req.read() ) 1887 r_icache_fsm = r_icache_fsm_save.read(); 1882 1888 1883 1889 #if DEBUG_ICACHE … … 1885 1891 { 1886 1892 std::cout << " <PROC " << name() 1887 << " ICACHE_CC_INVAL>slot returns to empty state"1888 << " set = " << r_icache_cc_set.read()1889 << " / way = " << r_icache_cc_way.read() << std::endl;1893 << " ICACHE_CC_CHECK> CC_TYPE_CLACK slot returns to empty state" 1894 << " set = " << r_icache_clack_set.read() 1895 << " / way = " << r_icache_clack_way.read() << std::endl; 1890 1896 } 1891 1897 #endif 1898 1899 break; 1900 } 1901 1902 // wait if pending request to CC_SEND. 
This way if there are pending 1903 // CLACK they can be treated in this state and then a deadlock 1904 // situation is avoided 1905 if ( r_icache_cc_send_req.read() ) break; 1906 1907 // CC request handler 1908 1909 int state = 0; 1910 size_t way = 0; 1911 size_t set = 0; 1912 size_t word = 0; 1892 1913 1893 1914 #ifdef INSTRUMENTATION 1894 1915 m_cpt_icache_dir_read++; 1895 1916 #endif 1896 if (r_icache_cc_need_write.read()) 1897 { 1898 r_icache.write_dir( 0, 1899 r_icache_cc_way.read(), 1900 r_icache_cc_set.read(), 1901 CACHE_SLOT_STATE_EMPTY ); 1902 // no need to write in the cache anymore 1903 r_icache_cc_need_write = false; 1904 } 1905 1906 // multicast acknowledgement 1907 // send a request to cc_send_fsm 1908 if(not r_icache_cc_send_req.read()) // cc_send is available 1909 { 1910 // coherence request completed 1911 r_cc_receive_icache_req = false; 1912 // request multicast acknowledgement 1913 r_icache_cc_send_req = true; 1914 r_icache_cc_send_nline = r_cc_receive_icache_nline.read(); 1915 r_icache_cc_send_updt_tab_idx = r_cc_receive_icache_updt_tab_idx.read(); 1916 r_icache_cc_send_type = CC_TYPE_MULTI_ACK; 1917 1918 r_icache_fsm = r_icache_fsm_save.read(); 1919 } 1920 //else wait for previous cc_send request to be sent 1917 r_icache.read_dir(paddr, 1918 &state, 1919 &way, 1920 &set, 1921 &word); 1922 1923 r_icache_cc_way = way; 1924 r_icache_cc_set = set; 1925 1926 if ( state == CACHE_SLOT_STATE_VALID) // hit 1927 { 1928 // need to update the cache state 1929 if (r_cc_receive_icache_type.read() == CC_TYPE_UPDT) // hit update 1930 { 1931 r_icache_cc_need_write = true; 1932 r_icache_fsm = ICACHE_CC_UPDT; 1933 r_icache_cc_word = r_cc_receive_word_idx.read(); 1934 } 1935 else if ( r_cc_receive_icache_type.read() == CC_TYPE_INVAL ) // hit inval 1936 { 1937 r_icache_fsm = ICACHE_CC_INVAL; 1938 } 1939 } 1940 else // miss 1941 { 1942 // multicast acknowledgement required in case of update 1943 if(r_cc_receive_icache_type.read() == CC_TYPE_UPDT) 1944 { 1945 r_icache_fsm = ICACHE_CC_UPDT; 1946 r_icache_cc_word = r_cc_receive_word_idx.read(); 1947 1948 // just pop the fifo , don't write in icache 1949 r_icache_cc_need_write = false; 1950 } 1951 else // No response needed 1952 { 1953 r_cc_receive_icache_req = false; 1954 r_icache_fsm = r_icache_fsm_save.read(); 1955 } 1956 } 1957 break; 1958 } 1959 ///////////////////// 1960 case ICACHE_CC_INVAL: // hit inval : switch slot to ZOMBI state 1961 { 1962 assert (not r_icache_cc_send_req.read() && 1963 "ERROR in ICACHE_CC_INVAL: the r_icache_cc_send_req " 1964 "must not be set"); 1965 1966 #ifdef INSTRUMENTATION 1967 m_cpt_icache_dir_read++; 1968 #endif 1969 1970 // Switch slot state to ZOMBI and send CLEANUP command 1971 r_icache.write_dir( 0, 1972 r_icache_cc_way.read(), 1973 r_icache_cc_set.read(), 1974 CACHE_SLOT_STATE_ZOMBI ); 1975 1976 // coherence request completed 1977 r_icache_cc_send_req = true; 1978 r_icache_cc_send_nline = r_cc_receive_icache_nline.read(); 1979 r_icache_cc_send_way = r_icache_cc_way.read(); 1980 r_icache_cc_send_type = CC_TYPE_CLEANUP; 1981 1982 r_icache_fsm = r_icache_fsm_save.read(); 1983 1984 #if DEBUG_ICACHE 1985 if ( m_debug_activated ) 1986 { 1987 std::cout << " <PROC " << name() 1988 << " ICACHE_CC_INVAL> slot returns to ZOMBI state" 1989 << " set = " << r_icache_cc_set.read() 1990 << " / way = " << r_icache_cc_way.read() << std::endl; 1991 } 1992 #endif 1993 1921 1994 break; 1922 1995 } … … 1924 1997 case ICACHE_CC_UPDT: // hit update : write one word per cycle 1925 1998 { 1999 assert (not 
r_icache_cc_send_req.read() && 2000 "ERROR in ICACHE_CC_UPDT: the r_icache_cc_send_req " 2001 "must not be set"); 2002 2003 if ( not r_cc_receive_updt_fifo_be.rok() ) break; 2004 2005 2006 size_t word = r_icache_cc_word.read(); 2007 size_t way = r_icache_cc_way.read(); 2008 size_t set = r_icache_cc_set.read(); 2009 2010 if (r_icache_cc_need_write.read()) 2011 { 2012 r_icache.write( way, 2013 set, 2014 word, 2015 r_cc_receive_updt_fifo_data.read(), 2016 r_cc_receive_updt_fifo_be.read() ); 2017 2018 r_icache_cc_word = word+1; 2019 2020 #ifdef INSTRUMENTATION 2021 m_cpt_icache_data_write++; 2022 #endif 1926 2023 1927 2024 #if DEBUG_ICACHE … … 1935 2032 } 1936 2033 #endif 1937 1938 #ifdef INSTRUMENTATION 1939 m_cpt_icache_data_write++; 1940 #endif 1941 size_t word = r_icache_cc_word.read(); 1942 size_t way = r_icache_cc_way.read(); 1943 size_t set = r_icache_cc_set.read(); 1944 1945 if (r_cc_receive_updt_fifo_be.rok()) 1946 { 1947 if (r_icache_cc_need_write.read()) 1948 { 1949 r_icache.write( way, 1950 set, 1951 word, 1952 r_cc_receive_updt_fifo_data.read(), 1953 r_cc_receive_updt_fifo_be.read() ); 1954 1955 r_icache_cc_word = word+1; 1956 } 1957 if ( r_cc_receive_updt_fifo_eop.read() ) // last word 1958 { 1959 // no need to write in the cache anymore 1960 r_icache_cc_need_write = false; 1961 // wait to send a request to cc_send_fsm 1962 if(not r_icache_cc_send_req.read()) // cc_send is available 1963 { 1964 //consume last flit 1965 cc_receive_updt_fifo_get = true; 1966 // coherence request completed 1967 r_cc_receive_icache_req = false; 1968 // request multicast acknowledgement 1969 r_icache_cc_send_req = true; 1970 r_icache_cc_send_nline = r_cc_receive_icache_nline.read(); 1971 r_icache_cc_send_updt_tab_idx = r_cc_receive_icache_updt_tab_idx.read(); 1972 r_icache_cc_send_type = CC_TYPE_MULTI_ACK; 1973 1974 r_icache_fsm = r_icache_fsm_save.read(); 1975 } 1976 } 1977 else 1978 { 1979 //consume fifo if not eop 1980 cc_receive_updt_fifo_get = true; 1981 } 1982 } 2034 } 2035 2036 if ( r_cc_receive_updt_fifo_eop.read() ) // last word 2037 { 2038 // no need to write in the cache anymore 2039 r_icache_cc_need_write = false; 2040 2041 // coherence request completed 2042 r_cc_receive_icache_req = false; 2043 2044 // request multicast acknowledgement 2045 r_icache_cc_send_req = true; 2046 r_icache_cc_send_nline = r_cc_receive_icache_nline.read(); 2047 r_icache_cc_send_updt_tab_idx = r_cc_receive_icache_updt_tab_idx.read(); 2048 r_icache_cc_send_type = CC_TYPE_MULTI_ACK; 2049 2050 r_icache_fsm = r_icache_fsm_save.read(); 2051 } 2052 //consume fifo if not eop 2053 cc_receive_updt_fifo_get = true; 2054 1983 2055 break; 1984 2056 } 1985 ///////////////////////// 1986 case ICACHE_CC_BROADCAST: // hit broadcast : switch slot to ZOMBI state 1987 // and request a cleanup 1988 { 1989 1990 #if DEBUG_ICACHE 1991 if ( m_debug_activated ) 1992 { 1993 std::cout << " <PROC " << name() 1994 << " ICACHE_CC_BROADCAST > Slot goes to zombi state " 1995 << " set = " << r_icache_cc_set.read() 1996 << " / way = " << r_icache_cc_way.read() << std::endl; 1997 } 1998 #endif 1999 2000 #ifdef INSTRUMENTATION 2001 m_cpt_icache_dir_write++; 2002 #endif 2003 if (r_icache_cc_need_write.read()) 2004 { 2005 r_icache.write_dir( r_icache_cc_way.read(), 2006 r_icache_cc_set.read(), 2007 CACHE_SLOT_STATE_ZOMBI ); 2008 // no need to write in the cache anymore 2009 r_icache_cc_need_write = false; 2010 } 2011 2012 // cleanup 2013 // send a request to cc_send_fsm 2014 if(not r_icache_cc_send_req.read()) // cc_send is available 2015 { 2016 // 
coherence request completed 2017 r_cc_receive_icache_req = false; 2018 // request cleanup 2019 r_icache_cc_send_req = true; 2020 r_icache_cc_send_nline = r_cc_receive_icache_nline.read(); 2021 r_icache_cc_send_way = r_icache_cc_way.read(); 2022 r_icache_cc_send_type = CC_TYPE_CLEANUP; 2023 2024 r_icache_fsm = r_icache_fsm_save.read(); 2025 } 2026 //else wait for previous cc_send request to be sent 2027 break; 2028 } 2057 2029 2058 } // end switch r_icache_fsm 2030 2059 … … 2308 2337 } 2309 2338 2339 // coherence clack request (from DSPIN CLACK) 2340 else if ( r_dcache_clack_req.read() ) 2341 { 2342 r_dcache_fsm = DCACHE_CC_CHECK; 2343 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 2344 } 2310 2345 // coherence request (from CC_RECEIVE FSM) 2311 2346 else if ( r_cc_receive_dcache_req.read() ) … … 2912 2947 case DCACHE_TLB_PTE1_GET: // try to read a PT1 entry in dcache 2913 2948 { 2949 // coherence clack request (from DSPIN CLACK) 2950 if ( r_dcache_clack_req.read() ) 2951 { 2952 r_dcache_fsm = DCACHE_CC_CHECK; 2953 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 2954 break; 2955 } 2956 2914 2957 // coherence request (from CC_RECEIVE FSM) 2915 2958 if ( r_cc_receive_dcache_req.read() ) … … 3209 3252 case DCACHE_TLB_PTE2_GET: // Try to get a PTE2 (64 bits) in the dcache 3210 3253 { 3254 // coherence clack request (from DSPIN CLACK) 3255 if ( r_dcache_clack_req.read() ) 3256 { 3257 r_dcache_fsm = DCACHE_CC_CHECK; 3258 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3259 break; 3260 } 3261 3211 3262 // coherence request (from CC_RECEIVE FSM) 3212 3263 if ( r_cc_receive_dcache_req.read() ) … … 3506 3557 3507 3558 { 3559 // coherence clack request (from DSPIN CLACK) 3560 if ( r_dcache_clack_req.read() ) 3561 { 3562 r_dcache_fsm = DCACHE_CC_CHECK; 3563 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3564 break; 3565 } 3566 3508 3567 // coherence request (from CC_RECEIVE FSM) 3509 3568 if ( r_cc_receive_dcache_req.read() ) … … 3577 3636 // as there is a risk of dead-lock 3578 3637 { 3638 // coherence clack request (from DSPIN CLACK) 3639 if ( r_dcache_clack_req.read() ) 3640 { 3641 r_dcache_fsm = DCACHE_CC_CHECK; 3642 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3643 break; 3644 } 3645 3579 3646 // coherence request (from CC_RECEIVE FSM) 3580 3647 if ( r_cc_receive_dcache_req.read() ) … … 3582 3649 r_dcache_fsm = DCACHE_CC_CHECK; 3583 3650 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3651 break; 3584 3652 } 3585 3653 … … 3598 3666 // and because it can exist a simultaneous ITLB miss 3599 3667 { 3668 // coherence clack request (from DSPIN CLACK) 3669 if ( r_dcache_clack_req.read() ) 3670 { 3671 r_dcache_fsm = DCACHE_CC_CHECK; 3672 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3673 break; 3674 } 3675 3600 3676 // coherence request (from CC_RECEIVE FSM) 3601 3677 if ( r_cc_receive_dcache_req.read() ) … … 3633 3709 // returns to IDLE and flush TLBs when last slot 3634 3710 { 3711 // coherence clack request (from DSPIN CLACK) 3712 if ( r_dcache_clack_req.read() ) 3713 { 3714 r_dcache_fsm = DCACHE_CC_CHECK; 3715 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3716 break; 3717 } 3718 3635 3719 // coherence request (from CC_RECEIVE FSM) 3636 3720 if ( r_cc_receive_dcache_req.read() ) … … 3893 3977 { 3894 3978 if ( m_dreq.valid) m_cost_data_miss_frz++; 3979 3980 // coherence clack request (from DSPIN CLACK) 3981 if ( r_dcache_clack_req.read() ) 3982 { 3983 r_dcache_fsm = DCACHE_CC_CHECK; 3984 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 3985 break; 3986 } 3895 3987 3896 3988 // coherence request (from CC_RECEIVE FSM) … … 4009 4101 
{ 4010 4102 if ( m_dreq.valid) m_cost_data_miss_frz++; 4103 4104 // coherence clack request (from DSPIN CLACK) 4105 if ( r_dcache_clack_req.read() ) 4106 { 4107 r_dcache_fsm = DCACHE_CC_CHECK; 4108 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 4109 break; 4110 } 4011 4111 4012 4112 // coherence request (from CC_RECEIVE FSM) … … 4125 4225 if ( m_dreq.valid) m_cost_data_miss_frz++; 4126 4226 4227 // coherence clack request (from DSPIN CLACK) 4228 if ( r_dcache_clack_req.read() ) 4229 { 4230 r_dcache_fsm = DCACHE_CC_CHECK; 4231 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 4232 break; 4233 } 4234 4127 4235 // coherence request (from CC_RECEIVE FSM) 4128 4236 if ( r_cc_receive_dcache_req.read() ) … … 4199 4307 case DCACHE_UNC_WAIT: // waiting a response to an uncacheable read 4200 4308 { 4309 // coherence clack request (from DSPIN CLACK) 4310 if ( r_dcache_clack_req.read() ) 4311 { 4312 r_dcache_fsm = DCACHE_CC_CHECK; 4313 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 4314 break; 4315 } 4316 4201 4317 // coherence request (from CC_RECEIVE FSM) 4202 4318 if ( r_cc_receive_dcache_req.read() ) … … 4236 4352 case DCACHE_LL_WAIT: // waiting VCI response to a LL transaction 4237 4353 { 4354 // coherence clack request (from DSPIN CLACK) 4355 if ( r_dcache_clack_req.read() ) 4356 { 4357 r_dcache_fsm = DCACHE_CC_CHECK; 4358 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 4359 break; 4360 } 4361 4238 4362 // coherence request (from CC_RECEIVE FSM) 4239 4363 if ( r_cc_receive_dcache_req.read() ) … … 4282 4406 case DCACHE_SC_WAIT: // waiting VCI response to a SC transaction 4283 4407 { 4408 // coherence clack request (from DSPIN CLACK) 4409 if ( r_dcache_clack_req.read() ) 4410 { 4411 r_dcache_fsm = DCACHE_CC_CHECK; 4412 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 4413 break; 4414 } 4415 4284 4416 // coherence request (from CC_RECEIVE FSM) 4285 4417 if ( r_cc_receive_dcache_req.read() ) … … 4321 4453 size_t way; 4322 4454 size_t set; 4323 size_t word; 4455 size_t word; // unused 4324 4456 int state; 4325 4457 … … 4374 4506 // - if the CAS is a failure, we just retry the write. 4375 4507 { 4508 // coherence clack request (from DSPIN CLACK) 4509 if ( r_dcache_clack_req.read() ) 4510 { 4511 r_dcache_fsm = DCACHE_CC_CHECK; 4512 r_dcache_fsm_cc_save = r_dcache_fsm.read(); 4513 break; 4514 } 4515 4376 4516 // coherence request (from CC_RECEIVE FSM) 4377 4517 if ( r_cc_receive_dcache_req.read() ) … … 4382 4522 } 4383 4523 4384 if ( r_vci_rsp_data_error.read() ) 4524 if ( r_vci_rsp_data_error.read() ) // bus error 4385 4525 { 4386 4526 std::cout << "BUS ERROR in DCACHE_DIRTY_WAIT state" << std::endl; … … 4388 4528 exit(0); 4389 4529 } 4390 else if ( r_vci_rsp_fifo_dcache.rok() ) 4530 else if ( r_vci_rsp_fifo_dcache.rok() ) // response available 4391 4531 { 4392 4532 vci_rsp_fifo_dcache_get = true; … … 4432 4572 #endif 4433 4573 4434 if (r_cc_receive_dcache_type.read() == CC_TYPE_CLACK) 4435 // We switch the directory slot to EMPTY state 4436 // and reset r_icache_miss_clack if the cleanup ack 4437 // is matching a pending miss. 
4438 { 4439 4440 if ( m_dreq.valid ) m_cost_data_miss_frz++; 4441 4442 #ifdef INSTRUMENTATION 4443 m_cpt_dcache_dir_write++; 4444 #endif 4445 r_dcache.write_dir( 0, 4446 r_cc_receive_dcache_way.read(), 4447 r_cc_receive_dcache_set.read(), 4448 CACHE_SLOT_STATE_EMPTY); 4449 4450 if ( (r_dcache_miss_set.read() == r_cc_receive_dcache_set.read()) and 4451 (r_dcache_miss_way.read() == r_cc_receive_dcache_way.read()) ) 4452 r_dcache_miss_clack = false; 4453 4454 r_dcache_fsm = r_dcache_fsm_cc_save.read() ; 4455 r_cc_receive_dcache_req = false; 4456 #if DEBUG_DCACHE 4457 if ( m_debug_activated ) 4458 { 4459 std::cout << " <PROC " << name() 4460 << " DCACHE_CC_CHECK> CC_TYPE_CLACK Switch slot to EMPTY state" 4461 << " set = " << r_cc_receive_dcache_set.read() 4462 << " / way = " << r_cc_receive_dcache_way.read() << std::endl; 4463 } 4464 #endif 4465 } 4466 else if( ((r_dcache_fsm_cc_save == DCACHE_MISS_SELECT) or 4467 (r_dcache_fsm_cc_save == DCACHE_MISS_WAIT) or 4468 (r_dcache_fsm_cc_save == DCACHE_MISS_DIR_UPDT)) and 4469 ((r_dcache_vci_paddr.read() & mask) == (paddr & mask)) ) // matching 4574 4575 // Match between MISS address and CC address 4576 // note: In the same cycle we can handle a CLACK and a MISS match 4577 // because the CLACK access the directory but the MISS match dont. 4578 if (r_cc_receive_dcache_req.read() and 4579 ((r_dcache_fsm_cc_save == DCACHE_MISS_SELECT ) or 4580 (r_dcache_fsm_cc_save == DCACHE_MISS_WAIT ) or 4581 (r_dcache_fsm_cc_save == DCACHE_MISS_DIR_UPDT)) and 4582 ((r_dcache_vci_paddr.read() & mask) == (paddr & mask))) // matching 4470 4583 { 4471 4584 // signaling matching … … 4478 4591 r_dcache_fsm = DCACHE_CC_UPDT; 4479 4592 r_dcache_cc_word = r_cc_receive_word_idx.read(); 4593 4480 4594 // just pop the fifo , don't write in icache 4481 4595 r_dcache_cc_need_write = false; … … 4485 4599 { 4486 4600 r_cc_receive_dcache_req = false; 4487 r_dcache_fsm = r_dcache_fsm_cc_save.read();4601 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4488 4602 } 4489 4603 … … 4496 4610 } 4497 4611 #endif 4498 4499 } 4500 else // no match 4501 { 4502 int state = 0; 4503 size_t way = 0; 4504 size_t set = 0; 4505 size_t word = 0; 4612 } 4613 4614 // CLACK handler 4615 // We switch the directory slot to EMPTY state and reset 4616 // r_dcache_miss_clack if the cleanup ack is matching a pending miss. 4617 if ( r_dcache_clack_req.read() ) 4618 { 4619 if ( m_dreq.valid ) m_cost_data_miss_frz++; 4620 4621 #ifdef INSTRUMENTATION 4622 m_cpt_dcache_dir_write++; 4623 #endif 4624 r_dcache.write_dir( 0, 4625 r_dcache_clack_way.read(), 4626 r_dcache_clack_set.read(), 4627 CACHE_SLOT_STATE_EMPTY); 4628 4629 if ( (r_dcache_miss_set.read() == r_dcache_clack_set.read()) and 4630 (r_dcache_miss_way.read() == r_dcache_clack_way.read()) ) 4631 { 4632 r_dcache_miss_clack = false; 4633 } 4634 4635 r_dcache_clack_req = false; 4636 4637 // return to cc_save state if no pending CC request 4638 if ( not r_cc_receive_dcache_req.read() ) 4639 { 4640 r_dcache_fsm = r_dcache_fsm_cc_save.read() ; 4641 } 4642 4643 #if DEBUG_DCACHE 4644 if ( m_debug_activated ) 4645 { 4646 std::cout << " <PROC " << name() 4647 << " DCACHE_CC_CHECK> CC_TYPE_CLACK Switch slot to EMPTY state" 4648 << " set = " << r_dcache_clack_set.read() 4649 << " / way = " << r_dcache_clack_way.read() << std::endl; 4650 } 4651 #endif 4652 break; 4653 } 4654 4655 // wait if pending request to CC_SEND. 
This way if there are pending 4656 // CLACK they can be treated in this state and then a deadlock 4657 // situation is avoided 4658 if ( r_dcache_cc_send_req.read() ) break; 4659 4660 // CC request handler 4661 4662 int state = 0; 4663 size_t way = 0; 4664 size_t set = 0; 4665 size_t word = 0; 4506 4666 4507 4667 #ifdef INSTRUMENTATION 4508 4668 m_cpt_dcache_dir_read++; 4509 4669 #endif 4510 r_dcache.read_dir( paddr, 4511 &state, 4512 &way, 4513 &set, 4514 &word ); // unused 4515 4516 r_dcache_cc_way = way; 4517 r_dcache_cc_set = set; 4518 4519 if ( state == CACHE_SLOT_STATE_VALID) // hit 4520 { 4521 // need to update the cache state 4670 r_dcache.read_dir( paddr, 4671 &state, 4672 &way, 4673 &set, 4674 &word ); // unused 4675 4676 r_dcache_cc_way = way; 4677 r_dcache_cc_set = set; 4678 4679 if ( state == CACHE_SLOT_STATE_VALID) // hit 4680 { 4681 // need to update the cache state 4682 if (r_cc_receive_dcache_type.read() == CC_TYPE_UPDT) // hit update 4683 { 4522 4684 r_dcache_cc_need_write = true; 4523 if (r_cc_receive_dcache_type.read() == CC_TYPE_UPDT) // hit update 4524 { 4525 r_dcache_fsm = DCACHE_CC_UPDT; 4526 r_dcache_cc_word = r_cc_receive_word_idx.read(); 4527 } 4528 else if (r_cc_receive_dcache_type.read() == CC_TYPE_INVAL) // hit inval 4529 { 4530 r_dcache_fsm = DCACHE_CC_INVAL; 4531 } 4532 else if ( r_cc_receive_dcache_type.read() == CC_TYPE_BRDCAST) // hit broadcast 4533 { 4534 r_dcache_fsm = DCACHE_CC_BROADCAST; 4535 } 4536 } 4537 else // miss 4538 { 4539 // multicast acknowledgement required in case of update 4540 if(r_cc_receive_dcache_type.read() == CC_TYPE_UPDT) 4541 { 4542 r_dcache_fsm = DCACHE_CC_UPDT; 4543 r_dcache_cc_word = r_cc_receive_word_idx.read(); 4544 // just pop the fifo , don't write in icache 4545 r_dcache_cc_need_write = false; 4546 } 4547 else // No response needed 4548 { 4549 r_cc_receive_dcache_req = false; 4550 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4551 } 4552 } 4685 r_dcache_fsm = DCACHE_CC_UPDT; 4686 r_dcache_cc_word = r_cc_receive_word_idx.read(); 4687 } 4688 else if ( r_cc_receive_dcache_type.read() == CC_TYPE_INVAL ) // hit inval 4689 { 4690 r_dcache_fsm = DCACHE_CC_INVAL; 4691 } 4692 } 4693 else // miss 4694 { 4695 // multicast acknowledgement required in case of update 4696 if(r_cc_receive_dcache_type.read() == CC_TYPE_UPDT) 4697 { 4698 r_dcache_fsm = DCACHE_CC_UPDT; 4699 r_dcache_cc_word = r_cc_receive_word_idx.read(); 4700 4701 // just pop the fifo , don't write in icache 4702 r_dcache_cc_need_write = false; 4703 } 4704 else // No response needed 4705 { 4706 r_cc_receive_dcache_req = false; 4707 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4708 } 4709 } 4553 4710 4554 4711 #if DEBUG_DCACHE … … 4562 4719 } 4563 4720 #endif 4564 } 4721 4565 4722 break; 4566 4723 } 4567 4724 ///////////////////// 4568 case DCACHE_CC_INVAL: // hit inval: switch slot to EMPTY state, 4569 // after possible invalidation of copies in TLBs 4570 { 4571 size_t way = r_dcache_cc_way.read(); 4572 size_t set = r_dcache_cc_set.read(); 4573 4574 if (r_dcache_cc_need_write.read()) 4575 { 4576 if ( r_dcache_in_tlb[way*m_dcache_sets+set] ) // selective TLB inval 4577 { 4578 r_dcache_in_tlb[way*m_dcache_sets+set] = false; 4579 r_dcache_tlb_inval_line = r_cc_receive_dcache_nline.read(); 4580 r_dcache_tlb_inval_set = 0; 4581 r_dcache_fsm_scan_save = r_dcache_fsm.read(); 4582 r_dcache_fsm = DCACHE_INVAL_TLB_SCAN; 4583 break; 4584 } 4585 else 4586 { 4587 if ( r_dcache_contains_ptd[way*m_dcache_sets+set] ) // TLB flush 4588 { 4589 r_itlb.reset(); 4590 r_dtlb.reset(); 4591 
r_dcache_contains_ptd[way*m_dcache_sets+set] = false; 4725 case DCACHE_CC_INVAL: // hit inval: switch slot to ZOMBI state and send a 4726 // CLEANUP after possible invalidation of copies in 4727 // TLBs 4728 { 4729 size_t way = r_dcache_cc_way.read(); 4730 size_t set = r_dcache_cc_set.read(); 4731 4732 if ( r_dcache_in_tlb[way*m_dcache_sets+set] ) // selective TLB inval 4733 { 4734 r_dcache_in_tlb[way*m_dcache_sets+set] = false; 4735 r_dcache_tlb_inval_line = r_cc_receive_dcache_nline.read(); 4736 r_dcache_tlb_inval_set = 0; 4737 r_dcache_fsm_scan_save = r_dcache_fsm.read(); 4738 r_dcache_fsm = DCACHE_INVAL_TLB_SCAN; 4739 break; 4740 } 4741 4742 if ( r_dcache_contains_ptd[way*m_dcache_sets+set] ) // TLB flush 4743 { 4744 r_itlb.reset(); 4745 r_dtlb.reset(); 4746 r_dcache_contains_ptd[way*m_dcache_sets+set] = false; 4592 4747 4593 4748 #if DEBUG_DCACHE … … 4598 4753 } 4599 4754 #endif 4600 } 4601 4602 r_dcache.write_dir( 0, 4603 way, 4604 set, 4605 CACHE_SLOT_STATE_EMPTY ); 4606 4607 r_dcache_cc_need_write = false; 4755 } 4756 4757 assert (not r_dcache_cc_send_req.read() && 4758 "ERROR in DCACHE_CC_INVAL: the r_dcache_cc_send_req " 4759 "must not be set"); 4760 4761 // Switch slot state to ZOMBI and send CLEANUP command 4762 r_dcache.write_dir( 0, 4763 way, 4764 set, 4765 CACHE_SLOT_STATE_ZOMBI ); 4766 4767 // coherence request completed 4768 r_cc_receive_dcache_req = false; 4769 r_dcache_cc_send_req = true; 4770 r_dcache_cc_send_nline = r_cc_receive_dcache_nline.read(); 4771 r_dcache_cc_send_way = r_dcache_cc_way.read(); 4772 r_dcache_cc_send_type = CC_TYPE_CLEANUP; 4773 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4608 4774 4609 4775 #if DEBUG_DCACHE … … 4611 4777 { 4612 4778 std::cout << " <PROC " << name() 4613 4614 4615 4779 << " DCACHE_CC_INVAL> Switch slot to EMPTY state:" << std::dec 4780 << " / WAY = " << way 4781 << " / SET = " << set << std::endl; 4616 4782 } 4617 4783 #endif 4618 }4619 }4620 // multicast acknowledgement4621 // send a request to cc_send_fsm4622 if(not r_dcache_cc_send_req.read()) // cc_send is available4623 {4624 // coherence request completed4625 r_cc_receive_dcache_req = false;4626 // request multicast acknowledgement4627 r_dcache_cc_send_req = true;4628 r_dcache_cc_send_nline = r_cc_receive_dcache_nline.read();4629 r_dcache_cc_send_updt_tab_idx = r_cc_receive_dcache_updt_tab_idx.read();4630 r_dcache_cc_send_type = CC_TYPE_MULTI_ACK;4631 4632 r_dcache_fsm = r_dcache_fsm_cc_save.read();4633 }4634 //else wait for previous cc_send request to be sent4635 4784 break; 4636 4785 } 4637 4786 /////////////////// 4638 case DCACHE_CC_UPDT: 4787 case DCACHE_CC_UPDT: // hit update: write one word per cycle, 4639 4788 // after possible invalidation of copies in TLBs 4640 4789 { 4641 size_t word = r_dcache_cc_word.read(); 4642 size_t way = r_dcache_cc_way.read(); 4643 size_t set = r_dcache_cc_set.read(); 4644 4645 if (r_cc_receive_updt_fifo_be.rok()) 4646 { 4647 if (r_dcache_cc_need_write.read()) 4648 { 4649 if ( r_dcache_in_tlb[way*m_dcache_sets+set] ) // selective TLB inval 4650 { 4651 r_dcache_in_tlb[way*m_dcache_sets+set] = false; 4652 r_dcache_tlb_inval_line = r_cc_receive_dcache_nline.read(); 4653 r_dcache_tlb_inval_set = 0; 4654 r_dcache_fsm_scan_save = r_dcache_fsm.read(); 4655 r_dcache_fsm = DCACHE_INVAL_TLB_SCAN; 4656 break; 4657 } 4658 4659 if ( r_dcache_contains_ptd[way*m_dcache_sets+set] ) // TLB flush 4660 { 4661 r_itlb.reset(); 4662 r_dtlb.reset(); 4663 r_dcache_contains_ptd[way*m_dcache_sets+set] = false; 4790 size_t word = r_dcache_cc_word.read(); 4791 size_t 
way = r_dcache_cc_way.read(); 4792 size_t set = r_dcache_cc_set.read(); 4793 4794 if ( r_dcache_in_tlb[way*m_dcache_sets+set] ) // selective TLB inval 4795 { 4796 r_dcache_in_tlb[way*m_dcache_sets+set] = false; 4797 r_dcache_tlb_inval_line = r_cc_receive_dcache_nline.read(); 4798 r_dcache_tlb_inval_set = 0; 4799 r_dcache_fsm_scan_save = r_dcache_fsm.read(); 4800 r_dcache_fsm = DCACHE_INVAL_TLB_SCAN; 4801 4802 break; 4803 } 4804 4805 if ( r_dcache_contains_ptd[way*m_dcache_sets+set] ) // TLB flush 4806 { 4807 r_itlb.reset(); 4808 r_dtlb.reset(); 4809 r_dcache_contains_ptd[way*m_dcache_sets+set] = false; 4664 4810 4665 4811 #if DEBUG_DCACHE … … 4670 4816 } 4671 4817 #endif 4672 } 4673 4818 } 4819 4820 assert (not r_dcache_cc_send_req.read() && 4821 "ERROR in DCACHE_CC_INVAL: the r_dcache_cc_send_req " 4822 "must not be set"); 4823 4824 if ( not r_cc_receive_updt_fifo_be.rok() ) break; 4825 4826 if (r_dcache_cc_need_write.read()) 4827 { 4828 4674 4829 #ifdef INSTRUMENTATION 4675 4830 m_cpt_dcache_data_write++; 4676 4831 #endif 4677 4678 4679 4680 4681 4682 4683 4832 r_dcache.write( way, 4833 set, 4834 word, 4835 r_cc_receive_updt_fifo_data.read(), 4836 r_cc_receive_updt_fifo_be.read() ); 4837 4838 r_dcache_cc_word = word + 1; 4684 4839 4685 4840 #if DEBUG_DCACHE … … 4694 4849 } 4695 4850 #endif 4696 } 4697 4698 if ( r_cc_receive_updt_fifo_eop.read() ) // last word 4699 { 4700 // no need to write in the cache anymore 4701 r_dcache_cc_need_write = false; 4702 4703 // wait to send a request to cc_send_fsm 4704 if(not r_dcache_cc_send_req.read()) 4705 // cc_send is available 4706 { 4707 //consume last fifo flit if eop and request to cc_send possible 4708 cc_receive_updt_fifo_get = true; 4709 4710 // coherence request completed 4711 r_cc_receive_dcache_req = false; 4712 4713 // request multicast acknowledgement 4714 r_dcache_cc_send_req = true; 4715 r_dcache_cc_send_nline = r_cc_receive_dcache_nline.read(); 4716 r_dcache_cc_send_updt_tab_idx = r_cc_receive_dcache_updt_tab_idx.read(); 4717 r_dcache_cc_send_type = CC_TYPE_MULTI_ACK; 4718 4719 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4720 } 4721 } 4722 else 4723 { 4724 //consume fifo if not eop 4725 cc_receive_updt_fifo_get = true; 4726 } 4727 } 4728 break; 4729 } 4730 ///////////////////////// 4731 case DCACHE_CC_BROADCAST: // hit broadcast : switch state to ZOMBI state 4732 // and request a cleanup, after possible 4733 // invalidation of copies in TLBs 4734 { 4735 size_t way = r_dcache_cc_way.read(); 4736 size_t set = r_dcache_cc_set.read(); 4737 paddr_t nline = r_cc_receive_dcache_nline.read(); 4738 4739 if (r_dcache_cc_need_write.read()) 4740 { 4741 if ( r_dcache_in_tlb[way*m_dcache_sets+set] ) // selective TLB inval 4742 { 4743 r_dcache_in_tlb[way*m_dcache_sets+set] = false; 4744 r_dcache_tlb_inval_line = nline; 4745 r_dcache_tlb_inval_set = 0; 4746 r_dcache_fsm_scan_save = r_dcache_fsm.read(); 4747 r_dcache_fsm = DCACHE_INVAL_TLB_SCAN; 4748 break; 4749 } 4750 else 4751 { 4752 if ( r_dcache_contains_ptd[way*m_dcache_sets+set] ) // TLB flush 4753 { 4754 r_itlb.reset(); 4755 r_dtlb.reset(); 4756 r_dcache_contains_ptd[way*m_dcache_sets+set] = false; 4757 4758 #if DEBUG_DCACHE 4759 if ( m_debug_activated ) 4760 { 4761 std::cout << " <PROC " << name() 4762 << " DCACHE_CC_BROADCAST> Flush DTLB & ITLB" << std::endl; 4763 } 4764 #endif 4765 } 4766 4767 #ifdef INSTRUMENTATION 4768 m_cpt_dcache_dir_write++; 4769 #endif 4770 r_dcache.write_dir( way, 4771 set, 4772 CACHE_SLOT_STATE_ZOMBI ); 4773 4774 r_dcache_cc_need_write = false; 4775 #if DEBUG_DCACHE 
4776 if ( m_debug_activated ) 4777 { 4778 std::cout << " <PROC " << name() 4779 << " DCACHE_CC_BROADCAST > Slot goes to ZOMBI state " 4780 << " SET = " << set 4781 << " / WAY = " << way << std::endl; 4782 } 4783 #endif 4784 } 4785 } 4786 // cleanup 4787 // send a request to cc_send_fsm 4788 if(not r_dcache_cc_send_req.read()) // cc_send is available 4789 { 4851 } 4852 4853 if ( r_cc_receive_updt_fifo_eop.read() ) // last word 4854 { 4855 // no need to write in the cache anymore 4856 r_dcache_cc_need_write = false; 4857 4790 4858 // coherence request completed 4791 4859 r_cc_receive_dcache_req = false; 4792 // request cleanup 4793 r_dcache_cc_send_req = true; 4794 r_dcache_cc_send_nline = r_cc_receive_dcache_nline.read(); 4795 r_dcache_cc_send_way = r_dcache_cc_way.read(); 4796 r_dcache_cc_send_type = CC_TYPE_CLEANUP; 4797 4798 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4799 } 4800 //else wait for previous cc_send request to be sent 4860 4861 // request multicast acknowledgement 4862 r_dcache_cc_send_req = true; 4863 r_dcache_cc_send_nline = r_cc_receive_dcache_nline.read(); 4864 r_dcache_cc_send_updt_tab_idx = r_cc_receive_dcache_updt_tab_idx.read(); 4865 r_dcache_cc_send_type = CC_TYPE_MULTI_ACK; 4866 4867 r_dcache_fsm = r_dcache_fsm_cc_save.read(); 4868 } 4869 4870 //consume fifo if not eop 4871 cc_receive_updt_fifo_get = true; 4872 4801 4873 break; 4802 4874 } … … 5389 5461 { 5390 5462 // wait for the first flit to be consumed 5391 if (p_dspin_ out.read.read())5463 if (p_dspin_p2m.read.read()) 5392 5464 r_cc_send_fsm = CC_SEND_CLEANUP_2; 5393 5465 … … 5398 5470 { 5399 5471 // wait for the second flit to be consumed 5400 if (p_dspin_ out.read.read())5472 if (p_dspin_p2m.read.read()) 5401 5473 { 5402 5474 if (r_cc_send_last_client.read() == 0) // dcache active request … … 5414 5486 { 5415 5487 // wait for the flit to be consumed 5416 if(p_dspin_ out.read.read())5488 if(p_dspin_p2m.read.read()) 5417 5489 { 5418 5490 if(r_cc_send_last_client.read() == 0) // dcache active request … … 5428 5500 5429 5501 /////////////////////////////////////////////////////////////////////////////// 5430 // 5502 // CC_RECEIVE FSM 5431 5503 // This FSM receive all coherence packets on a DSPIN40 port. 
5432 // There is 7packet types:5504 // There is 5 packet types: 5433 5505 // - CC_DATA_INVAL : DCACHE invalidate request 5434 5506 // - CC_DATA_UPDT : DCACHE update request (multi-words) … … 5436 5508 // - CC_INST_UPDT : ICACHE update request (multi-words) 5437 5509 // - CC_BROADCAST : Broadcast invalidate request (both DCACHE & ICACHE) 5438 // - CC_DATA_CLACK : DCACHE cleanup acknowledge5439 // - CC_INST_CLACK : ICACHE cleanup acknowledge5440 5510 ////////////////////////////////////////////////////////////////////////////// 5441 5511 switch( r_cc_receive_fsm.read() ) … … 5445 5515 { 5446 5516 // a coherence request has arrived 5447 if (p_dspin_ in.write.read())5517 if (p_dspin_m2p.write.read()) 5448 5518 { 5449 5519 // initialize dspin received data 5450 uint64_t receive_data = p_dspin_ in.data.read();5520 uint64_t receive_data = p_dspin_m2p.data.read(); 5451 5521 // initialize coherence packet type 5452 5522 uint64_t receive_type = DspinDhccpParam::dspin_get(receive_data, 5453 DspinDhccpParam::FROM_MC_TYPE); 5454 // initialize data/ins flip_flop (0 data / 1 ins) 5455 r_cc_receive_data_ins = (bool)(receive_type & 0x1); 5523 DspinDhccpParam::M2P_TYPE); 5456 5524 // test for a broadcast 5457 if (DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam:: FROM_MC_BC))5525 if (DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::M2P_BC)) 5458 5526 { 5459 5527 r_cc_receive_fsm = CC_RECEIVE_BRDCAST_HEADER; 5460 5528 } 5461 // test for a CLACK 5462 else if ((receive_type == DspinDhccpParam::TYPE_CLEANUP_ACK_DATA) or 5463 (receive_type == DspinDhccpParam::TYPE_CLEANUP_ACK_INST)) 5529 // test for a multi updt 5530 else if (receive_type == DspinDhccpParam::TYPE_MULTI_UPDT_DATA) 5464 5531 { 5465 r_cc_receive_fsm = CC_RECEIVE_ CLACK;5532 r_cc_receive_fsm = CC_RECEIVE_DATA_UPDT_HEADER; 5466 5533 } 5467 // test for a multi updt 5468 else if ((receive_type == DspinDhccpParam::TYPE_MULTI_UPDT_DATA) or 5469 (receive_type == DspinDhccpParam::TYPE_MULTI_UPDT_INST)) 5534 else if (receive_type == DspinDhccpParam::TYPE_MULTI_UPDT_INST) 5470 5535 { 5471 r_cc_receive_fsm = CC_RECEIVE_ UPDT_HEADER;5536 r_cc_receive_fsm = CC_RECEIVE_INS_UPDT_HEADER; 5472 5537 } 5473 5538 // test for a multi inval 5539 else if (receive_type == DspinDhccpParam::TYPE_MULTI_INVAL_DATA) 5540 { 5541 r_cc_receive_fsm = CC_RECEIVE_DATA_INVAL_HEADER; 5542 } 5474 5543 else 5475 5544 { 5476 r_cc_receive_fsm = CC_RECEIVE_IN VAL_HEADER;5545 r_cc_receive_fsm = CC_RECEIVE_INS_INVAL_HEADER; 5477 5546 } 5478 5547 } 5479 break;5480 }5481 //////////////////////5482 case CC_RECEIVE_CLACK:5483 {5484 // initialize dspin received data5485 uint64_t receive_data = p_dspin_in.data.read();5486 5487 // for data CLACK, wait for dcache to take the request5488 if ((r_cc_receive_data_ins.read() == 0) and5489 not (r_cc_receive_dcache_req.read()))5490 {5491 // request dcache to handle the CLACK5492 r_cc_receive_dcache_req = true;5493 r_cc_receive_dcache_set = DspinDhccpParam::dspin_get(receive_data,5494 DspinDhccpParam::CLEANUP_ACK_SET) &5495 ((1ULL<<(uint32_log2(m_dcache_sets)))-1);5496 r_cc_receive_dcache_way = DspinDhccpParam::dspin_get(receive_data,5497 DspinDhccpParam::CLEANUP_ACK_WAY) &5498 ((1ULL<<(uint32_log2(m_dcache_ways)))-1);5499 r_cc_receive_dcache_type = CC_TYPE_CLACK;5500 // get back to idle state5501 r_cc_receive_fsm = CC_RECEIVE_IDLE;5502 break;5503 }5504 // for ins CLACK, wait for icache to take the request5505 if ((r_cc_receive_data_ins.read() == 1) and5506 not (r_cc_receive_icache_req.read()))5507 {5508 // request icache to handle the 
CLACK5509 r_cc_receive_icache_req = true;5510 r_cc_receive_icache_set = DspinDhccpParam::dspin_get(receive_data,5511 DspinDhccpParam::CLEANUP_ACK_SET) &5512 ((1ULL<<(uint32_log2(m_icache_sets)))-1);5513 r_cc_receive_icache_way = DspinDhccpParam::dspin_get(receive_data,5514 DspinDhccpParam::CLEANUP_ACK_WAY) &5515 ((1ULL<<(uint32_log2(m_icache_ways)))-1);5516 r_cc_receive_icache_type = CC_TYPE_CLACK;5517 // get back to idle state5518 r_cc_receive_fsm = CC_RECEIVE_IDLE;5519 break;5520 }5521 // keep waiting for the correct cache to accept the request5522 5548 break; 5523 5549 } … … 5533 5559 { 5534 5560 // initialize dspin received data 5535 uint64_t receive_data = p_dspin_ in.data.read();5561 uint64_t receive_data = p_dspin_m2p.data.read(); 5536 5562 // wait for both dcache and icache to take the request 5537 5563 // TODO maybe we need to wait for both only to leave the state, but … … 5540 5566 if (not (r_cc_receive_icache_req.read()) and 5541 5567 not (r_cc_receive_dcache_req.read()) and 5542 (p_dspin_ in.write.read()))5568 (p_dspin_m2p.write.read())) 5543 5569 { 5544 5570 // request dcache to handle the BROADCAST 5545 r_cc_receive_dcache_req 5571 r_cc_receive_dcache_req = true; 5546 5572 r_cc_receive_dcache_nline = DspinDhccpParam::dspin_get(receive_data, 5547 5573 DspinDhccpParam::BROADCAST_NLINE); 5548 r_cc_receive_dcache_type = CC_TYPE_ BRDCAST;5574 r_cc_receive_dcache_type = CC_TYPE_INVAL; 5549 5575 // request icache to handle the BROADCAST 5550 r_cc_receive_icache_req 5576 r_cc_receive_icache_req = true; 5551 5577 r_cc_receive_icache_nline = DspinDhccpParam::dspin_get(receive_data, 5552 5578 DspinDhccpParam::BROADCAST_NLINE); 5553 r_cc_receive_icache_type = CC_TYPE_ BRDCAST;5579 r_cc_receive_icache_type = CC_TYPE_INVAL; 5554 5580 // get back to idle state 5555 5581 r_cc_receive_fsm = CC_RECEIVE_IDLE; … … 5560 5586 } 5561 5587 ///////////////////////////// 5562 case CC_RECEIVE_ INVAL_HEADER:5588 case CC_RECEIVE_DATA_INVAL_HEADER: 5563 5589 { 5564 5590 // sample updt tab index in the HEADER, then skip to second flit 5565 uint64_t receive_data = p_dspin_in.data.read(); 5591 r_cc_receive_fsm = CC_RECEIVE_DATA_INVAL_NLINE; 5592 break; 5593 } 5594 ///////////////////////////// 5595 case CC_RECEIVE_INS_INVAL_HEADER: 5596 { 5597 // sample updt tab index in the HEADER, then skip to second flit 5598 r_cc_receive_fsm = CC_RECEIVE_INS_INVAL_NLINE; 5599 break; 5600 } 5601 //////////////////////////// 5602 case CC_RECEIVE_DATA_INVAL_NLINE: 5603 { 5604 // sample nline in the second flit 5605 uint64_t receive_data = p_dspin_m2p.data.read(); 5566 5606 // for data INVAL, wait for dcache to take the request 5567 if ((r_cc_receive_data_ins.read() == 0) and 5568 not (r_cc_receive_dcache_req.read())) 5569 { 5570 r_cc_receive_dcache_updt_tab_idx = DspinDhccpParam::dspin_get(receive_data, 5571 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 5572 r_cc_receive_fsm = CC_RECEIVE_INVAL_NLINE; 5573 break; 5574 } 5575 // for ins INVAL, wait for icache to take the request 5576 if ((r_cc_receive_data_ins.read() == 1) and 5577 not (r_cc_receive_icache_req.read())) 5578 { 5579 r_cc_receive_icache_updt_tab_idx = DspinDhccpParam::dspin_get(receive_data, 5580 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 5581 r_cc_receive_fsm = CC_RECEIVE_INVAL_NLINE; 5582 break; 5583 } 5584 // keep waiting for the correct cache to accept the request 5585 break; 5586 } 5587 //////////////////////////// 5588 case CC_RECEIVE_INVAL_NLINE: 5589 { 5590 // sample nline in the second flit 5591 uint64_t receive_data = p_dspin_in.data.read(); 5592 // for 
data INVAL, wait for dcache to take the request 5593 if ( (r_cc_receive_data_ins.read() == 0) and 5594 not (r_cc_receive_dcache_req.read()) and 5595 (p_dspin_in.write.read()) ) 5607 if (p_dspin_m2p.write.read() and not r_cc_receive_dcache_req.read()) 5596 5608 { 5597 5609 // request dcache to handle the INVAL 5598 r_cc_receive_dcache_req 5599 r_cc_receive_dcache_nline 5610 r_cc_receive_dcache_req = true; 5611 r_cc_receive_dcache_nline = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_INVAL_NLINE); 5600 5612 r_cc_receive_dcache_type = CC_TYPE_INVAL; 5601 5613 // get back to idle state … … 5603 5615 break; 5604 5616 } 5617 break; 5618 } 5619 ////////////////////////////// 5620 case CC_RECEIVE_INS_INVAL_NLINE: 5621 { 5622 // sample nline in the second flit 5623 uint64_t receive_data = p_dspin_m2p.data.read(); 5605 5624 // for ins INVAL, wait for icache to take the request 5606 if ( (r_cc_receive_data_ins.read() == 1) and not (r_cc_receive_icache_req.read()) and (p_dspin_in.write.read()))5625 if (p_dspin_m2p.write.read() and not r_cc_receive_icache_req.read()) 5607 5626 { 5608 5627 // request icache to handle the INVAL 5609 r_cc_receive_icache_req 5610 r_cc_receive_icache_nline 5628 r_cc_receive_icache_req = true; 5629 r_cc_receive_icache_nline = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_INVAL_NLINE); 5611 5630 r_cc_receive_icache_type = CC_TYPE_INVAL; 5612 5631 // get back to idle state … … 5614 5633 break; 5615 5634 } 5616 // we should never get there 5617 assert ( false && "ERROR in CC_VCACHE : CC_RECEIVE_INVAL_NLINE\n"); 5635 break; 5618 5636 } 5619 5637 //////////////////////////// 5620 case CC_RECEIVE_ UPDT_HEADER:5638 case CC_RECEIVE_DATA_UPDT_HEADER: 5621 5639 { 5622 5640 // sample updt tab index in the HEADER, than skip to second flit 5623 uint64_t receive_data = p_dspin_ in.data.read();5641 uint64_t receive_data = p_dspin_m2p.data.read(); 5624 5642 // for data INVAL, wait for dcache to take the request and fifo to 5625 5643 // be empty 5626 if ( (r_cc_receive_data_ins.read() == 0) and not r_cc_receive_dcache_req.read() and r_cc_receive_updt_fifo_be.empty())5644 if (not r_cc_receive_dcache_req.read()) 5627 5645 { 5628 5646 r_cc_receive_dcache_updt_tab_idx = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 5629 r_cc_receive_fsm = CC_RECEIVE_ UPDT_NLINE;5647 r_cc_receive_fsm = CC_RECEIVE_DATA_UPDT_NLINE; 5630 5648 break; 5631 5649 } 5650 break; 5651 } 5652 //////////////////////////// 5653 case CC_RECEIVE_INS_UPDT_HEADER: 5654 { 5655 // sample updt tab index in the HEADER, than skip to second flit 5656 uint64_t receive_data = p_dspin_m2p.data.read(); 5632 5657 // for ins INVAL, wait for icache to take the request and fifo to be 5633 5658 // empty 5634 if ( (r_cc_receive_data_ins.read() == 1) and not r_cc_receive_icache_req.read() and r_cc_receive_updt_fifo_be.empty())5659 if (not r_cc_receive_icache_req.read()) 5635 5660 { 5636 5661 r_cc_receive_icache_updt_tab_idx = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 5637 r_cc_receive_fsm = CC_RECEIVE_ UPDT_NLINE;5662 r_cc_receive_fsm = CC_RECEIVE_INS_UPDT_NLINE; 5638 5663 break; 5639 5664 } … … 5642 5667 } 5643 5668 /////////////////////////// 5644 case CC_RECEIVE_ UPDT_NLINE:5669 case CC_RECEIVE_DATA_UPDT_NLINE: 5645 5670 { 5646 5671 // sample nline and word index in the second flit 5647 uint64_t receive_data = p_dspin_ in.data.read();5672 uint64_t receive_data = p_dspin_m2p.data.read(); 5648 5673 // for data INVAL, wait for dcache to 
take the request and fifo to 5649 5674 // be empty 5650 if ( (r_cc_receive_data_ins.read() == 0) and 5651 not (r_cc_receive_dcache_req.read()) and 5652 r_cc_receive_updt_fifo_be.empty() and 5653 (p_dspin_in.write.read()) ) 5654 { 5675 if ( r_cc_receive_updt_fifo_be.empty() and 5676 p_dspin_m2p.write.read() ) 5677 { 5678 r_cc_receive_dcache_req = true; 5655 5679 r_cc_receive_dcache_nline = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_NLINE); 5656 5680 r_cc_receive_word_idx = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 5657 5681 r_cc_receive_dcache_type = CC_TYPE_UPDT; 5658 5682 // get back to idle state 5659 r_cc_receive_fsm = CC_RECEIVE_ UPDT_DATA;5683 r_cc_receive_fsm = CC_RECEIVE_DATA_UPDT_DATA; 5660 5684 break; 5661 5685 } 5686 break; 5687 } 5688 //////////////////////////// 5689 case CC_RECEIVE_INS_UPDT_NLINE: 5690 { 5691 // sample nline and word index in the second flit 5692 uint64_t receive_data = p_dspin_m2p.data.read(); 5662 5693 // for ins INVAL, wait for icache to take the request and fifo to be 5663 5694 // empty 5664 if ( (r_cc_receive_data_ins.read() == 1) and 5665 not (r_cc_receive_icache_req.read()) and 5666 r_cc_receive_updt_fifo_be.empty() and 5667 (p_dspin_in.write.read())) 5668 { 5695 if ( r_cc_receive_updt_fifo_be.empty() and 5696 p_dspin_m2p.write.read() ) 5697 { 5698 r_cc_receive_icache_req = true; 5669 5699 r_cc_receive_icache_nline = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_NLINE); 5670 5700 r_cc_receive_word_idx = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 5671 5701 r_cc_receive_icache_type = CC_TYPE_UPDT; 5672 5702 // get back to idle state 5673 r_cc_receive_fsm = CC_RECEIVE_ UPDT_DATA;5703 r_cc_receive_fsm = CC_RECEIVE_INS_UPDT_DATA; 5674 5704 break; 5675 5705 } 5676 // we should never get there5677 assert ( false && "ERROR in CC_VCACHE : CC_RECEIVE_UPDT_NLINE \n");5678 5706 break; 5679 5707 } 5680 5708 ////////////////////////// 5681 case CC_RECEIVE_UPDT_DATA: 5682 { 5683 if ((r_cc_receive_data_ins.read() == 0) and not (r_cc_receive_dcache_req.read()) and (p_dspin_in.write.read())) 5684 r_cc_receive_dcache_req = true; 5685 if ((r_cc_receive_data_ins.read() == 1) and not (r_cc_receive_icache_req.read()) and (p_dspin_in.write.read())) 5686 r_cc_receive_icache_req = true; 5687 5709 case CC_RECEIVE_DATA_UPDT_DATA: 5710 { 5688 5711 // wait for the fifo 5689 if (r_cc_receive_updt_fifo_be.wok() and (p_dspin_ in.write.read()))5690 { 5691 uint64_t receive_data = p_dspin_ in.data.read();5692 bool receive_eop = p_dspin_ in.eop.read();5712 if (r_cc_receive_updt_fifo_be.wok() and (p_dspin_m2p.write.read())) 5713 { 5714 uint64_t receive_data = p_dspin_m2p.data.read(); 5715 bool receive_eop = p_dspin_m2p.eop.read(); 5693 5716 cc_receive_updt_fifo_be = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_BE); 5694 5717 cc_receive_updt_fifo_data = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_DATA); … … 5699 5722 break; 5700 5723 } 5724 ////////////////////////// 5725 case CC_RECEIVE_INS_UPDT_DATA: 5726 { 5727 // wait for the fifo 5728 if (r_cc_receive_updt_fifo_be.wok() and (p_dspin_m2p.write.read())) 5729 { 5730 uint64_t receive_data = p_dspin_m2p.data.read(); 5731 bool receive_eop = p_dspin_m2p.eop.read(); 5732 cc_receive_updt_fifo_be = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_BE); 5733 cc_receive_updt_fifo_data = DspinDhccpParam::dspin_get(receive_data,DspinDhccpParam::MULTI_UPDT_DATA); 
5734 cc_receive_updt_fifo_eop = receive_eop; 5735 cc_receive_updt_fifo_put = true; 5736 if ( receive_eop ) r_cc_receive_fsm = CC_RECEIVE_IDLE; 5737 } 5738 break; 5739 } 5740 5701 5741 } // end switch CC_RECEIVE FSM 5742 5743 ///////////////// DSPIN CLACK interface /////////////// 5744 5745 uint64_t clack_type = DspinDhccpParam::dspin_get(r_dspin_clack_flit.read(), 5746 DspinDhccpParam::CLACK_TYPE); 5747 5748 size_t clack_way = DspinDhccpParam::dspin_get(r_dspin_clack_flit.read(), 5749 DspinDhccpParam::CLACK_WAY); 5750 5751 size_t clack_set = DspinDhccpParam::dspin_get(r_dspin_clack_flit.read(), 5752 DspinDhccpParam::CLACK_SET); 5753 5754 bool dspin_clack_get = false; 5755 bool dcache_clack_request = (clack_type == DspinDhccpParam::TYPE_CLACK_DATA); 5756 bool icache_clack_request = (clack_type == DspinDhccpParam::TYPE_CLACK_INST); 5757 5758 if (r_dspin_clack_req.read()) 5759 { 5760 // CLACK DATA: Send request to DCACHE FSM 5761 if (dcache_clack_request and not r_dcache_clack_req.read()){ 5762 r_dcache_clack_req = true; 5763 r_dcache_clack_way = clack_way & ((1ULL<<(uint32_log2(m_dcache_ways)))-1); 5764 r_dcache_clack_set = clack_set & ((1ULL<<(uint32_log2(m_dcache_sets)))-1); 5765 dspin_clack_get = true; 5766 } 5767 5768 // CLACK INST: Send request to ICACHE FSM 5769 else if (icache_clack_request and not r_icache_clack_req.read()){ 5770 r_icache_clack_req = true; 5771 r_icache_clack_way = clack_way & ((1ULL<<(uint32_log2(m_dcache_ways)))-1); 5772 r_icache_clack_set = clack_set & ((1ULL<<(uint32_log2(m_icache_sets)))-1); 5773 dspin_clack_get = true; 5774 } 5775 } 5776 else 5777 { 5778 dspin_clack_get = true; 5779 } 5780 5781 if (dspin_clack_get) 5782 { 5783 r_dspin_clack_req = p_dspin_clack.write.read(); 5784 r_dspin_clack_flit = p_dspin_clack.data.read(); 5785 } 5702 5786 5703 5787 ///////////////// Response FIFOs update ////////////////////// … … 5897 5981 case CC_SEND_IDLE: 5898 5982 { 5899 p_dspin_ out.write = false;5983 p_dspin_p2m.write = false; 5900 5984 break; 5901 5985 } … … 5906 5990 // DspinDhccpParam::dspin_set(dspin_send_data, 5907 5991 // 0, 5908 // DspinDhccpParam:: FROM_L1_EOP);5992 // DspinDhccpParam::P2M_EOP); 5909 5993 DspinDhccpParam::dspin_set(dspin_send_data, 5910 5994 m_cc_global_id, … … 5912 5996 DspinDhccpParam::dspin_set(dspin_send_data, 5913 5997 0, 5914 DspinDhccpParam:: FROM_L1_BC);5998 DspinDhccpParam::P2M_BC); 5915 5999 5916 6000 if(r_cc_send_last_client.read() == 0) // dcache active request … … 5934 6018 DspinDhccpParam::dspin_set(dspin_send_data, 5935 6019 DspinDhccpParam::TYPE_CLEANUP_DATA, 5936 DspinDhccpParam:: FROM_L1_TYPE);6020 DspinDhccpParam::P2M_TYPE); 5937 6021 } 5938 6022 else // icache active request … … 5956 6040 DspinDhccpParam::dspin_set(dspin_send_data, 5957 6041 DspinDhccpParam::TYPE_CLEANUP_INST, 5958 DspinDhccpParam:: FROM_L1_TYPE);6042 DspinDhccpParam::P2M_TYPE); 5959 6043 } 5960 6044 // send flit 5961 p_dspin_ out.data = dspin_send_data;5962 p_dspin_ out.write = true;5963 p_dspin_ out.eop = false;6045 p_dspin_p2m.data = dspin_send_data; 6046 p_dspin_p2m.write = true; 6047 p_dspin_p2m.eop = false; 5964 6048 break; 5965 6049 } … … 5970 6054 // DspinDhccpParam::dspin_set(dspin_send_data, 5971 6055 // 1, 5972 // DspinDhccpParam:: FROM_L1_EOP);6056 // DspinDhccpParam::P2M_EOP); 5973 6057 5974 6058 if(r_cc_send_last_client.read() == 0) // dcache active request … … 5985 6069 } 5986 6070 // send flit 5987 p_dspin_ out.data = dspin_send_data;5988 p_dspin_ out.write = true;5989 p_dspin_ out.eop = true;6071 p_dspin_p2m.data = dspin_send_data; 6072 
p_dspin_p2m.write = true; 6073 p_dspin_p2m.eop = true; 5990 6074 break; 5991 6075 } … … 5996 6080 // DspinDhccpParam::dspin_set(dspin_send_data, 5997 6081 // 1, 5998 // DspinDhccpParam:: FROM_L1_EOP);6082 // DspinDhccpParam::P2M_EOP); 5999 6083 DspinDhccpParam::dspin_set(dspin_send_data, 6000 6084 0, 6001 DspinDhccpParam:: FROM_L1_BC);6085 DspinDhccpParam::P2M_BC); 6002 6086 DspinDhccpParam::dspin_set(dspin_send_data, 6003 6087 DspinDhccpParam::TYPE_MULTI_ACK, 6004 DspinDhccpParam:: FROM_L1_TYPE);6088 DspinDhccpParam::P2M_TYPE); 6005 6089 6006 6090 if(r_cc_send_last_client.read() == 0) // dcache active request … … 6034 6118 } 6035 6119 // send flit 6036 p_dspin_ out.data = dspin_send_data;6037 p_dspin_ out.write = true;6038 p_dspin_ out.eop = true;6120 p_dspin_p2m.data = dspin_send_data; 6121 p_dspin_p2m.write = true; 6122 p_dspin_p2m.eop = true; 6039 6123 6040 6124 break; … … 6044 6128 // Receive coherence packets 6045 6129 // It depends on the CC_RECEIVE FSM 6046 6047 6130 switch( r_cc_receive_fsm.read() ) 6048 6131 { … … 6050 6133 case CC_RECEIVE_IDLE: 6051 6134 { 6052 p_dspin_in.read = false; 6053 break; 6054 } 6055 ////////////////////// 6056 case CC_RECEIVE_CLACK: 6057 { 6058 if (((r_cc_receive_data_ins.read() == 0) and not (r_cc_receive_dcache_req.read())) or 6059 ((r_cc_receive_data_ins.read() == 1) and not (r_cc_receive_icache_req.read()))) 6060 p_dspin_in.read = true; 6061 else 6062 p_dspin_in.read = false; 6135 p_dspin_m2p.read = false; 6063 6136 break; 6064 6137 } … … 6066 6139 case CC_RECEIVE_BRDCAST_HEADER: 6067 6140 { 6068 p_dspin_ in.read = true;6141 p_dspin_m2p.read = true; 6069 6142 break; 6070 6143 } … … 6076 6149 // flip_flop to check that ? 6077 6150 if (not (r_cc_receive_icache_req.read()) and not (r_cc_receive_dcache_req.read())) 6078 p_dspin_ in.read = true;6151 p_dspin_m2p.read = true; 6079 6152 else 6080 p_dspin_ in.read = false;6153 p_dspin_m2p.read = false; 6081 6154 break; 6082 6155 } 6083 6156 ///////////////////////////// 6084 case CC_RECEIVE_INVAL_HEADER: 6085 { 6086 if (((r_cc_receive_data_ins.read() == 0) and not (r_cc_receive_dcache_req.read())) or 6087 ((r_cc_receive_data_ins.read() == 1) and not (r_cc_receive_icache_req.read()))) 6088 p_dspin_in.read = true; 6157 case CC_RECEIVE_DATA_INVAL_HEADER: 6158 case CC_RECEIVE_INS_INVAL_HEADER: 6159 { 6160 p_dspin_m2p.read = true; 6161 break; 6162 } 6163 //////////////////////////// 6164 case CC_RECEIVE_DATA_INVAL_NLINE: 6165 { 6166 p_dspin_m2p.read = not r_cc_receive_dcache_req.read(); 6167 break; 6168 } 6169 case CC_RECEIVE_INS_INVAL_NLINE: 6170 { 6171 p_dspin_m2p.read = not r_cc_receive_icache_req.read(); 6172 break; 6173 } 6174 /////////////////////////// 6175 case CC_RECEIVE_DATA_UPDT_HEADER: 6176 { 6177 if (not r_cc_receive_dcache_req.read()) 6178 p_dspin_m2p.read = true; 6089 6179 else 6090 p_dspin_ in.read = false;6180 p_dspin_m2p.read = false; 6091 6181 break; 6092 6182 } 6093 6183 //////////////////////////// 6094 case CC_RECEIVE_INVAL_NLINE: 6095 { 6096 p_dspin_in.read = true; 6097 break; 6098 } 6099 //////////////////////////// 6100 case CC_RECEIVE_UPDT_HEADER: 6101 { 6102 if (((r_cc_receive_data_ins.read() == 0) and 6103 not r_cc_receive_dcache_req.read() and 6104 r_cc_receive_updt_fifo_be.empty()) 6105 or 6106 (((r_cc_receive_data_ins.read() == 1) and 6107 not r_cc_receive_icache_req.read()) and 6108 r_cc_receive_updt_fifo_be.empty())) 6109 p_dspin_in.read = true; 6184 case CC_RECEIVE_INS_UPDT_HEADER: 6185 { 6186 if ( not r_cc_receive_icache_req.read()) 6187 p_dspin_m2p.read = true; 6110 6188 else 
6111 p_dspin_ in.read = false;6189 p_dspin_m2p.read = false; 6112 6190 break; 6113 6191 } 6114 6192 /////////////////////////// 6115 case CC_RECEIVE_UPDT_NLINE: 6116 { 6117 if (((r_cc_receive_data_ins.read() == 0) and 6118 not (r_cc_receive_dcache_req.read()) and 6119 r_cc_receive_updt_fifo_be.empty()) 6120 or 6121 ((r_cc_receive_data_ins.read() == 1) and 6122 not (r_cc_receive_icache_req.read()) and 6123 r_cc_receive_updt_fifo_be.empty())) 6124 p_dspin_in.read = true; 6193 case CC_RECEIVE_DATA_UPDT_NLINE: 6194 case CC_RECEIVE_INS_UPDT_NLINE: 6195 { 6196 if(r_cc_receive_updt_fifo_be.empty()) 6197 p_dspin_m2p.read = true; 6125 6198 else 6126 p_dspin_in.read = false; 6127 break; 6128 } 6129 ////////////////////////// 6130 case CC_RECEIVE_UPDT_DATA: 6199 p_dspin_m2p.read = false; 6200 break; 6201 } 6202 /////////////////////////// 6203 case CC_RECEIVE_DATA_UPDT_DATA: 6204 case CC_RECEIVE_INS_UPDT_DATA: 6131 6205 { 6132 6206 if (r_cc_receive_updt_fifo_be.wok()) 6133 p_dspin_ in.read = true;6207 p_dspin_m2p.read = true; 6134 6208 else 6135 p_dspin_ in.read = false;6209 p_dspin_m2p.read = false; 6136 6210 break; 6137 6211 } 6138 6212 } // end switch CC_RECEIVE FSM 6139 6213 6214 6215 int clack_type = DspinDhccpParam::dspin_get(r_dspin_clack_flit.read(), 6216 DspinDhccpParam::CLACK_TYPE); 6217 6218 bool dspin_clack_get = false; 6219 bool dcache_clack_request = (clack_type == DspinDhccpParam::TYPE_CLACK_DATA); 6220 bool icache_clack_request = (clack_type == DspinDhccpParam::TYPE_CLACK_INST); 6221 6222 if (r_dspin_clack_req.read()) 6223 { 6224 // CLACK DATA: wait if pending request to DCACHE FSM 6225 if (dcache_clack_request and not r_dcache_clack_req.read()) 6226 { 6227 dspin_clack_get = true; 6228 } 6229 6230 // CLACK INST: wait if pending request to ICACHE FSM 6231 else if (icache_clack_request and not r_icache_clack_req.read()) 6232 { 6233 dspin_clack_get = true; 6234 } 6235 } 6236 else 6237 { 6238 dspin_clack_get = true; 6239 } 6240 6241 p_dspin_clack.read = dspin_clack_get; 6140 6242 } // end genMoore 6141 6243 -
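The new CLACK handling above registers one flit in r_dspin_clack_flit and only accepts the next one once the decoded acknowledge has been handed to the DCACHE or ICACHE FSM. A minimal sketch of that decode-and-dispatch step in plain C++ follows; the bit positions are placeholders, since the real CLACK_TYPE / CLACK_WAY / CLACK_SET field definitions live in DspinDhccpParam and are not part of this changeset, and the helper names are ours, not soclib's.

#include <cstdint>
#include <cstddef>

// Placeholder layout of a CLACK flit (hypothetical bit positions):
// TYPE on bits [41:40], WAY on [39:32], SET on [31:0].
enum ClackType : unsigned { TYPE_CLACK_DATA = 0, TYPE_CLACK_INST = 1 };

struct ClackRequest
{
    bool   valid;       // a CLACK was decoded
    bool   for_icache;  // true: ICACHE FSM, false: DCACHE FSM
    size_t way;         // L1 slot way being acknowledged
    size_t set;         // L1 slot set being acknowledged
};

// Mirrors the three dspin_get() calls and the way/set masking of the diff
// (nways and nsets are assumed to be powers of two, as in the masking above).
ClackRequest decode_clack(uint64_t flit, size_t nways, size_t nsets)
{
    unsigned type = (flit >> 40) & 0x3;
    ClackRequest req;
    req.valid      = (type == TYPE_CLACK_DATA) or (type == TYPE_CLACK_INST);
    req.for_icache = (type == TYPE_CLACK_INST);
    req.way        = ((flit >> 32) & 0xFF)       & (nways - 1);
    req.set        = ( flit        & 0xFFFFFFFF) & (nsets - 1);
    return req;
}

A registered flit is forwarded, and the channel is read for the next one (p_dspin_clack.read driven in genMoore), only when the matching r_dcache_clack_req / r_icache_clack_req flip-flop is free, so a pending acknowledge is never overwritten.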
trunk/modules/vci_mem_cache
-
Property
svn:mergeinfo
set to
/branches/v5/modules/vci_mem_cache merged eligible
-
trunk/modules/vci_mem_cache/caba/metadata/vci_mem_cache.sd
r434 r468 49 49 Port('caba:vci_initiator', 'p_vci_ixr'), 50 50 Port('caba:dspin_input', 51 'p_dspin_ in',51 'p_dspin_p2m', 52 52 dspin_data_size = parameter.Reference('dspin_in_width') 53 53 ), 54 54 Port('caba:dspin_output', 55 'p_dspin_out', 55 'p_dspin_m2p', 56 dspin_data_size = parameter.Reference('dspin_out_width') 57 ), 58 Port('caba:dspin_output', 59 'p_dspin_clack', 56 60 dspin_data_size = parameter.Reference('dspin_out_width') 57 61 ), -
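With this changeset the cleanup acknowledgements leave the m2p network and get a dedicated output channel, declared above as p_dspin_clack. For illustration only, a single-flit acknowledge could be assembled as below; the shift amounts are hypothetical stand-ins for the CLACK_TYPE / CLACK_WAY / CLACK_SET parameters of DspinDhccpParam, and build_clack_flit is not a soclib function.

#include <cstdint>

// Hypothetical builder for the one-flit CLACK command sent on p_dspin_clack
// (field positions are placeholders, not the real DspinDhccpParam values;
// they match the layout assumed in the decode sketch given earlier).
uint64_t build_clack_flit(bool inst, uint64_t way, uint64_t set)
{
    const uint64_t type = inst ? 1ULL /* TYPE_CLACK_INST */
                               : 0ULL /* TYPE_CLACK_DATA */;
    return (type << 40) | ((way & 0xFF) << 32) | (set & 0xFFFFFFFF);
}

The consumer of this flit is the CLACK input interface of the L1 cache wrapper shown in the previous file.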
trunk/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
r449 r468 54 54 #define TRT_ENTRIES 4 // Number of entries in TRT 55 55 #define UPT_ENTRIES 4 // Number of entries in UPT 56 #define IVT_ENTRIES 4 // Number of entries in IVT 56 57 #define HEAP_ENTRIES 1024 // Number of entries in HEAP 57 58 … … 122 123 CC_SEND_WRITE_IDLE, 123 124 CC_SEND_CAS_IDLE, 124 CC_SEND_CLEANUP_IDLE,125 125 CC_SEND_CONFIG_INVAL_HEADER, 126 126 CC_SEND_CONFIG_INVAL_NLINE, 127 127 CC_SEND_CONFIG_BRDCAST_HEADER, 128 128 CC_SEND_CONFIG_BRDCAST_NLINE, 129 CC_SEND_CLEANUP_ACK,130 129 CC_SEND_XRAM_RSP_BRDCAST_HEADER, 131 130 CC_SEND_XRAM_RSP_BRDCAST_NLINE, … … 163 162 CONFIG_DIR_REQ, 164 163 CONFIG_DIR_ACCESS, 165 CONFIG_DIR_ UPT_LOCK,164 CONFIG_DIR_IVT_LOCK, 166 165 CONFIG_BC_SEND, 167 166 CONFIG_BC_WAIT, … … 211 210 WRITE_MISS_XRAM_REQ, 212 211 WRITE_BC_TRT_LOCK, 213 WRITE_BC_ UPT_LOCK,212 WRITE_BC_IVT_LOCK, 214 213 WRITE_BC_DIR_INVAL, 215 214 WRITE_BC_CC_SEND, … … 274 273 CAS_UPT_NEXT, 275 274 CAS_BC_TRT_LOCK, 276 CAS_BC_ UPT_LOCK,275 CAS_BC_IVT_LOCK, 277 276 CAS_BC_DIR_INVAL, 278 277 CAS_BC_CC_SEND, … … 299 298 CLEANUP_HEAP_CLEAN, 300 299 CLEANUP_HEAP_FREE, 301 CLEANUP_ UPT_LOCK,302 CLEANUP_ UPT_DECREMENT,303 CLEANUP_ UPT_CLEAR,300 CLEANUP_IVT_LOCK, 301 CLEANUP_IVT_DECREMENT, 302 CLEANUP_IVT_CLEAR, 304 303 CLEANUP_WRITE_RSP, 305 304 CLEANUP_CONFIG_ACK, … … 332 331 enum alloc_upt_fsm_state_e 333 332 { 334 ALLOC_UPT_CONFIG,335 333 ALLOC_UPT_WRITE, 336 ALLOC_UPT_XRAM_RSP, 337 ALLOC_UPT_MULTI_ACK, 338 ALLOC_UPT_CLEANUP, 339 ALLOC_UPT_CAS 334 ALLOC_UPT_CAS, 335 ALLOC_UPT_MULTI_ACK 336 }; 337 338 /* States of the ALLOC_IVT fsm */ 339 enum alloc_ivt_fsm_state_e 340 { 341 ALLOC_IVT_WRITE, 342 ALLOC_IVT_XRAM_RSP, 343 ALLOC_IVT_CLEANUP, 344 ALLOC_IVT_CAS, 345 ALLOC_IVT_CONFIG 340 346 }; 341 347 … … 451 457 soclib::caba::VciTarget<vci_param_int> p_vci_tgt; 452 458 soclib::caba::VciInitiator<vci_param_ext> p_vci_ixr; 453 soclib::caba::DspinInput<dspin_in_width> p_dspin_in; 454 soclib::caba::DspinOutput<dspin_out_width> p_dspin_out; 459 soclib::caba::DspinInput<dspin_in_width> p_dspin_p2m; 460 soclib::caba::DspinOutput<dspin_out_width> p_dspin_m2p; 461 soclib::caba::DspinOutput<dspin_out_width> p_dspin_clack; 455 462 456 463 VciMemCache( … … 468 475 const size_t trt_lines=TRT_ENTRIES, 469 476 const size_t upt_lines=UPT_ENTRIES, 477 const size_t ivt_lines=IVT_ENTRIES, 470 478 const size_t debug_start_cycle=0, 471 479 const bool debug_ok=false ); … … 502 510 TransactionTab m_trt; // xram transaction table 503 511 uint32_t m_upt_lines; 504 UpdateTab m_upt; // pending update & invalidate 512 UpdateTab m_upt; // pending update 513 UpdateTab m_ivt; // pending invalidate 505 514 CacheDirectory m_cache_directory; // data cache directory 506 515 CacheData m_cache_data; // data array[set][way][word] … … 591 600 sc_signal<size_t> r_config_heap_next; // current pointer to scan HEAP 592 601 593 sc_signal<size_t> r_config_ upt_index; // UPT index602 sc_signal<size_t> r_config_ivt_index; // IVT index 594 603 595 604 // Buffer between CONFIG fsm and TGT_RSP fsm (send a done response to L1 cache) … … 780 789 sc_signal<size_t> r_cleanup_to_tgt_rsp_pktid; // transaction pktid 781 790 782 // Buffer between CLEANUP fsm and CC_SEND fsm (acknowledge a cleanup command from L1)783 sc_signal<bool> r_cleanup_to_cc_send_req; // valid request784 sc_signal<size_t> r_cleanup_to_cc_send_srcid; // L1 srcid785 sc_signal<size_t> r_cleanup_to_cc_send_set_index; // L1 set index786 sc_signal<size_t> r_cleanup_to_cc_send_way_index; // L1 way index787 sc_signal<bool> r_cleanup_to_cc_send_inst; // Instruction Cleanup 
Ack788 789 791 /////////////////////////////////////////////////////// 790 792 // Registers controlled by CAS fsm … … 872 874 sc_signal<size_t> r_xram_rsp_victim_ptr; // victim line pointer to the heap 873 875 sc_signal<data_t> * r_xram_rsp_victim_data; // victim line data 874 sc_signal<size_t> r_xram_rsp_ upt_index; // UPT entry index876 sc_signal<size_t> r_xram_rsp_ivt_index; // IVT entry index 875 877 sc_signal<size_t> r_xram_rsp_next_ptr; // Next pointer to the heap 876 878 … … 953 955 954 956 //////////////////////////////////////////////////// 957 // Registers controlled by ALLOC_IVT fsm 958 //////////////////////////////////////////////////// 959 960 sc_signal<int> r_alloc_ivt_fsm; 961 962 //////////////////////////////////////////////////// 955 963 // Registers controlled by ALLOC_HEAP fsm 956 964 //////////////////////////////////////////////////// -
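The header above splits the former update/inval table into two structures: m_upt keeps the pending multicast updates, m_ivt the pending invalidations, each with its own allocation FSM. The real UpdateTab class is not shown in this changeset excerpt; the toy model below only mirrors the operations the memory-cache FSMs call on m_upt / m_ivt (set, search_inval, decrement, clear), to make the shared interface of the two tables explicit. All names here are ours.

#include <cstdint>
#include <cstddef>
#include <vector>

// Toy model of a pending-transaction table (not the soclib UpdateTab).
struct PendingEntry
{
    bool     valid;
    bool     update;      // true for an update entry, false for an inval entry
    bool     broadcast;
    bool     need_rsp;
    bool     need_ack;
    uint32_t srcid, trdid, pktid;
    uint64_t nline;
    size_t   count;       // expected number of L1 responses
};

class PendingTab
{
public:
    explicit PendingTab(size_t lines) : m_tab(lines) {}

    bool set(bool update, bool broadcast, bool need_rsp, bool need_ack,
             uint32_t srcid, uint32_t trdid, uint32_t pktid,
             uint64_t nline, size_t count, size_t &index)
    {
        for (size_t i = 0; i < m_tab.size(); ++i)
        {
            if (m_tab[i].valid) continue;
            m_tab[i] = {true, update, broadcast, need_rsp, need_ack,
                        srcid, trdid, pktid, nline, count};
            index = i;
            return true;           // registered
        }
        return false;              // table full: the calling FSM must retry
    }

    bool search_inval(uint64_t nline, size_t &index) const
    {
        for (size_t i = 0; i < m_tab.size(); ++i)
            if (m_tab[i].valid and m_tab[i].nline == nline) { index = i; return true; }
        return false;
    }

    void decrement(size_t index, size_t &count) { count = --m_tab[index].count; }
    void clear(size_t index)                    { m_tab[index].valid = false; }

private:
    std::vector<PendingEntry> m_tab;
};

// After this changeset the memory cache holds two such tables:
//   m_upt (default size UPT_ENTRIES) for multicast updates (WRITE / CAS),
//   m_ivt (default size IVT_ENTRIES) for multicast/broadcast invalidations
//   (WRITE / CAS / XRAM_RSP / CONFIG), whose entries are cleared by CLEANUP.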
trunk/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
r449 r468 99 99 "CC_SEND_WRITE_IDLE", 100 100 "CC_SEND_CAS_IDLE", 101 "CC_SEND_CLEANUP_IDLE",102 101 "CC_SEND_CONFIG_INVAL_HEADER", 103 102 "CC_SEND_CONFIG_INVAL_NLINE", 104 103 "CC_SEND_CONFIG_BRDCAST_HEADER", 105 104 "CC_SEND_CONFIG_BRDCAST_NLINE", 106 "CC_SEND_CLEANUP_ACK",107 105 "CC_SEND_XRAM_RSP_BRDCAST_HEADER", 108 106 "CC_SEND_XRAM_RSP_BRDCAST_NLINE", … … 136 134 "CONFIG_DIR_REQ", 137 135 "CONFIG_DIR_ACCESS", 138 "CONFIG_DIR_ UPT_LOCK",136 "CONFIG_DIR_IVT_LOCK", 139 137 "CONFIG_BC_SEND", 140 138 "CONFIG_BC_WAIT", … … 180 178 "WRITE_MISS_XRAM_REQ", 181 179 "WRITE_BC_TRT_LOCK", 182 "WRITE_BC_ UPT_LOCK",180 "WRITE_BC_IVT_LOCK", 183 181 "WRITE_BC_DIR_INVAL", 184 182 "WRITE_BC_CC_SEND", … … 235 233 "CAS_UPT_NEXT", 236 234 "CAS_BC_TRT_LOCK", 237 "CAS_BC_ UPT_LOCK",235 "CAS_BC_IVT_LOCK", 238 236 "CAS_BC_DIR_INVAL", 239 237 "CAS_BC_CC_SEND", … … 258 256 "CLEANUP_HEAP_CLEAN", 259 257 "CLEANUP_HEAP_FREE", 260 "CLEANUP_ UPT_LOCK",261 "CLEANUP_ UPT_DECREMENT",262 "CLEANUP_ UPT_CLEAR",258 "CLEANUP_IVT_LOCK", 259 "CLEANUP_IVT_DECREMENT", 260 "CLEANUP_IVT_CLEAR", 263 261 "CLEANUP_WRITE_RSP", 264 262 "CLEANUP_CONFIG_ACK", … … 268 266 { 269 267 "ALLOC_DIR_RESET", 268 "ALLOC_DIR_CONFIG", 270 269 "ALLOC_DIR_READ", 271 270 "ALLOC_DIR_WRITE", … … 285 284 { 286 285 "ALLOC_UPT_WRITE", 287 "ALLOC_UPT_XRAM_RSP", 288 "ALLOC_UPT_MULTI_ACK", 289 "ALLOC_UPT_CLEANUP", 290 "ALLOC_UPT_CAS" 286 "ALLOC_UPT_CAS", 287 "ALLOC_UPT_MULTI_ACK" 288 }; 289 const char *alloc_ivt_fsm_str[] = 290 { 291 "ALLOC_IVT_WRITE", 292 "ALLOC_IVT_XRAM_RSP", 293 "ALLOC_IVT_CLEANUP", 294 "ALLOC_IVT_CAS", 295 "ALLOC_IVT_CONFIG" 291 296 }; 292 297 const char *alloc_heap_fsm_str[] = … … 328 333 const size_t trt_lines, // number of TRT entries 329 334 const size_t upt_lines, // number of UPT entries 335 const size_t ivt_lines, // number of IVT entries 330 336 const size_t debug_start_cycle, 331 337 const bool debug_ok) … … 337 343 p_vci_tgt( "p_vci_tgt" ), 338 344 p_vci_ixr( "p_vci_ixr" ), 339 p_dspin_in( "p_dspin_in" ), 340 p_dspin_out( "p_dspin_out" ), 345 p_dspin_p2m( "p_dspin_p2m" ), 346 p_dspin_m2p( "p_dspin_m2p" ), 347 p_dspin_clack( "p_dspin_clack" ), 341 348 342 349 m_seglist( mtp.getSegmentList(tgtid_d) ), … … 355 362 m_upt_lines(upt_lines), 356 363 m_upt(upt_lines), 364 m_ivt(ivt_lines), 357 365 m_cache_directory(nways, nsets, nwords, vci_param_int::N), 358 366 m_cache_data(nways, nsets, nwords), … … 446 454 r_alloc_trt_fsm("r_alloc_trt_fsm"), 447 455 r_alloc_upt_fsm("r_alloc_upt_fsm"), 456 r_alloc_ivt_fsm("r_alloc_ivt_fsm"), 448 457 r_alloc_heap_fsm("r_alloc_heap_fsm"), 449 458 r_alloc_heap_reset_cpt("r_alloc_heap_reset_cpt") … … 623 632 << " | " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] 624 633 << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] 625 << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm ] << std::endl;634 << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl; 626 635 std::cout << " " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] 627 636 << " | " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] 628 637 << " | " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] 638 << " | " << alloc_ivt_fsm_str[r_alloc_ivt_fsm.read()] 629 639 << " | " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 630 640 } … … 702 712 r_alloc_trt_fsm = ALLOC_TRT_READ; 703 713 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 714 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 704 715 r_ixr_rsp_fsm = IXR_RSP_IDLE; 705 716 r_xram_rsp_fsm = XRAM_RSP_IDLE; … … 714 725 m_trt.init(); 715 726 m_upt.init(); 727 m_ivt.init(); 716 728 m_llsc_table.init(); 717 729 … … 1534 1546 
r_config_dir_next_ptr = entry.ptr; 1535 1547 1536 r_config_fsm = CONFIG_DIR_ UPT_LOCK;1548 r_config_fsm = CONFIG_DIR_IVT_LOCK; 1537 1549 } 1538 1550 else if ( entry.valid and // hit & sync command … … 1563 1575 } 1564 1576 ///////////////////////// 1565 case CONFIG_DIR_ UPT_LOCK: // enter this state in case of INVAL command1566 // Try to get both DIR & UPT locks, and return1567 // to LOOP state if UPT full.1568 // Register inval in UPT, and invalidate the1569 // directory if UPT not full.1570 { 1571 if ( r_alloc_ upt_fsm.read() == ALLOC_UPT_CONFIG )1577 case CONFIG_DIR_IVT_LOCK: // enter this state in case of INVAL command 1578 // Try to get both DIR & IVT locks, and return 1579 // to LOOP state if IVT full. 1580 // Register inval in IVT, and invalidate the 1581 // directory if IVT not full. 1582 { 1583 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1572 1584 { 1573 1585 size_t set = m_y[(addr_t)(r_config_address.read())]; … … 1583 1595 #if DEBUG_MEMC_CONFIG 1584 1596 if(m_debug) 1585 std::cout << " <MEMC " << name() << " CONFIG_DIR_ UPT_LOCK>"1597 std::cout << " <MEMC " << name() << " CONFIG_DIR_IVT_LOCK>" 1586 1598 << " No copies in L1 : inval DIR entry" << std::endl; 1587 1599 #endif 1588 1600 } 1589 else // try to register inval in UPT1601 else // try to register inval in IVT 1590 1602 { 1591 1603 bool wok = false; … … 1598 1610 size_t nb_copies = r_config_dir_count.read(); 1599 1611 1600 wok = m_ upt.set(false, // it's an inval transaction1612 wok = m_ivt.set(false, // it's an inval transaction 1601 1613 broadcast, 1602 1614 false, // no response required … … 1609 1621 index); 1610 1622 1611 if ( wok ) // UPT success => inval DIR slot1623 if ( wok ) // IVT success => inval DIR slot 1612 1624 { 1613 1625 m_cache_directory.inval( way, set ); 1614 r_config_ upt_index = index;1626 r_config_ivt_index = index; 1615 1627 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1616 1628 else r_config_fsm = CONFIG_INV_SEND; … … 1618 1630 #if DEBUG_MEMC_CONFIG 1619 1631 if(m_debug) 1620 std::cout << " <MEMC " << name() << " CONFIG_DIR_ UPT_LOCK>"1621 << " Inval DIR entry and register inval in UPT"1632 std::cout << " <MEMC " << name() << " CONFIG_DIR_IVT_LOCK>" 1633 << " Inval DIR entry and register inval in IVT" 1622 1634 << " : index = " << std::dec << index 1623 1635 << " / broadcast = " << broadcast << std::endl; 1624 1636 #endif 1625 1637 } 1626 else // UPT full => release both DIR and UPT locks1638 else // IVT full => release both DIR and IVT locks 1627 1639 { 1628 1640 r_config_fsm = CONFIG_LOOP; … … 1630 1642 #if DEBUG_MEMC_CONFIG 1631 1643 if(m_debug) 1632 std::cout << " <MEMC " << name() << " CONFIG_DIR_ UPT_LOCK>"1633 << " UPT full : release DIR & UPT locks and retry" << std::endl;1644 std::cout << " <MEMC " << name() << " CONFIG_DIR_IVT_LOCK>" 1645 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1634 1646 #endif 1635 1647 } … … 1646 1658 r_config_to_cc_send_multi_req = false; 1647 1659 r_config_to_cc_send_brdcast_req = true; 1648 r_config_to_cc_send_trdid = r_config_ upt_index.read();1660 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1649 1661 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1650 1662 r_cleanup_to_config_ack = false; … … 1686 1698 r_config_to_cc_send_multi_req = true; 1687 1699 r_config_to_cc_send_brdcast_req = false; 1688 r_config_to_cc_send_trdid = r_config_ upt_index.read();1700 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1689 1701 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1690 
1702 r_multi_ack_to_config_ack = false; … … 1848 1860 switch(r_read_fsm.read()) 1849 1861 { 1850 1851 1852 1862 /////////////// 1863 case READ_IDLE: // waiting a read request 1864 { 1853 1865 if(m_cmd_read_addr_fifo.rok()) 1854 1866 { 1855 1867 1856 1868 #if DEBUG_MEMC_READ 1857 if(m_debug)1858 std::cout << " <MEMC " << name() << " READ_IDLE> Read request"1859 << " : address = " << std::hex << m_cmd_read_addr_fifo.read()1860 << " / srcid = " << m_cmd_read_srcid_fifo.read()1861 << " / trdid = " << m_cmd_read_trdid_fifo.read()1862 << " / pktid = " << m_cmd_read_pktid_fifo.read()1863 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl;1869 if(m_debug) 1870 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 1871 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 1872 << " / srcid = " << m_cmd_read_srcid_fifo.read() 1873 << " / trdid = " << m_cmd_read_trdid_fifo.read() 1874 << " / pktid = " << m_cmd_read_pktid_fifo.read() 1875 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1864 1876 #endif 1865 1877 r_read_fsm = READ_DIR_REQ; … … 1961 1973 1962 1974 { 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1975 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1976 { 1977 // check if this is an instruction read, this means pktid is either 1978 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 1979 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 1980 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 1981 // check if this is a cached read, this means pktid is either 1982 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 1983 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 1984 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 1985 bool is_cnt = r_read_is_cnt.read(); 1986 1987 // read data in the cache 1988 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 1989 size_t way = r_read_way.read(); 1990 1991 m_cache_data.read_line(way, set, r_read_data); 1992 1993 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true); 1994 1995 // update the cache directory 1996 DirectoryEntry entry; 1997 entry.valid = true; 1998 entry.is_cnt = is_cnt; 1999 entry.dirty = r_read_dirty.read(); 2000 entry.tag = r_read_tag.read(); 2001 entry.lock = r_read_lock.read(); 2002 entry.ptr = r_read_ptr.read(); 2003 2004 if(cached_read) // Cached read => we must update the copies 2005 { 2006 if(!is_cnt) // Not counter mode 2007 { 2008 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1997 2009 #if L1_MULTI_CACHE 1998 1999 #endif 2000 2001 2002 2003 2004 2005 2010 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 2011 #endif 2012 entry.owner.inst = inst_read; 2013 entry.count = r_read_count.read() + 1; 2014 } 2015 else // Counter mode 2016 { 2017 entry.owner.srcid = 0; 2006 2018 #if L1_MULTI_CACHE 2007 2008 #endif 2009 2010 2011 2012 2013 2014 2015 2019 entry.owner.cache_id = 0; 2020 #endif 2021 entry.owner.inst = false; 2022 entry.count = r_read_count.read() + 1; 2023 } 2024 } 2025 else // Uncached read 2026 { 2027 entry.owner.srcid = r_read_copy.read(); 2016 2028 #if L1_MULTI_CACHE 2017 2018 #endif 2019 2020 2021 2029 entry.owner.cache_id = r_read_copy_cache.read(); 2030 #endif 2031 entry.owner.inst = r_read_copy_inst.read(); 2032 entry.count = r_read_count.read(); 2033 } 2022 2034 2023 2035 #if DEBUG_MEMC_READ 2024 if(m_debug)2025 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update 
directory entry:"2026 << " addr = " << std::hex << m_cmd_read_addr_fifo.read()2027 << " / set = " << std::dec << set2028 << " / way = " << way2029 << " / owner_id = " << std::hex << entry.owner.srcid2030 << " / owner_ins = " << std::dec << entry.owner.inst2031 << " / count = " << entry.count2032 << " / is_cnt = " << entry.is_cnt << std::endl;2033 #endif 2034 2035 2036 2037 2038 2036 if(m_debug) 2037 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2038 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2039 << " / set = " << std::dec << set 2040 << " / way = " << way 2041 << " / owner_id = " << std::hex << entry.owner.srcid 2042 << " / owner_ins = " << std::dec << entry.owner.inst 2043 << " / count = " << entry.count 2044 << " / is_cnt = " << entry.is_cnt << std::endl; 2045 #endif 2046 2047 m_cache_directory.write(set, way, entry); 2048 r_read_fsm = READ_RSP; 2049 } 2050 break; 2039 2051 } 2040 2052 /////////////////// … … 2750 2762 2751 2763 wok = m_upt.set(true, // it's an update transaction 2752 false, 2753 true, 2754 false, 2764 false, // it's not a broadcast 2765 true, // response required 2766 false, // no acknowledge required 2755 2767 srcid, 2756 2768 trdid, … … 2904 2916 entry.owner.inst) // put the next srcid in the fifo 2905 2917 { 2906 dec_upt_counter 2918 dec_upt_counter = false; 2907 2919 write_to_cc_send_fifo_put = true; 2908 2920 write_to_cc_send_fifo_inst = entry.owner.inst; … … 2957 2969 { 2958 2970 r_write_to_cc_send_multi_req = true; 2959 if(r_write_to_dec.read() or dec_upt_counter) 2971 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 2960 2972 else r_write_fsm = WRITE_IDLE; 2961 2973 } … … 3200 3212 { 3201 3213 r_write_trt_index = wok_index; 3202 r_write_fsm = WRITE_BC_ UPT_LOCK;3214 r_write_fsm = WRITE_BC_IVT_LOCK; 3203 3215 } 3204 3216 else // wait an empty entry in TRT … … 3217 3229 3218 3230 ////////////////////// 3219 case WRITE_BC_ UPT_LOCK: // register BC transaction in UPT3220 { 3221 if(r_alloc_ upt_fsm.read() == ALLOC_UPT_WRITE)3231 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 3232 { 3233 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3222 3234 { 3223 3235 bool wok = false; … … 3229 3241 size_t nb_copies = r_write_count.read(); 3230 3242 3231 wok = m_ upt.set(false, // it's an inval transaction3232 true, 3233 true, 3234 false, 3243 wok = m_ivt.set(false, // it's an inval transaction 3244 true, // it's a broadcast 3245 true, // response required 3246 false, // no acknowledge required 3235 3247 srcid, 3236 3248 trdid, … … 3242 3254 #if DEBUG_MEMC_WRITE 3243 3255 if( m_debug and wok ) 3244 std::cout << " <MEMC " << name() << " WRITE_BC_ UPT_LOCK> Register broadcast inval in UPT"3256 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3245 3257 << " / nb_copies = " << r_write_count.read() << std::endl; 3246 3258 #endif … … 3248 3260 3249 3261 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL; 3250 else 3262 else r_write_fsm = WRITE_WAIT; 3251 3263 } 3252 3264 break; … … 3259 3271 // and invalidate the line in directory 3260 3272 if((r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) or 3261 (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE) or3262 3273 (r_alloc_ivt_fsm.read() != ALLOC_IVT_WRITE) or 3274 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE)) 3263 3275 { 3264 3276 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; 3265 std::cout << "bad TRT, DIR, or UPT allocation" << std::endl;3277 std::cout << "bad TRT, DIR, or IVT allocation" << 
std::endl; 3266 3278 exit(0); 3267 3279 } … … 3791 3803 } 3792 3804 ///////////////////////// 3793 case XRAM_RSP_INVAL_LOCK: // Take the UPT lock to check a possible pending inval3794 { 3795 if(r_alloc_ upt_fsm == ALLOC_UPT_XRAM_RSP)3805 case XRAM_RSP_INVAL_LOCK: // Take the IVT lock to check a possible pending inval 3806 { 3807 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 3796 3808 { 3797 3809 size_t index = 0; 3798 if(m_ upt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval3810 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 3799 3811 { 3800 3812 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; … … 3803 3815 if(m_debug) 3804 3816 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_LOCK>" 3805 << " Get acces to UPT, but line invalidation registered"3817 << " Get acces to IVT, but line invalidation registered" 3806 3818 << " / nline = " << std::hex << r_xram_rsp_trt_buf.nline 3807 3819 << " / index = " << std::dec << index << std::endl; … … 3809 3821 3810 3822 } 3811 else if(m_ upt.is_full() and r_xram_rsp_victim_inval.read()) // UPT full3823 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 3812 3824 { 3813 3825 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; … … 3816 3828 if(m_debug) 3817 3829 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_LOCK>" 3818 << " Get acces to UPT, but inval required and UPT full" << std::endl;3830 << " Get acces to IVT, but inval required and IVT full" << std::endl; 3819 3831 #endif 3820 3832 } … … 3826 3838 if(m_debug) 3827 3839 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_LOCK>" 3828 << " Get acces to UPT" << std::endl;3840 << " Get acces to IVT" << std::endl; 3829 3841 #endif 3830 3842 } … … 3846 3858 /////////////////////// 3847 3859 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 3848 // and possibly set an inval request in UPT3860 // and possibly set an inval request in IVT 3849 3861 { 3850 3862 // check if this is an instruction read, this means pktid is either … … 3904 3916 m_cache_directory.write(set, way, entry); 3905 3917 3906 // request an invalidattion request in UPT for victim line3918 // request an invalidattion request in IVT for victim line 3907 3919 if(r_xram_rsp_victim_inval.read()) 3908 3920 { … … 3911 3923 size_t count_copies = r_xram_rsp_victim_count.read(); 3912 3924 3913 bool wok = m_ upt.set(false, // it's an inval transaction3925 bool wok = m_ivt.set(false, // it's an inval transaction 3914 3926 broadcast, // set broadcast bit 3915 3927 false, // no response required … … 3922 3934 index); 3923 3935 3924 r_xram_rsp_ upt_index = index;3936 r_xram_rsp_ivt_index = index; 3925 3937 3926 3938 if(!wok) 3927 3939 { 3928 3940 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_DIR_UPDT" 3929 << " update_tab entry free but write unsuccessful" << std::endl;3941 << " invalidate_tab entry free but write unsuccessful" << std::endl; 3930 3942 exit(0); 3931 3943 } … … 4036 4048 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4037 4049 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4038 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ upt_index;4050 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4039 4051 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4040 4052 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); … … 4252 4264 DspinDhccpParam::dspin_get( 4253 4265 flit, 4254 DspinDhccpParam:: FROM_L1_TYPE);4266 DspinDhccpParam::P2M_TYPE); 4255 4267 4256 4268 r_cleanup_way_index = … … 4381 4393 else // miss : 
check UPT for a pending invalidation transaction 4382 4394 { 4383 r_cleanup_fsm = CLEANUP_ UPT_LOCK;4395 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4384 4396 } 4385 4397 … … 4839 4851 } 4840 4852 ////////////////////// 4841 case CLEANUP_ UPT_LOCK: // get the lock protecting the UPT to search a pending4853 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 4842 4854 // invalidate transaction matching the cleanup 4843 4855 { 4844 if(r_alloc_ upt_fsm.read() != ALLOC_UPT_CLEANUP) break;4856 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 4845 4857 4846 4858 size_t index = 0; 4847 4859 bool match_inval; 4848 4860 4849 match_inval = m_ upt.search_inval(r_cleanup_nline.read(), index);4861 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 4850 4862 4851 4863 if ( not match_inval ) // no pending inval … … 4856 4868 if(m_debug) 4857 4869 std::cout << " <MEMC " << name() 4858 << " CLEANUP_ UPT_LOCK> Unexpected cleanup"4859 << " with no corresponding UPT entry:"4870 << " CLEANUP_IVT_LOCK> Unexpected cleanup" 4871 << " with no corresponding IVT entry:" 4860 4872 << " address = " << std::hex 4861 4873 << (r_cleanup_nline.read() *4*m_words) … … 4866 4878 4867 4879 // pending inval 4868 r_cleanup_write_srcid = m_ upt.srcid(index);4869 r_cleanup_write_trdid = m_ upt.trdid(index);4870 r_cleanup_write_pktid = m_ upt.pktid(index);4871 r_cleanup_need_rsp = m_ upt.need_rsp(index);4872 r_cleanup_need_ack = m_ upt.need_ack(index);4880 r_cleanup_write_srcid = m_ivt.srcid(index); 4881 r_cleanup_write_trdid = m_ivt.trdid(index); 4882 r_cleanup_write_pktid = m_ivt.pktid(index); 4883 r_cleanup_need_rsp = m_ivt.need_rsp(index); 4884 r_cleanup_need_ack = m_ivt.need_ack(index); 4873 4885 r_cleanup_index = index; 4874 4886 4875 r_cleanup_fsm = CLEANUP_ UPT_DECREMENT;4887 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 4876 4888 4877 4889 #if DEBUG_MEMC_CLEANUP 4878 4890 if(m_debug) 4879 4891 std::cout << " <MEMC " << name() 4880 << " CLEANUP_ UPT_LOCK> Cleanup matching pending"4881 << " invalidate transaction on UPT:"4892 << " CLEANUP_IVT_LOCK> Cleanup matching pending" 4893 << " invalidate transaction on IVT:" 4882 4894 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 4883 << " / upt_entry = " << index << std::endl;4895 << " / ivt_entry = " << index << std::endl; 4884 4896 #endif 4885 4897 break; 4886 4898 } 4887 4899 /////////////////////////// 4888 case CLEANUP_ UPT_DECREMENT: // decrement response counter in UPT matching entry4889 { 4890 if(r_alloc_ upt_fsm.read() != ALLOC_UPT_CLEANUP)4900 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 4901 { 4902 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) 4891 4903 { 4892 4904 std::cout 4893 4905 << "VCI_MEM_CACHE ERROR " << name() 4894 << " CLEANUP_ UPT_DECREMENT state" << std::endl4895 << "Bad UPT allocation"4906 << " CLEANUP_IVT_DECREMENT state" << std::endl 4907 << "Bad IVT allocation" 4896 4908 << std::endl; 4897 4909 … … 4900 4912 4901 4913 size_t count = 0; 4902 m_ upt.decrement(r_cleanup_index.read(), count);4914 m_ivt.decrement(r_cleanup_index.read(), count); 4903 4915 4904 4916 if(count == 0) // multi inval transaction completed 4905 4917 { 4906 r_cleanup_fsm = CLEANUP_ UPT_CLEAR;4918 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 4907 4919 } 4908 4920 else // multi inval transaction not completed … … 4913 4925 #if DEBUG_MEMC_CLEANUP 4914 4926 if(m_debug) 4915 std::cout << " <MEMC " << name() << " CLEANUP_ UPT_DECREMENT>"4916 << " Decrement response counter in UPT:"4917 << " UPT_index = " << 
r_cleanup_index.read()4927 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 4928 << " Decrement response counter in IVT:" 4929 << " IVT_index = " << r_cleanup_index.read() 4918 4930 << " / rsp_count = " << count << std::endl; 4919 4931 #endif … … 4921 4933 } 4922 4934 /////////////////////// 4923 case CLEANUP_ UPT_CLEAR: // Clear UPT entry4924 { 4925 if(r_alloc_ upt_fsm.read() != ALLOC_UPT_CLEANUP)4935 case CLEANUP_IVT_CLEAR: // Clear IVT entry 4936 { 4937 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) 4926 4938 { 4927 4939 std::cout 4928 4940 << "VCI_MEM_CACHE ERROR " << name() 4929 << " CLEANUP_ UPT_CLEAR state" << std::endl4930 << "Bad UPT allocation"4941 << " CLEANUP_IVT_CLEAR state" << std::endl 4942 << "Bad IVT allocation" 4931 4943 << std::endl; 4932 4944 … … 4934 4946 } 4935 4947 4936 m_ upt.clear(r_cleanup_index.read());4948 m_ivt.clear(r_cleanup_index.read()); 4937 4949 4938 4950 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; … … 4943 4955 if(m_debug) 4944 4956 std::cout << " <MEMC " << name() 4945 << " CLEANUP_ UPT_CLEAR> Clear entry in UPT:"4946 << " UPT_index = " << r_cleanup_index.read() << std::endl;4957 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 4958 << " IVT_index = " << r_cleanup_index.read() << std::endl; 4947 4959 #endif 4948 4960 break; … … 4989 5001 } 4990 5002 //////////////////////// 4991 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 4992 // on the coherence network (request to the CC_SEND FSM). 4993 // wait if pending request to the CC_SEND FSM 4994 { 4995 if(r_cleanup_to_cc_send_req.read()) break; 4996 4997 r_cleanup_to_cc_send_req = true; 4998 r_cleanup_to_cc_send_set_index = r_cleanup_nline.read() & 0xFFFF; 4999 r_cleanup_to_cc_send_way_index = r_cleanup_way_index.read(); 5000 r_cleanup_to_cc_send_srcid = r_cleanup_srcid.read(); 5001 r_cleanup_to_cc_send_inst = r_cleanup_inst.read(); 5003 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 5004 // on the coherence CLACK network. 
5005 { 5006 if(not p_dspin_clack.read) break; 5002 5007 5003 5008 r_cleanup_fsm = CLEANUP_IDLE; … … 5007 5012 std::cout << " <MEMC " << name() 5008 5013 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 5009 << " srcid = " << std::dec << r_cleanup_srcid.read() << std::endl; 5014 << " nline = " << std::hex << r_cleanup_nline.read() 5015 << " / way = " << std::dec << r_cleanup_way.read() 5016 << " / srcid = " << std::dec << r_cleanup_srcid.read() 5017 << std::endl; 5010 5018 #endif 5011 5019 break; … … 5242 5250 !r_cas_to_cc_send_brdcast_req.read()) 5243 5251 { 5244 r_cas_fsm = CAS_UPT_LOCK; // multi update required5252 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5245 5253 } 5246 5254 else … … 5507 5515 { 5508 5516 r_cas_trt_index = wok_index; 5509 r_cas_fsm = CAS_BC_ UPT_LOCK;5517 r_cas_fsm = CAS_BC_IVT_LOCK; 5510 5518 } 5511 5519 else … … 5522 5530 } 5523 5531 ///////////////////// 5524 case CAS_BC_ UPT_LOCK: // register a broadcast inval transaction in UPT5532 case CAS_BC_IVT_LOCK: // register a broadcast inval transaction in IVT 5525 5533 // write data in cache in case of successful registration 5526 5534 { 5527 if(r_alloc_ upt_fsm.read() == ALLOC_UPT_CAS)5535 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) 5528 5536 { 5529 5537 bool wok = false; … … 5535 5543 size_t nb_copies = r_cas_count.read(); 5536 5544 5537 // register a broadcast inval transaction in UPT5538 wok = m_ upt.set(false, // it's an inval transaction5539 true, 5540 true, 5541 false, 5545 // register a broadcast inval transaction in IVT 5546 wok = m_ivt.set(false, // it's an inval transaction 5547 true, // it's a broadcast 5548 true, // response required 5549 false, // no acknowledge required 5542 5550 srcid, 5543 5551 trdid, … … 5547 5555 index); 5548 5556 5549 if(wok) // UPT not full5557 if(wok) // IVT not full 5550 5558 { 5551 5559 // cache update … … 5573 5581 if(m_debug) 5574 5582 std::cout << " <MEMC " << name() 5575 << " CAS_BC_ UPT_LOCK> Register a broadcast inval transaction in UPT"5583 << " CAS_BC_IVT_LOCK> Register a broadcast inval transaction in IVT" 5576 5584 << " / nline = " << std::hex << nline 5577 5585 << " / count = " << std::dec << nb_copies 5578 << " / upt_index = " << index << std::endl;5579 #endif 5580 } 5581 else // releases the lock protecting UPT5586 << " / ivt_index = " << index << std::endl; 5587 #endif 5588 } 5589 else // releases the lock protecting IVT 5582 5590 { 5583 5591 r_cas_fsm = CAS_WAIT; … … 5590 5598 { 5591 5599 if((r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5592 (r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) and5593 5600 (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 5601 (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS)) 5594 5602 { 5595 5603 // set TRT … … 5834 5842 // network, used to update or invalidate cache lines in L1 caches. 5835 5843 // 5836 // This fsm is used also to acknowledge CLEANUP a command after request from 5837 // the CLEANUP fsm. 5838 // 5839 // It implements a round-robin priority between the five possible client FSMs 5840 // XRAM_RSP > CAS > CLEANUP > WRITE > CONFIG 5844 // It implements a round-robin priority between the four possible client FSMs 5845 // XRAM_RSP > CAS > WRITE > CONFIG 5841 5846 // 5842 5847 // Each FSM can request the next services: … … 5850 5855 // r_config_to_cc_send_brdcast_req : broadcast-inval 5851 5856 // 5852 // - r_cleanup_to_cc_send_req : cleanup acknowledgement5853 //5854 5857 // An inval request is a double DSPIN flit command containing: 5855 5858 // 1. the index of the line to be invalidated. 
… … 5894 5897 break; 5895 5898 } 5896 // CLEANUP5897 if (r_cleanup_to_cc_send_req.read())5898 {5899 r_cc_send_fsm = CC_SEND_CLEANUP_ACK;5900 break;5901 }5902 5899 // WRITE 5903 5900 if(m_write_to_cc_send_inst_fifo.rok() or … … 5973 5970 break; 5974 5971 } 5975 // CLEANUP5976 if (r_cleanup_to_cc_send_req.read())5977 {5978 r_cc_send_fsm = CC_SEND_CLEANUP_ACK;5979 break;5980 }5981 5972 // WRITE 5982 5973 if(m_write_to_cc_send_inst_fifo.rok() or … … 6012 6003 break; 6013 6004 } 6014 // CLEANUP6015 if(r_cleanup_to_cc_send_req.read())6016 {6017 r_cc_send_fsm = CC_SEND_CLEANUP_ACK;6018 break;6019 }6020 6005 // WRITE 6021 6006 if(m_write_to_cc_send_inst_fifo.rok() or … … 6065 6050 case CC_SEND_CAS_IDLE: // CLEANUP FSM has highest priority 6066 6051 { 6067 if(r_cleanup_to_cc_send_req.read())6068 {6069 r_cc_send_fsm = CC_SEND_CLEANUP_ACK;6070 break;6071 }6072 6052 if(m_write_to_cc_send_inst_fifo.rok() or 6073 6053 r_write_to_cc_send_multi_req.read()) … … 6124 6104 break; 6125 6105 } 6126 //////////////////////////6127 case CC_SEND_CLEANUP_IDLE: // WRITE FSM has highest priority6128 {6129 // WRITE6130 if(m_write_to_cc_send_inst_fifo.rok() or6131 r_write_to_cc_send_multi_req.read())6132 {6133 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER;6134 m_cpt_update++;6135 break;6136 }6137 if(r_write_to_cc_send_brdcast_req.read())6138 {6139 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER;6140 m_cpt_inval++;6141 break;6142 }6143 // CONFIG6144 if(r_config_to_cc_send_multi_req.read())6145 {6146 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER;6147 m_cpt_inval++;6148 break;6149 }6150 if(r_config_to_cc_send_brdcast_req.read())6151 {6152 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER;6153 m_cpt_inval++;6154 break;6155 }6156 // XRAM_RSP6157 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or6158 r_xram_rsp_to_cc_send_multi_req.read())6159 {6160 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER;6161 m_cpt_inval++;6162 break;6163 }6164 if(r_xram_rsp_to_cc_send_brdcast_req.read())6165 {6166 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER;6167 m_cpt_inval++;6168 break;6169 }6170 // CAS6171 if(m_cas_to_cc_send_inst_fifo.rok() or6172 r_cas_to_cc_send_multi_req.read())6173 {6174 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER;6175 m_cpt_update++;6176 break;6177 }6178 if(r_cas_to_cc_send_brdcast_req.read())6179 {6180 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER;6181 m_cpt_inval++;6182 break;6183 }6184 // CLEANUP6185 if(r_cleanup_to_cc_send_req.read())6186 {6187 r_cc_send_fsm = CC_SEND_CLEANUP_ACK;6188 break;6189 }6190 break;6191 }6192 6106 ///////////////////////////////// 6193 6107 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) … … 6195 6109 if(m_config_to_cc_send_inst_fifo.rok()) 6196 6110 { 6197 if(not p_dspin_ out.read) break;6111 if(not p_dspin_m2p.read) break; 6198 6112 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 6199 6113 break; … … 6206 6120 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 6207 6121 { 6208 if(not p_dspin_ out.read) break;6122 if(not p_dspin_m2p.read) break; 6209 6123 m_cpt_inval_mult++; 6210 6124 config_to_cc_send_fifo_get = true; … … 6222 6136 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 6223 6137 { 6224 if(not p_dspin_ out.read) break;6138 if(not p_dspin_m2p.read) break; 6225 6139 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 6226 6140 break; … … 6229 6143 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 6230 6144 { 6231 if(not p_dspin_ out.read) break;6145 if(not p_dspin_m2p.read) break; 6232 
6146 m_cpt_inval_brdcast++; 6233 6147 r_config_to_cc_send_brdcast_req = false; … … 6242 6156 break; 6243 6157 } 6244 /////////////////////////6245 case CC_SEND_CLEANUP_ACK: // send one flit for a cleanup acknowledgement6246 {6247 if(not p_dspin_out.read) break;6248 6249 r_cleanup_to_cc_send_req = false;6250 r_cc_send_fsm = CC_SEND_CLEANUP_IDLE;6251 6252 #if DEBUG_MEMC_CC_SEND6253 if(m_debug)6254 std::cout << " <MEMC " << name()6255 << " CC_SEND_CLEANUP_ACK> Cleanup Ack for srcid "6256 << std::hex << r_cleanup_to_cc_send_srcid.read() << std::endl;6257 #endif6258 break;6259 }6260 6158 /////////////////////////////////// 6261 6159 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) … … 6263 6161 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 6264 6162 { 6265 if(not p_dspin_ out.read) break;6163 if(not p_dspin_m2p.read) break; 6266 6164 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 6267 6165 break; … … 6274 6172 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 6275 6173 { 6276 if(not p_dspin_ out.read) break;6174 if(not p_dspin_m2p.read) break; 6277 6175 m_cpt_inval_mult++; 6278 6176 xram_rsp_to_cc_send_fifo_get = true; … … 6282 6180 if(m_debug) 6283 6181 std::cout << " <MEMC " << name() 6284 << " CC_SEND_XRAM_RSP_INVAL_NLINE> BC-Inval for line "6182 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 6285 6183 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6286 6184 #endif … … 6290 6188 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 6291 6189 { 6292 if(not p_dspin_ out.read) break;6190 if(not p_dspin_m2p.read) break; 6293 6191 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 6294 6192 break; … … 6297 6195 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 6298 6196 { 6299 if(not p_dspin_ out.read) break;6197 if(not p_dspin_m2p.read) break; 6300 6198 m_cpt_inval_brdcast++; 6301 6199 r_xram_rsp_to_cc_send_brdcast_req = false; … … 6313 6211 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 6314 6212 { 6315 if(not p_dspin_ out.read) break;6213 if(not p_dspin_m2p.read) break; 6316 6214 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 6317 6215 break; … … 6320 6218 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 6321 6219 { 6322 if(not p_dspin_ out.read) break;6220 if(not p_dspin_m2p.read) break; 6323 6221 6324 6222 m_cpt_inval_brdcast++; … … 6340 6238 if(m_write_to_cc_send_inst_fifo.rok()) 6341 6239 { 6342 if(not p_dspin_ out.read) break;6240 if(not p_dspin_m2p.read) break; 6343 6241 6344 6242 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; … … 6357 6255 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 6358 6256 { 6359 if(not p_dspin_ out.read) break;6257 if(not p_dspin_m2p.read) break; 6360 6258 m_cpt_update_mult++; 6361 6259 … … 6374 6272 case CC_SEND_WRITE_UPDT_DATA: // send N data flits for a multi-update (from WRITE FSM) 6375 6273 { 6376 if(not p_dspin_ out.read) break;6274 if(not p_dspin_m2p.read) break; 6377 6275 if(r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)) 6378 6276 { … … 6388 6286 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 6389 6287 { 6390 if(not p_dspin_ out.read) break;6288 if(not p_dspin_m2p.read) break; 6391 6289 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 6392 6290 break; … … 6395 6293 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit 
broadcast-inval (from CAS FSM) 6396 6294 { 6397 if(not p_dspin_ out.read) break;6295 if(not p_dspin_m2p.read) break; 6398 6296 m_cpt_inval_brdcast++; 6399 6297 … … 6414 6312 if(m_cas_to_cc_send_inst_fifo.rok()) 6415 6313 { 6416 if(not p_dspin_ out.read) break;6314 if(not p_dspin_m2p.read) break; 6417 6315 6418 6316 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; … … 6432 6330 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 6433 6331 { 6434 if(not p_dspin_ out.read) break;6332 if(not p_dspin_m2p.read) break; 6435 6333 6436 6334 m_cpt_update_mult++; … … 6450 6348 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 6451 6349 { 6452 if(not p_dspin_ out.read) break;6350 if(not p_dspin_m2p.read) break; 6453 6351 6454 6352 if(r_cas_to_cc_send_is_long.read()) … … 6465 6363 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for a multi-update (from CAS FSM) 6466 6364 { 6467 if(not p_dspin_ out.read) break;6365 if(not p_dspin_m2p.read) break; 6468 6366 cas_to_cc_send_fifo_get = true; 6469 6367 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; … … 6485 6383 case CC_RECEIVE_IDLE: 6486 6384 { 6487 if(not p_dspin_ in.write) break;6385 if(not p_dspin_p2m.write) break; 6488 6386 6489 6387 uint8_t type = 6490 6388 DspinDhccpParam::dspin_get( 6491 p_dspin_ in.data.read(),6492 DspinDhccpParam:: FROM_L1_TYPE);6389 p_dspin_p2m.data.read(), 6390 DspinDhccpParam::P2M_TYPE); 6493 6391 6494 6392 if((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or … … 6516 6414 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6517 6415 6518 if(not p_dspin_ in.write or not m_cc_receive_to_cleanup_fifo.wok())6416 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6519 6417 break; 6520 6418 6521 assert(not p_dspin_ in.eop.read() and6419 assert(not p_dspin_p2m.eop.read() and 6522 6420 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6523 6421 "CLEANUP command must have two flits"); … … 6533 6431 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6534 6432 6535 if(not p_dspin_ in.write or not m_cc_receive_to_cleanup_fifo.wok())6433 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6536 6434 break; 6537 6435 6538 assert(p_dspin_ in.eop.read() and6436 assert(p_dspin_p2m.eop.read() and 6539 6437 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6540 6438 "CLEANUP command must have two flits"); … … 6552 6450 6553 6451 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 6554 if(not p_dspin_ in.write or not m_cc_receive_to_multi_ack_fifo.wok())6452 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 6555 6453 break; 6556 6454 6557 assert(p_dspin_ in.eop.read() and6455 assert(p_dspin_p2m.eop.read() and 6558 6456 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6559 6457 "MULTI_ACK command must have one flit"); … … 6965 6863 // ALLOC_UPT FSM 6966 6864 //////////////////////////////////////////////////////////////////////////////////// 6967 // The ALLOC_UPT FSM allocates the access to the Update/Inval Table (UPT), 6968 // with a round robin priority between six FSMs, with the following order: 6969 // CONFIG > MULTI_ACK > WRITE > XRAM_RSP > CLEANUP > CAS 6970 // - The CONFIG FSM initiates an inval transaction and sets a new entry in UPT. 6865 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT), 6866 // with a round robin priority between three FSMs, with the following order: 6867 // WRITE -> CAS -> MULTI_ACK 6868 // - The WRITE FSM initiates update transaction and sets a new entry in UPT. 
6869 // - The CAS FSM does the same thing as the WRITE FSM. 6971 6870 // - The MULTI_ACK FSM complete those trasactions and erase the UPT entry. 6972 // - The WRITE FSM initiates update transaction and sets a new entry in UPT.6973 // - The XRAM_RSP FSM initiates an inval transactions and sets a new entry in UPT.6974 // - The CLEANUP FSM decrement an entry in UPT.6975 // - The CAS FSM does the same thing as the WRITE FSM.6976 6871 // The resource is always allocated. 6977 6872 ///////////////////////////////////////////////////////////////////////////////////// 6978 6979 6873 switch(r_alloc_upt_fsm.read()) 6980 6874 { 6981 //////////////////////6982 case ALLOC_UPT_CONFIG: // allocated to CONFIG FSM6983 if (r_config_fsm.read() != CONFIG_DIR_UPT_LOCK)6984 {6985 if(r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK)6986 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK;6987 6988 else if((r_write_fsm.read() == WRITE_UPT_LOCK) or6989 (r_write_fsm.read() == WRITE_BC_UPT_LOCK))6990 r_alloc_upt_fsm = ALLOC_UPT_WRITE;6991 6992 else if(r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK)6993 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP;6994 6995 else if(r_cleanup_fsm.read() == CLEANUP_UPT_LOCK)6996 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP;6997 6998 else if((r_cas_fsm.read() == CAS_UPT_LOCK) or6999 (r_cas_fsm.read() == CAS_BC_UPT_LOCK))7000 r_alloc_upt_fsm = ALLOC_UPT_CAS;7001 }7002 break;7003 7004 6875 ///////////////////////// 7005 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 7006 if( (r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK) and 7007 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR)) 7008 { 7009 if((r_write_fsm.read() == WRITE_UPT_LOCK) or 7010 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) 7011 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 7012 7013 else if(r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 7014 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 7015 7016 else if(r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) 7017 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 7018 7019 else if((r_cas_fsm.read() == CAS_UPT_LOCK) or 7020 (r_cas_fsm.read() == CAS_BC_UPT_LOCK)) 7021 r_alloc_upt_fsm = ALLOC_UPT_CAS; 7022 7023 else if(r_config_fsm.read() == CONFIG_DIR_UPT_LOCK) 7024 r_alloc_upt_fsm = ALLOC_UPT_CONFIG; 7025 } 7026 break; 7027 7028 ///////////////////// 7029 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 7030 if((r_write_fsm.read() != WRITE_UPT_LOCK) and 7031 (r_write_fsm.read() != WRITE_BC_UPT_LOCK)) 7032 { 7033 if(r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 7034 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 7035 7036 else if(r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) 7037 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 7038 7039 else if((r_cas_fsm.read() == CAS_UPT_LOCK) or 7040 (r_cas_fsm.read() == CAS_BC_UPT_LOCK)) 7041 r_alloc_upt_fsm = ALLOC_UPT_CAS; 7042 7043 else if(r_config_fsm.read() == CONFIG_DIR_UPT_LOCK) 7044 r_alloc_upt_fsm = ALLOC_UPT_CONFIG; 7045 7046 else if(r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 7047 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 7048 } 7049 break; 7050 7051 //////////////////////// 7052 case ALLOC_UPT_XRAM_RSP: 7053 if(r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK) 7054 { 7055 if(r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) 7056 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 7057 7058 else if((r_cas_fsm.read() == CAS_UPT_LOCK) or 7059 (r_cas_fsm.read() == CAS_BC_UPT_LOCK)) 7060 r_alloc_upt_fsm = ALLOC_UPT_CAS; 7061 7062 else if(r_config_fsm.read() == CONFIG_DIR_UPT_LOCK) 7063 r_alloc_upt_fsm = ALLOC_UPT_CONFIG; 7064 7065 else if(r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 7066 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 7067 7068 else if((r_write_fsm.read() 
== WRITE_UPT_LOCK) or 7069 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) 7070 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 7071 } 7072 break; 7073 6876 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 6877 if (r_write_fsm.read() != WRITE_UPT_LOCK) 6878 { 6879 if (r_cas_fsm.read() == CAS_UPT_LOCK) 6880 r_alloc_upt_fsm = ALLOC_UPT_CAS; 6881 6882 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 6883 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 6884 } 6885 break; 6886 6887 ///////////////////////// 6888 case ALLOC_UPT_CAS: // allocated to CAS FSM 6889 if (r_cas_fsm.read() != CAS_UPT_LOCK) 6890 { 6891 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 6892 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 6893 6894 else if (r_write_fsm.read() == WRITE_UPT_LOCK) 6895 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 6896 } 6897 break; 6898 6899 ///////////////////////// 6900 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 6901 if ((r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK ) and 6902 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR)) 6903 { 6904 if (r_write_fsm.read() == WRITE_UPT_LOCK) 6905 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 6906 6907 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 6908 r_alloc_upt_fsm = ALLOC_UPT_CAS; 6909 } 6910 break; 6911 } // end switch r_alloc_upt_fsm 6912 6913 //////////////////////////////////////////////////////////////////////////////////// 6914 // ALLOC_IVT FSM 6915 //////////////////////////////////////////////////////////////////////////////////// 6916 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT), 6917 // with a round robin priority between five FSMs, with the following order: 6918 // WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG 6919 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry 6920 // in IVT. 6921 // - The CAS FSM does the same thing as the WRITE FSM. 6922 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transaction and sets 6923 // a new entry in the IVT 6924 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM 6925 // - The CLEANUP FSM complete those trasactions and erase the IVT entry. 6926 // The resource is always allocated. 
6927 ///////////////////////////////////////////////////////////////////////////////////// 6928 switch(r_alloc_ivt_fsm.read()) 6929 { 7074 6930 ////////////////////////// 7075 case ALLOC_UPT_CLEANUP: 7076 if((r_cleanup_fsm.read() != CLEANUP_UPT_LOCK ) and 7077 (r_cleanup_fsm.read() != CLEANUP_UPT_DECREMENT)) 7078 { 7079 if((r_cas_fsm.read() == CAS_UPT_LOCK) or 7080 (r_cas_fsm.read() == CAS_BC_UPT_LOCK)) 7081 r_alloc_upt_fsm = ALLOC_UPT_CAS; 7082 7083 else if(r_config_fsm.read() == CONFIG_DIR_UPT_LOCK) 7084 r_alloc_upt_fsm = ALLOC_UPT_CONFIG; 7085 7086 else if(r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 7087 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 7088 7089 else if((r_write_fsm.read() == WRITE_UPT_LOCK) or 7090 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) 7091 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 7092 7093 else if(r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 7094 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 7095 } 7096 break; 6931 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 6932 if (r_write_fsm.read() != WRITE_BC_IVT_LOCK) 6933 { 6934 if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 6935 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6936 6937 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6938 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6939 6940 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6941 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6942 6943 else if (r_config_fsm.read() == CONFIG_DIR_IVT_LOCK) 6944 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6945 } 6946 break; 7097 6947 7098 6948 ////////////////////////// 7099 case ALLOC_UPT_CAS: 7100 if((r_cas_fsm.read() != CAS_UPT_LOCK) and 7101 (r_cas_fsm.read() != CAS_BC_UPT_LOCK)) 7102 { 7103 if(r_config_fsm.read() == CONFIG_DIR_UPT_LOCK) 7104 r_alloc_upt_fsm = ALLOC_UPT_CONFIG; 7105 7106 else if(r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 7107 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 7108 7109 else if((r_write_fsm.read() == WRITE_UPT_LOCK) or 7110 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) 7111 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 7112 7113 else if(r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 7114 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 7115 7116 else if(r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) 7117 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 7118 } 7119 break; 7120 7121 } // end switch r_alloc_upt_fsm 6949 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 6950 if(r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK) 6951 { 6952 if(r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6953 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6954 6955 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6956 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6957 6958 else if (r_config_fsm.read() == CONFIG_DIR_IVT_LOCK) 6959 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6960 6961 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6962 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6963 } 6964 break; 6965 6966 ////////////////////////// 6967 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 6968 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 6969 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT)) 6970 { 6971 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6972 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6973 6974 else if (r_config_fsm.read() == CONFIG_DIR_IVT_LOCK) 6975 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6976 6977 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6978 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6979 6980 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 6981 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6982 } 6983 break; 6984 6985 ////////////////////////// 6986 case ALLOC_IVT_CAS: // allocated to CAS FSM 6987 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 6988 { 6989 if 
(r_config_fsm.read() == CONFIG_DIR_IVT_LOCK) 6990 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6991 6992 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6993 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6994 6995 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 6996 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6997 6998 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6999 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7000 } 7001 break; 7002 7003 ////////////////////////// 7004 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 7005 if (r_config_fsm.read() != CONFIG_DIR_IVT_LOCK) 7006 { 7007 if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7008 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7009 7010 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) 7011 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7012 7013 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7014 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7015 7016 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7017 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7018 } 7019 break; 7020 7021 } // end switch r_alloc_ivt_fsm 7122 7022 7123 7023 //////////////////////////////////////////////////////////////////////////////////// … … 7149 7049 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 7150 7050 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 7151 (r_config_fsm.read() != CONFIG_DIR_ UPT_LOCK) )7051 (r_config_fsm.read() != CONFIG_DIR_IVT_LOCK) ) 7152 7052 { 7153 7053 if(r_read_fsm.read() == READ_DIR_REQ) … … 7202 7102 (r_write_fsm.read() != WRITE_DIR_HIT) and 7203 7103 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7204 (r_write_fsm.read() != WRITE_BC_ UPT_LOCK) and7104 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 7205 7105 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7206 7106 (r_write_fsm.read() != WRITE_UPT_LOCK) and … … 7238 7138 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 7239 7139 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7240 (r_cas_fsm.read() != CAS_BC_ UPT_LOCK) and7140 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 7241 7141 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7242 7142 (r_cas_fsm.read() != CAS_UPT_LOCK) and … … 7352 7252 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7353 7253 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7354 (r_write_fsm.read() != WRITE_BC_ UPT_LOCK))7254 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7355 7255 { 7356 7256 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or … … 7375 7275 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7376 7276 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7377 (r_cas_fsm.read() != CAS_BC_ UPT_LOCK))7277 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7378 7278 { 7379 7279 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and … … 7665 7565 m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 7666 7566 cc_receive_to_cleanup_fifo_put, 7667 p_dspin_ in.data.read() );7567 p_dspin_p2m.data.read() ); 7668 7568 7669 7569 //////////////////////////////////////////////////////////////////////////////////// … … 7673 7573 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 7674 7574 cc_receive_to_multi_ack_fifo_put, 7675 p_dspin_ in.data.read() );7575 p_dspin_p2m.data.read() ); 7676 7576 7677 7577 //////////////////////////////////////////////////////////////////////////////////// … … 8015 7915 8016 7916 //////////////////////////////////////////////////////////////////// 8017 // p_dspin_ outport (CC_SEND FSM)7917 // p_dspin_m2p port (CC_SEND FSM) 8018 7918 //////////////////////////////////////////////////////////////////// 8019 7919 8020 p_dspin_ out.write = false;8021 p_dspin_ out.eop = false;8022 p_dspin_ out.data = 0;7920 
p_dspin_m2p.write = false; 7921 p_dspin_m2p.eop = false; 7922 p_dspin_m2p.data = 0; 8023 7923 8024 7924 switch(r_cc_send_fsm.read()) … … 8029 7929 case CC_SEND_WRITE_IDLE: 8030 7930 case CC_SEND_CAS_IDLE: 8031 case CC_SEND_CLEANUP_IDLE:8032 7931 { 8033 7932 break; … … 8064 7963 DspinDhccpParam::dspin_set( flit, 8065 7964 multi_inval_type, 8066 DspinDhccpParam:: FROM_MC_TYPE);8067 p_dspin_ out.write = true;8068 p_dspin_ out.data = flit;7965 DspinDhccpParam::M2P_TYPE); 7966 p_dspin_m2p.write = true; 7967 p_dspin_m2p.data = flit; 8069 7968 break; 8070 7969 } … … 8076 7975 r_config_to_cc_send_nline.read(), 8077 7976 DspinDhccpParam::MULTI_INVAL_NLINE); 8078 p_dspin_ out.eop = true;8079 p_dspin_ out.write = true;8080 p_dspin_ out.data = flit;7977 p_dspin_m2p.eop = true; 7978 p_dspin_m2p.write = true; 7979 p_dspin_m2p.data = flit; 8081 7980 break; 8082 7981 } 8083 ////////////////////////8084 case CC_SEND_CLEANUP_ACK:8085 {8086 uint8_t cleanup_ack_type;8087 if(r_cleanup_to_cc_send_inst.read())8088 {8089 cleanup_ack_type = DspinDhccpParam::TYPE_CLEANUP_ACK_INST;8090 }8091 else8092 {8093 cleanup_ack_type = DspinDhccpParam::TYPE_CLEANUP_ACK_DATA;8094 }8095 8096 uint64_t flit = 0;8097 uint64_t dest =8098 r_cleanup_to_cc_send_srcid.read() <<8099 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);8100 8101 DspinDhccpParam::dspin_set(8102 flit,8103 dest,8104 DspinDhccpParam::CLEANUP_ACK_DEST);8105 8106 DspinDhccpParam::dspin_set(8107 flit,8108 r_cleanup_to_cc_send_set_index.read(),8109 DspinDhccpParam::CLEANUP_ACK_SET);8110 8111 DspinDhccpParam::dspin_set(8112 flit,8113 r_cleanup_to_cc_send_way_index.read(),8114 DspinDhccpParam::CLEANUP_ACK_WAY);8115 8116 DspinDhccpParam::dspin_set(8117 flit,8118 cleanup_ack_type,8119 DspinDhccpParam::FROM_MC_TYPE);8120 8121 p_dspin_out.eop = true;8122 p_dspin_out.write = true;8123 p_dspin_out.data = flit;8124 8125 break;8126 }8127 8128 7982 /////////////////////////////////// 8129 7983 case CC_SEND_XRAM_RSP_INVAL_HEADER: … … 8159 8013 DspinDhccpParam::dspin_set( flit, 8160 8014 multi_inval_type, 8161 DspinDhccpParam:: FROM_MC_TYPE);8162 p_dspin_ out.write = true;8163 p_dspin_ out.data = flit;8015 DspinDhccpParam::M2P_TYPE); 8016 p_dspin_m2p.write = true; 8017 p_dspin_m2p.data = flit; 8164 8018 break; 8165 8019 } … … 8173 8027 r_xram_rsp_to_cc_send_nline.read(), 8174 8028 DspinDhccpParam::MULTI_INVAL_NLINE); 8175 p_dspin_ out.eop = true;8176 p_dspin_ out.write = true;8177 p_dspin_ out.data = flit;8029 p_dspin_m2p.eop = true; 8030 p_dspin_m2p.write = true; 8031 p_dspin_m2p.data = flit; 8178 8032 break; 8179 8033 } … … 8197 8051 DspinDhccpParam::dspin_set( flit, 8198 8052 1ULL, 8199 DspinDhccpParam:: FROM_MC_BC);8200 p_dspin_ out.write = true;8201 p_dspin_ out.data = flit;8053 DspinDhccpParam::M2P_BC); 8054 p_dspin_m2p.write = true; 8055 p_dspin_m2p.data = flit; 8202 8056 break; 8203 8057 } … … 8209 8063 r_xram_rsp_to_cc_send_nline.read(), 8210 8064 DspinDhccpParam::BROADCAST_NLINE); 8211 p_dspin_ out.write = true;8212 p_dspin_ out.eop = true;8213 p_dspin_ out.data = flit;8065 p_dspin_m2p.write = true; 8066 p_dspin_m2p.eop = true; 8067 p_dspin_m2p.data = flit; 8214 8068 break; 8215 8069 } … … 8221 8075 r_config_to_cc_send_nline.read(), 8222 8076 DspinDhccpParam::BROADCAST_NLINE); 8223 p_dspin_ out.write = true;8224 p_dspin_ out.eop = true;8225 p_dspin_ out.data = flit;8077 p_dspin_m2p.write = true; 8078 p_dspin_m2p.eop = true; 8079 p_dspin_m2p.data = flit; 8226 8080 break; 8227 8081 } … … 8233 8087 r_write_to_cc_send_nline.read(), 8234 8088 
DspinDhccpParam::BROADCAST_NLINE); 8235 p_dspin_ out.write = true;8236 p_dspin_ out.eop = true;8237 p_dspin_ out.data = flit;8089 p_dspin_m2p.write = true; 8090 p_dspin_m2p.eop = true; 8091 p_dspin_m2p.data = flit; 8238 8092 break; 8239 8093 } … … 8245 8099 r_cas_to_cc_send_nline.read(), 8246 8100 DspinDhccpParam::BROADCAST_NLINE); 8247 p_dspin_ out.write = true;8248 p_dspin_ out.eop = true;8249 p_dspin_ out.data = flit;8101 p_dspin_m2p.write = true; 8102 p_dspin_m2p.eop = true; 8103 p_dspin_m2p.data = flit; 8250 8104 break; 8251 8105 } … … 8288 8142 flit, 8289 8143 multi_updt_type, 8290 DspinDhccpParam:: FROM_MC_TYPE);8291 8292 p_dspin_ out.write = true;8293 p_dspin_ out.data = flit;8144 DspinDhccpParam::M2P_TYPE); 8145 8146 p_dspin_m2p.write = true; 8147 p_dspin_m2p.data = flit; 8294 8148 8295 8149 break; … … 8310 8164 DspinDhccpParam::MULTI_UPDT_NLINE); 8311 8165 8312 p_dspin_ out.write = true;8313 p_dspin_ out.data = flit;8166 p_dspin_m2p.write = true; 8167 p_dspin_m2p.data = flit; 8314 8168 8315 8169 break; … … 8337 8191 DspinDhccpParam::MULTI_UPDT_DATA); 8338 8192 8339 p_dspin_ out.write = true;8340 p_dspin_ out.eop = (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1));8341 p_dspin_ out.data = flit;8193 p_dspin_m2p.write = true; 8194 p_dspin_m2p.eop = (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)); 8195 p_dspin_m2p.data = flit; 8342 8196 8343 8197 break; … … 8381 8235 flit, 8382 8236 multi_updt_type, 8383 DspinDhccpParam:: FROM_MC_TYPE);8384 8385 p_dspin_ out.write = true;8386 p_dspin_ out.data = flit;8237 DspinDhccpParam::M2P_TYPE); 8238 8239 p_dspin_m2p.write = true; 8240 p_dspin_m2p.data = flit; 8387 8241 8388 8242 break; … … 8403 8257 DspinDhccpParam::MULTI_UPDT_NLINE); 8404 8258 8405 p_dspin_ out.write = true;8406 p_dspin_ out.data = flit;8259 p_dspin_m2p.write = true; 8260 p_dspin_m2p.data = flit; 8407 8261 8408 8262 break; … … 8423 8277 DspinDhccpParam::MULTI_UPDT_DATA); 8424 8278 8425 p_dspin_ out.write = true;8426 p_dspin_ out.eop = not r_cas_to_cc_send_is_long.read();8427 p_dspin_ out.data = flit;8279 p_dspin_m2p.write = true; 8280 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 8281 p_dspin_m2p.data = flit; 8428 8282 8429 8283 break; … … 8444 8298 DspinDhccpParam::MULTI_UPDT_DATA); 8445 8299 8446 p_dspin_ out.write = true;8447 p_dspin_ out.eop = true;8448 p_dspin_ out.data = flit;8300 p_dspin_m2p.write = true; 8301 p_dspin_m2p.eop = true; 8302 p_dspin_m2p.data = flit; 8449 8303 8450 8304 break; … … 8452 8306 } 8453 8307 8308 //////////////////////////////////////////////////////////////////// 8309 // p_dspin_clack port (CLEANUP FSM) 8310 //////////////////////////////////////////////////////////////////// 8311 8312 switch(r_cleanup_fsm.read()) 8313 { 8314 case CLEANUP_IDLE: 8315 case CLEANUP_GET_NLINE: 8316 case CLEANUP_DIR_REQ: 8317 case CLEANUP_DIR_LOCK: 8318 case CLEANUP_DIR_WRITE: 8319 case CLEANUP_HEAP_REQ: 8320 case CLEANUP_HEAP_LOCK: 8321 case CLEANUP_HEAP_SEARCH: 8322 case CLEANUP_HEAP_CLEAN: 8323 case CLEANUP_HEAP_FREE: 8324 case CLEANUP_IVT_LOCK: 8325 case CLEANUP_IVT_DECREMENT: 8326 case CLEANUP_IVT_CLEAR: 8327 case CLEANUP_WRITE_RSP: 8328 case CLEANUP_CONFIG_ACK: 8329 p_dspin_clack.write = false; 8330 p_dspin_clack.eop = false; 8331 p_dspin_clack.data = 0; 8332 8333 break; 8334 8335 case CLEANUP_SEND_CLACK: 8336 { 8337 uint8_t cleanup_ack_type; 8338 if(r_cleanup_inst.read()) 8339 { 8340 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST; 8341 } 8342 else 8343 { 8344 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA; 8345 } 
8346
8347 uint64_t flit = 0;
8348 uint64_t dest =
8349 r_cleanup_srcid.read() <<
8350 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);
8351
8352 DspinDhccpParam::dspin_set(
8353 flit,
8354 dest,
8355 DspinDhccpParam::CLACK_DEST);
8356
8357 DspinDhccpParam::dspin_set(
8358 flit,
8359 r_cleanup_nline.read() & 0xFFFF,
8360 DspinDhccpParam::CLACK_SET);
8361
8362 DspinDhccpParam::dspin_set(
8363 flit,
8364 r_cleanup_way_index.read(),
8365 DspinDhccpParam::CLACK_WAY);
8366
8367 DspinDhccpParam::dspin_set(
8368 flit,
8369 cleanup_ack_type,
8370 DspinDhccpParam::CLACK_TYPE);
8371
8372 p_dspin_clack.eop = true;
8373 p_dspin_clack.write = true;
8374 p_dspin_clack.data = flit;
8375 }
8376 break;
8377 }
8378
8454 8379 ///////////////////////////////////////////////////////////////////
8455 // p_dspin_in port (CC_RECEIVE FSM)
8380 // p_dspin_p2m port (CC_RECEIVE FSM)
8456 8381 ///////////////////////////////////////////////////////////////////
8457 p_dspin_in.read = false;
8382 //
8458 8383 switch(r_cc_receive_fsm.read())
8459 8384 {
8460 8385 case CC_RECEIVE_IDLE:
8461 8386 {
8387 p_dspin_p2m.read = false;
8462 8388 break;
8463 8389 }
…
8465 8391 case CC_RECEIVE_CLEANUP_EOP:
8466 8392 {
8467 p_dspin_in.read = m_cc_receive_to_cleanup_fifo.wok();
8393 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok();
8468 8394 break;
8469 8395 }
8470 8396 case CC_RECEIVE_MULTI_ACK:
8471 8397 {
8472 p_dspin_in.read = m_cc_receive_to_multi_ack_fifo.wok();
8398 p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok();
8473 8399 break;
8474 8400 }
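The ALLOC_UPT and ALLOC_IVT arbiters in this changeset follow the same rotating-priority scheme: the FSM currently holding the lock keeps it while it sits in its *_LOCK (or DECREMENT/CLEAR) state, and as soon as it leaves, the other requesters are scanned in a fixed circular order starting just after the current owner (for the IVT: WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG). The stand-alone C++ sketch below illustrates only that scan; the IvtOwner enum and allocate_ivt helper are illustrative names, not part of the SoCLib sources.

#include <array>
#include <cstddef>
#include <iostream>

// The five requesters of the IVT lock, in the circular priority order used
// above: WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG (then back to WRITE).
enum IvtOwner { WRITE = 0, XRAM_RSP, CLEANUP, CAS, CONFIG, N_OWNERS };

// One arbitration step: the current owner keeps the lock while it still uses
// it; otherwise the lock goes to the first requesting FSM found by scanning
// the others in circular order starting just after the current owner.
IvtOwner allocate_ivt(IvtOwner current, bool owner_still_holds,
                      const std::array<bool, N_OWNERS>& request)
{
    if (owner_still_holds) return current;
    for (std::size_t i = 1; i < N_OWNERS; ++i)
    {
        std::size_t candidate = (current + i) % N_OWNERS;
        if (request[candidate]) return static_cast<IvtOwner>(candidate);
    }
    return current; // no requester: the resource stays allocated where it is
}

int main()
{
    // WRITE owns the lock and releases it; CAS and CONFIG are requesting.
    // CAS wins because it comes first in the circular order after WRITE.
    std::array<bool, N_OWNERS> req = {false, false, false, true, true};
    std::cout << allocate_ivt(WRITE, false, req) << "\n"; // prints 3 (CAS)
    return 0;
}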
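Every CC_SEND and CLEANUP state that drives p_dspin_m2p or p_dspin_clack builds its flit by calling DspinDhccpParam::dspin_set() once per field (CLACK_DEST, CLACK_SET, CLACK_WAY, CLACK_TYPE, MULTI_UPDT_NLINE, ...), which in essence inserts a value at a fixed position inside the 64-bit flit. The sketch below shows that shift-and-mask idea with a hypothetical set_field helper and made-up offsets; the real field positions and widths are defined by DspinDhccpParam and are not reproduced here.

#include <cstdint>
#include <cstdio>

// Illustrative shift-and-mask field insertion: put 'value' into 'flit' at bit
// position 'lsb' with the given 'width'. DspinDhccpParam::dspin_set() plays
// this role for the named CLACK_* / MULTI_* / BROADCAST_* fields; the offsets
// used in main() are placeholders, not the real DSPIN layout.
static void set_field(uint64_t &flit, uint64_t value, unsigned lsb, unsigned width)
{
    const uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
    flit = (flit & ~(mask << lsb)) | ((value & mask) << lsb);
}

int main()
{
    uint64_t flit = 0;
    set_field(flit, 0x2A,   0, 14);  // hypothetical destination field
    set_field(flit, 0x0123, 14, 16); // hypothetical set-index field
    set_field(flit, 0x2,    30, 2);  // hypothetical way-index field
    std::printf("flit = 0x%016llx\n", (unsigned long long) flit);
    return 0;
}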
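The multi-update packets sent on p_dspin_m2p are framed the same way by the WRITE and CAS paths: one header flit, one NLINE flit, then one flit per updated word, with eop raised only on the last data flit (r_cc_send_cpt == r_write_to_cc_send_count - 1 for WRITE, the r_cas_to_cc_send_is_long flag for CAS). The sketch below models only that framing decision; the Flit struct and frame_multi_updt function are hypothetical.

#include <cstdint>
#include <cstddef>
#include <vector>

// Hypothetical flit descriptor: the payload plus the end-of-packet marker that
// the CC_SEND FSM drives on p_dspin_m2p.eop.
struct Flit { uint64_t data; bool eop; };

// Frame a multi-update packet: header flit, NLINE flit, then one flit per
// updated word, asserting eop on the last data flit only (as the
// CC_SEND_WRITE_UPDT_DATA / CC_SEND_CAS_UPDT_DATA states do above).
std::vector<Flit> frame_multi_updt(uint64_t header, uint64_t nline,
                                   const std::vector<uint64_t> &words)
{
    std::vector<Flit> pkt;
    pkt.push_back({header, false});
    pkt.push_back({nline, words.empty()}); // degenerate case: no data words
    for (std::size_t i = 0; i < words.size(); ++i)
        pkt.push_back({words[i], i + 1 == words.size()});
    return pkt;
}

int main()
{
    std::vector<uint64_t> words = {0xAAAA, 0xBBBB, 0xCCCC};
    std::vector<Flit> pkt = frame_multi_updt(0x1, 0x2, words);
    return pkt.back().eop ? 0 : 1; // eop must land on the last data flit
}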