424 | | ////////////////////////////////////////////////// |
425 | | // This function prints a trace of internal states |
426 | | ////////////////////////////////////////////////// |
427 | | |
428 | | tmpl(void)::print_trace() |
429 | | { |
430 | | std::cout << "MEM_CACHE " << name() << std::endl; |
431 | | std::cout << " / " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] |
432 | | << " / " << read_fsm_str[r_read_fsm] |
433 | | << " / " << write_fsm_str[r_write_fsm] |
434 | | << " / " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] |
435 | | << " / " << init_cmd_fsm_str[r_init_cmd_fsm] |
436 | | << " / " << init_rsp_fsm_str[r_init_rsp_fsm] << std::endl; |
437 | | } |
438 | | |
439 | | ///////////////////////////////////////// |
440 | | // This function prints the statistics |
441 | | ///////////////////////////////////////// |
442 | | |
443 | | tmpl(void)::print_stats() |
444 | | { |
| 433 | ///////////////////////////////////////////////////// |
| 434 | tmpl(void)::cache_monitor( vci_addr_t addr ) |
| 435 | ///////////////////////////////////////////////////// |
| 436 | { |
| 437 | size_t way = 0; |
| 438 | DirectoryEntry entry = m_cache_directory.read(addr, way); |
| 439 | if ( (entry.count != m_debug_previous_count) or |
| 440 | (entry.valid != m_debug_previous_hit) ) |
| 441 | { |
| 442 | std::cout << " MEMC " << name() |
| 443 | << " cache change at cycle " << std::dec << m_cpt_cycles |
| 444 | << " for address " << std::hex << addr |
| 445 | << " / HIT = " << entry.valid |
| 446 | << " / COUNT = " << std::dec << entry.count << std::endl; |
| 447 | } |
| 448 | m_debug_previous_count = entry.count; |
| 449 | m_debug_previous_hit = entry.valid; |
| 450 | } |
| 451 | |
| 452 | ////////////////////////////////////////////////// |
| 453 | tmpl(void)::print_trace() |
| 454 | ////////////////////////////////////////////////// |
| 455 | { |
| 456 | std::cout << "MEMC " << name() << std::endl; |
| 457 | std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] |
| 458 | << " | " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] |
| 459 | << " | " << read_fsm_str[r_read_fsm] |
| 460 | << " | " << write_fsm_str[r_write_fsm] |
| 461 | << " | " << sc_fsm_str[r_sc_fsm] |
| 462 | << " | " << cleanup_fsm_str[r_cleanup_fsm] << std::endl; |
| 463 | std::cout << " " << init_cmd_fsm_str[r_init_cmd_fsm] |
| 464 | << " | " << init_rsp_fsm_str[r_init_rsp_fsm] |
| 465 | << " | " << ixr_cmd_fsm_str[r_ixr_cmd_fsm] |
| 466 | << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm] |
| 467 | << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl; |
| 468 | } |
| 469 | |
| 470 | ///////////////////////////////////////// |
| 471 | tmpl(void)::print_stats() |
| 472 | ///////////////////////////////////////// |
| 473 | { |
697 | | switch ( r_tgt_cmd_fsm.read() ) { |
698 | | |
699 | | ////////////////// |
700 | | case TGT_CMD_IDLE: |
701 | | { |
702 | | if ( p_vci_tgt.cmdval ) { |
703 | | |
704 | | PRINTF(" * <MEM_CACHE.TGT> Request from %d.%d (%d) at address %llx\n",(uint32_t)p_vci_tgt.srcid.read(),(uint32_t)p_vci_tgt.pktid.read(),(uint32_t)p_vci_tgt.trdid.read(),(uint64_t)p_vci_tgt.address.read()); |
705 | | |
706 | | if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) |
707 | | { |
708 | | r_tgt_cmd_fsm = TGT_CMD_READ; |
| 750 | switch ( r_tgt_cmd_fsm.read() ) |
| 751 | { |
| 752 | ////////////////// |
| 753 | case TGT_CMD_IDLE: |
| 754 | { |
| 755 | if ( p_vci_tgt.cmdval ) |
| 756 | { |
| 757 | |
| 758 | #if DEBUG_MEMC_TGT_CMD |
| 759 | if( m_debug_tgt_cmd_fsm ) |
| 760 | { |
| 761 | std::cout << " <MEMC.TGT_CMD_IDLE> Receive command from srcid " << p_vci_tgt.srcid.read() |
| 762 | << " / for address " << p_vci_tgt.address.read() << std::endl; |
| 763 | } |
| 764 | #endif |
| 765 | if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) |
| 766 | { |
| 767 | r_tgt_cmd_fsm = TGT_CMD_READ; |
| 768 | } |
| 769 | else if ( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) |
| 770 | { |
| 771 | r_tgt_cmd_fsm = TGT_CMD_WRITE; |
| 772 | } |
| 773 | else if ( p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND ) |
| 774 | { |
| 775 | r_tgt_cmd_fsm = TGT_CMD_ATOMIC; |
| 776 | } |
| 777 | else |
| 778 | { |
| 779 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 780 | << " TGT_CMD_IDLE state" << std::endl; |
| 781 | std::cout << " illegal VCI command type" << std::endl; |
| 782 | exit(0); |
| 783 | } |
| 784 | } |
| 785 | break; |
| 786 | } |
| 787 | ////////////////// |
| 788 | case TGT_CMD_READ: |
| 789 | { |
| 790 | if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) |
| 791 | { |
| 792 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; |
| 793 | std::cout << " illegal address/plen combination for VCI read command" << std::endl; |
| 794 | exit(0); |
| 795 | } |
| 796 | if ( !p_vci_tgt.eop.read() ) |
| 797 | { |
| 798 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; |
| 799 | std::cout << " read command packets must contain one single flit" << std::endl; |
| 800 | exit(0); |
| 801 | } |
| 802 | |
| 803 | if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) |
| 804 | { |
| 805 | |
| 806 | #if DEBUG_MEMC_TGT_CMD |
| 807 | if( m_debug_tgt_cmd_fsm ) |
| 808 | { |
| 809 | std::cout << " <MEMC.TGT_CMD_READ> Push into read_fifo:" |
| 810 | << " address = " << std::hex << p_vci_tgt.address.read() |
| 811 | << " srcid = " << p_vci_tgt.srcid.read() |
| 812 | << " trdid = " << p_vci_tgt.trdid.read() |
| 813 | << " plen = " << p_vci_tgt.plen.read() << std::endl; |
| 814 | } |
| 815 | #endif |
| 816 | cmd_read_fifo_put = true; |
| 817 | m_cpt_read++; |
| 818 | r_tgt_cmd_fsm = TGT_CMD_IDLE; |
793 | | switch ( r_init_rsp_fsm.read() ) { |
794 | | |
795 | | /////////////////// |
796 | | case INIT_RSP_IDLE: |
797 | | { |
798 | | |
799 | | if ( p_vci_ini.rspval ) { |
800 | | PRINTF(" * <MEM_CACHE.INIT_RSP> rsp val - trdid %d\n",(uint32_t)p_vci_ini.rtrdid.read()); |
801 | | |
802 | | ASSERT (( p_vci_ini.rtrdid.read() < m_update_tab.size()) |
803 | | ,"VCI_MEM_CACHE UPT index too large in VCI response paquet received by memory cache" ); |
804 | | ASSERT (p_vci_ini.reop |
805 | | ,"VCI_MEM_CACHE All response packets to update/invalidate requests must be one cell") ; |
806 | | r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); |
807 | | r_init_rsp_fsm = INIT_RSP_UPT_LOCK; |
808 | | } else if( r_write_to_init_rsp_req.read() ){ |
809 | | r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); |
810 | | r_write_to_init_rsp_req = false; |
811 | | r_init_rsp_fsm = INIT_RSP_UPT_LOCK; |
812 | | } |
813 | | break; |
| 896 | switch ( r_init_rsp_fsm.read() ) |
| 897 | { |
| 898 | /////////////////// |
| 899 | case INIT_RSP_IDLE: // wait a response for a coherence transaction |
| 900 | { |
| 901 | if ( p_vci_ini.rspval ) |
| 902 | { |
| 903 | |
| 904 | #if DEBUG_MEMC_INIT_RSP |
| 905 | if( m_debug_init_rsp_fsm ) |
| 906 | { |
| 907 | std::cout << " <MEMC.INIT_RSP_IDLE> Response for UPT entry " |
| 908 | << p_vci_ini.rtrdid.read() << std::endl; |
| 909 | } |
| 910 | #endif |
| 911 | if ( p_vci_ini.rtrdid.read() >= m_update_tab.size() ) |
| 912 | { |
| 913 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 914 | << " INIT_RSP_IDLE state" << std::endl; |
| 915 | std::cout << "index too large for UPT: " |
| 916 | << " / rtrdid = " << p_vci_ini.rtrdid.read() |
| 917 | << " / UPT size = " << m_update_tab.size() << std::endl; |
| 918 | exit(0); |
| 919 | } |
| 920 | if ( !p_vci_ini.reop.read() ) |
| 921 | { |
| 922 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 923 | << " INIT_RSP_IDLE state" << std::endl; |
| 924 | std::cout << "all coherence response packets must be one flit" << std::endl; |
| 925 | exit(0); |
| 926 | } |
| 927 | |
| 928 | r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); |
| 929 | r_init_rsp_fsm = INIT_RSP_UPT_LOCK; |
| 930 | } |
| 931 | else if( r_write_to_init_rsp_req.read() ) |
| 932 | { |
| 933 | r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); |
| 934 | r_write_to_init_rsp_req = false; |
| 935 | r_init_rsp_fsm = INIT_RSP_UPT_LOCK; |
| 936 | } |
| 937 | break; |
942 | | case READ_DIR_HIT: // read hit : update the memory cache |
943 | | { |
944 | | if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { |
945 | | // signals generation |
946 | | bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); |
947 | | bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); |
948 | | bool is_cnt = r_read_is_cnt.read(); |
949 | | |
950 | | // read data in the cache |
951 | | size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; |
952 | | size_t way = r_read_way.read(); |
953 | | for ( size_t i=0 ; i<m_words ; i++ ) { |
954 | | r_read_data[i] = m_cache_data[way][set][i]; |
955 | | } |
956 | | |
957 | | // update the cache directory (for the copies) |
958 | | DirectoryEntry entry; |
959 | | entry.valid = true; |
960 | | entry.is_cnt = is_cnt; |
961 | | entry.dirty = r_read_dirty.read(); |
962 | | entry.tag = r_read_tag.read(); |
963 | | entry.lock = r_read_lock.read(); |
964 | | entry.ptr = r_read_ptr.read(); |
965 | | if(cached_read){ // Cached read, we update the copy |
966 | | if(!is_cnt){ // Not counter mode |
967 | | entry.owner.srcid = m_cmd_read_srcid_fifo.read(); |
| 1110 | case READ_DIR_HIT: // read data in cache & update the directory |
| 1111 | // we enter this state in 3 cases: |
| 1112 | // - the read request is uncachable |
| 1113 | // - the cache line is in counter mode |
| 1114 | // - the cache line is valid but not replcated |
| 1115 | { |
| 1116 | if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) |
| 1117 | { |
| 1118 | // signals generation |
| 1119 | bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); |
| 1120 | bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); |
| 1121 | bool is_cnt = r_read_is_cnt.read(); |
| 1122 | |
| 1123 | // read data in the cache |
| 1124 | size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; |
| 1125 | size_t way = r_read_way.read(); |
| 1126 | for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; |
| 1127 | |
| 1128 | // update the cache directory |
| 1129 | DirectoryEntry entry; |
| 1130 | entry.valid = true; |
| 1131 | entry.is_cnt = is_cnt; |
| 1132 | entry.dirty = r_read_dirty.read(); |
| 1133 | entry.tag = r_read_tag.read(); |
| 1134 | entry.lock = r_read_lock.read(); |
| 1135 | entry.ptr = r_read_ptr.read(); |
| 1136 | if (cached_read) // Cached read => we must update the copies |
| 1137 | { |
| 1138 | if (!is_cnt) // Not counter mode |
| 1139 | { |
| 1140 | entry.owner.srcid = m_cmd_read_srcid_fifo.read(); |
985 | | entry.owner.cache_id = r_read_copy_cache.read(); |
986 | | #endif |
987 | | |
988 | | entry.owner.inst = r_read_copy_inst.read(); |
989 | | entry.count = r_read_count.read(); |
990 | | } |
991 | | #ifdef DDEBUG |
992 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
993 | | std::cout << "In READ_DIR_HIT printing the entry of address is : " << std::endl; |
994 | | entry.print(); |
995 | | std::cout << "done" << std::endl; |
996 | | } |
997 | | #endif |
998 | | |
999 | | m_cache_directory.write(set, way, entry); |
1000 | | r_read_fsm = READ_RSP; |
1001 | | } |
1002 | | break; |
| 1161 | entry.owner.cache_id = r_read_copy_cache.read(); |
| 1162 | #endif |
| 1163 | entry.owner.inst = r_read_copy_inst.read(); |
| 1164 | entry.count = r_read_count.read(); |
| 1165 | } |
| 1166 | |
| 1167 | #if DEBUG_MEMC_READ |
| 1168 | if( m_debug_read_fsm ) |
| 1169 | { |
| 1170 | std::cout << " <MEMC.READ_DIR_HIT> Update directory entry:" |
| 1171 | << " set = " << std::dec << set |
| 1172 | << " / way = " << way |
| 1173 | << " / owner_id = " << entry.owner.srcid |
| 1174 | << " / owner_ins = " << entry.owner.inst |
| 1175 | << " / count = " << entry.count |
| 1176 | << " / is_cnt = " << entry.is_cnt << std::endl; |
| 1177 | } |
| 1178 | #endif |
| 1179 | |
| 1180 | m_cache_directory.write(set, way, entry); |
| 1181 | r_read_fsm = READ_RSP; |
| 1182 | } |
| 1183 | break; |
| 1184 | } |
| 1185 | //////////////////// |
| 1186 | case READ_HEAP_LOCK: // read data in cache, update the directory |
| 1187 | // and prepare the HEAP update |
| 1188 | { |
| 1189 | if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) |
| 1190 | { |
| 1191 | // enter counter mode when we reach the limit of copies or the heap is full |
| 1192 | bool go_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap.is_full(); |
| 1193 | |
| 1194 | // read data in the cache |
| 1195 | size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; |
| 1196 | size_t way = r_read_way.read(); |
| 1197 | for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; |
| 1198 | |
| 1199 | // update the cache directory |
| 1200 | DirectoryEntry entry; |
| 1201 | entry.valid = true; |
| 1202 | entry.is_cnt = go_cnt; |
| 1203 | entry.dirty = r_read_dirty.read(); |
| 1204 | entry.tag = r_read_tag.read(); |
| 1205 | entry.lock = r_read_lock.read(); |
| 1206 | entry.count = r_read_count.read() + 1; |
| 1207 | |
| 1208 | if (not go_cnt) // Not entering counter mode |
| 1209 | { |
| 1210 | entry.owner.srcid = r_read_copy.read(); |
| 1211 | #if L1_MULTI_CACHE |
| 1212 | entry.owner.cache_id= r_read_copy_cache.read(); |
| 1213 | #endif |
| 1214 | entry.owner.inst = r_read_copy_inst.read(); |
| 1215 | entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap |
| 1216 | } |
| 1217 | else // Entering Counter mode |
| 1218 | { |
| 1219 | entry.owner.srcid = 0; |
| 1220 | #if L1_MULTI_CACHE |
| 1221 | entry.owner.cache_id= 0; |
| 1222 | #endif |
| 1223 | entry.owner.inst = false; |
| 1224 | entry.ptr = 0; |
| 1225 | } |
| 1226 | |
| 1227 | m_cache_directory.write(set, way, entry); |
| 1228 | |
| 1229 | // prepare the heap update (add an entry, or clear the linked list) |
| 1230 | if (not go_cnt) // not switching to counter mode |
| 1231 | { |
| 1232 | // We test if the next free entry in the heap is the last |
| 1233 | HeapEntry heap_entry = m_heap.next_free_entry(); |
| 1234 | r_read_next_ptr = heap_entry.next; |
| 1235 | r_read_last_free = ( heap_entry.next == m_heap.next_free_ptr() ); |
| 1236 | |
| 1237 | r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP |
| 1238 | } |
| 1239 | else // switching to counter mode |
| 1240 | { |
| 1241 | if ( r_read_count.read()>1 ) // heap must be cleared |
| 1242 | { |
| 1243 | HeapEntry next_entry = m_heap.read(r_read_ptr.read()); |
| 1244 | r_read_next_ptr = m_heap.next_free_ptr(); |
| 1245 | m_heap.write_free_ptr(r_read_ptr.read()); |
| 1246 | |
| 1247 | if( next_entry.next == r_read_ptr.read() ) // last entry |
| 1248 | { |
| 1249 | r_read_fsm = READ_HEAP_LAST; // erase the entry |
| 1250 | } |
| 1251 | else // not the last entry |
| 1252 | { |
| 1253 | r_read_ptr = next_entry.next; |
| 1254 | r_read_fsm = READ_HEAP_ERASE; // erase the list |
| 1255 | } |
| 1256 | } |
| 1257 | else // the heap is not used / nothing to do |
| 1258 | { |
| 1259 | r_read_fsm = READ_RSP; |
| 1260 | } |
| 1261 | } |
| 1262 | |
| 1263 | #if DEBUG_MEMC_READ |
| 1264 | if( m_debug_read_fsm ) |
| 1265 | { |
| 1266 | std::cout << " <MEMC.READ_HEAP_LOCK> Update directory:" |
| 1267 | << " tag = " << std::hex << entry.tag |
| 1268 | << " set = " << std::dec << set |
| 1269 | << " way = " << way |
| 1270 | << " count = " << entry.count |
| 1271 | << " is_cnt = " << entry.is_cnt << std::endl; |
| 1272 | } |
| 1273 | #endif |
| 1274 | } |
| 1275 | break; |
| 1276 | } |
| 1277 | ///////////////////// |
| 1278 | case READ_HEAP_WRITE: // add a entry in the heap |
| 1279 | { |
| 1280 | if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) |
| 1281 | { |
| 1282 | HeapEntry heap_entry; |
| 1283 | heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); |
| 1284 | #if L1_MULTI_CACHE |
| 1285 | heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); |
| 1286 | #endif |
| 1287 | heap_entry.owner.inst = (m_cmd_read_trdid_fifo.read() & 0x2); |
| 1288 | |
| 1289 | if(r_read_count.read() == 1) // creation of a new linked list |
| 1290 | { |
| 1291 | heap_entry.next = m_heap.next_free_ptr(); |
| 1292 | } |
| 1293 | else // head insertion in existing list |
| 1294 | { |
| 1295 | heap_entry.next = r_read_ptr.read(); |
| 1296 | } |
| 1297 | m_heap.write_free_entry(heap_entry); |
| 1298 | m_heap.write_free_ptr(r_read_next_ptr.read()); |
| 1299 | if(r_read_last_free.read()) m_heap.set_full(); |
| 1300 | |
| 1301 | r_read_fsm = READ_RSP; |
| 1302 | |
| 1303 | #if DEBUG_MEMC_READ |
| 1304 | if( m_debug_read_fsm ) |
| 1305 | { |
| 1306 | std::cout << " <MEMC.READ_HEAP_WRITE> Add an entry in the heap:" |
| 1307 | << " owner_id = " << heap_entry.owner.srcid |
| 1308 | << " owner_ins = " << heap_entry.owner.inst << std::endl; |
| 1309 | } |
| 1310 | #endif |
| 1311 | } |
| 1312 | else |
| 1313 | { |
| 1314 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 1315 | << " READ_HEAP_WRITE state" << std::endl; |
| 1316 | std::cout << "Bad HEAP allocation" << std::endl; |
| 1317 | exit(0); |
| 1318 | } |
| 1319 | break; |
| 1320 | } |
| 1321 | ///////////////////// |
| 1322 | case READ_HEAP_ERASE: |
| 1323 | { |
| 1324 | if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) |
| 1325 | { |
| 1326 | HeapEntry next_entry = m_heap.read(r_read_ptr.read()); |
| 1327 | if( next_entry.next == r_read_ptr.read() ) |
| 1328 | { |
| 1329 | r_read_fsm = READ_HEAP_LAST; |
| 1330 | } |
| 1331 | else |
| 1332 | { |
| 1333 | r_read_ptr = next_entry.next; |
| 1334 | r_read_fsm = READ_HEAP_ERASE; |
| 1335 | } |
| 1336 | } |
| 1337 | else |
| 1338 | { |
| 1339 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 1340 | << " READ_HEAP_ERASE state" << std::endl; |
| 1341 | std::cout << "Bad HEAP allocation" << std::endl; |
| 1342 | exit(0); |
| 1343 | } |
| 1344 | break; |
| 1345 | } |
| 1346 | //////////////////// |
| 1347 | case READ_HEAP_LAST: |
| 1348 | { |
| 1349 | if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) |
| 1350 | { |
| 1351 | HeapEntry last_entry; |
| 1352 | last_entry.owner.srcid = 0; |
| 1353 | #if L1_MULTI_CACHE |
| 1354 | last_entry.owner.cache_id = 0; |
| 1355 | #endif |
| 1356 | last_entry.owner.inst = false; |
| 1357 | |
| 1358 | if(m_heap.is_full()) |
| 1359 | { |
| 1360 | last_entry.next = r_read_ptr.read(); |
| 1361 | m_heap.unset_full(); |
| 1362 | } |
| 1363 | else |
| 1364 | { |
| 1365 | last_entry.next = r_read_next_ptr.read(); |
| 1366 | } |
| 1367 | m_heap.write(r_read_ptr.read(),last_entry); |
| 1368 | r_read_fsm = READ_RSP; |
| 1369 | } |
| 1370 | else |
| 1371 | { |
| 1372 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 1373 | << " READ_HEAP_LAST state" << std::endl; |
| 1374 | std::cout << "Bad HEAP allocation" << std::endl; |
| 1375 | exit(0); |
| 1376 | } |
| 1377 | break; |
1005 | | case READ_HEAP_LOCK: |
1006 | | { |
1007 | | if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { |
1008 | | bool is_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap_directory.is_full(); |
1009 | | // read data in the cache |
1010 | | size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; |
1011 | | size_t way = r_read_way.read(); |
1012 | | for ( size_t i=0 ; i<m_words ; i++ ) { |
1013 | | r_read_data[i] = m_cache_data[way][set][i]; |
1014 | | } |
1015 | | |
1016 | | // update the cache directory (for the copies) |
1017 | | DirectoryEntry entry; |
1018 | | entry.valid = true; |
1019 | | entry.is_cnt = is_cnt; // when we reach the limit of copies or the heap is full |
1020 | | entry.dirty = r_read_dirty.read(); |
1021 | | entry.tag = r_read_tag.read(); |
1022 | | entry.lock = r_read_lock.read(); |
1023 | | if(!is_cnt){ // Not counter mode |
1024 | | entry.owner.srcid = r_read_copy.read(); |
1025 | | #if L1_MULTI_CACHE |
1026 | | entry.owner.cache_id= r_read_copy_cache.read(); |
1027 | | #endif |
1028 | | entry.owner.inst = r_read_copy_inst.read(); |
1029 | | entry.count = r_read_count.read() + 1; |
1030 | | entry.ptr = m_heap_directory.next_free_ptr(); |
1031 | | } else { // Counter mode |
1032 | | entry.owner.srcid = 0; |
1033 | | #if L1_MULTI_CACHE |
1034 | | entry.owner.cache_id= 0; |
1035 | | #endif |
1036 | | entry.owner.inst = false; |
1037 | | entry.count = r_read_count.read() + 1; |
1038 | | entry.ptr = 0; |
1039 | | } |
1040 | | #ifdef DDEBUG |
1041 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1042 | | std::cout << "In READ_HEAP_LOCK printing the entry of address is : " << std::endl; |
1043 | | entry.print(); |
1044 | | std::cout << "done" << std::endl; |
1045 | | } |
1046 | | #endif |
1047 | | |
1048 | | m_cache_directory.write(set, way, entry); |
1049 | | |
1050 | | if(!is_cnt){ |
1051 | | HeapEntry free_heap_entry = m_heap_directory.next_free_entry(); |
1052 | | r_read_next_ptr = free_heap_entry.next; |
1053 | | if( free_heap_entry.next == m_heap_directory.next_free_ptr() ) { // Last free heap entry |
1054 | | r_read_last_free = true; |
1055 | | } else { |
1056 | | r_read_last_free = false; |
1057 | | } |
1058 | | r_read_fsm = READ_HEAP_WRITE; // we add an entry in the list of copies |
1059 | | } else { |
1060 | | if(r_read_count.read()>1) { // else there is no list of copies... |
1061 | | HeapEntry next_entry = m_heap_directory.read(r_read_ptr.read()); |
1062 | | r_read_next_ptr = m_heap_directory.next_free_ptr(); |
1063 | | m_heap_directory.write_free_ptr(r_read_ptr.read()); |
1064 | | if( next_entry.next == r_read_ptr.read() ) { // The last list member |
1065 | | r_read_fsm = READ_HEAP_LAST; // we erase the list of copies (counter mode) |
1066 | | } else { // Not the end of the list |
1067 | | r_read_ptr = next_entry.next; |
1068 | | r_read_fsm = READ_HEAP_ERASE; // we erase the list of copies (counter mode) |
1069 | | } |
1070 | | } else { |
1071 | | r_read_fsm = READ_RSP; |
1072 | | } |
1073 | | } |
1074 | | } |
1075 | | break; |
1076 | | } |
1077 | | ////////////// |
1078 | | case READ_HEAP_WRITE: |
1079 | | { |
1080 | | if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ |
1081 | | bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); |
1082 | | HeapEntry new_heap_entry; |
1083 | | new_heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); |
1084 | | #if L1_MULTI_CACHE |
1085 | | new_heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); |
1086 | | #endif |
1087 | | new_heap_entry.owner.inst = inst_read; |
1088 | | if(r_read_count.read() == 1){ // creation of a new list |
1089 | | new_heap_entry.next = m_heap_directory.next_free_ptr(); |
1090 | | } else { // it is an insertion |
1091 | | new_heap_entry.next = r_read_ptr.read(); |
1092 | | } |
1093 | | m_heap_directory.write_free_entry(new_heap_entry); |
1094 | | m_heap_directory.write_free_ptr(r_read_next_ptr.read()); |
1095 | | if(r_read_last_free.read()) { |
1096 | | m_heap_directory.set_full(); |
1097 | | } |
1098 | | |
1099 | | r_read_fsm = READ_RSP; |
1100 | | } else { |
1101 | | ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); |
1102 | | } |
1103 | | break; |
1104 | | } |
1105 | | ////////////// |
1106 | | case READ_HEAP_ERASE: |
1107 | | { |
1108 | | if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ |
1109 | | HeapEntry next_entry = m_heap_directory.read(r_read_ptr.read()); |
1110 | | if( next_entry.next == r_read_ptr.read() ){ |
1111 | | r_read_fsm = READ_HEAP_LAST; |
1112 | | } else { |
1113 | | r_read_ptr = next_entry.next; |
1114 | | r_read_fsm = READ_HEAP_ERASE; |
1115 | | } |
1116 | | } else { |
1117 | | ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); |
1118 | | } |
1119 | | break; |
1120 | | } |
1121 | | ////////////// |
1122 | | case READ_HEAP_LAST: |
1123 | | { |
1124 | | if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ |
1125 | | HeapEntry last_entry; |
1126 | | last_entry.owner.srcid = 0; |
1127 | | #if L1_MULTI_CACHE |
1128 | | last_entry.owner.cache_id = 0; |
1129 | | #endif |
1130 | | last_entry.owner.inst = false; |
1131 | | if(m_heap_directory.is_full()){ |
1132 | | last_entry.next = r_read_ptr.read(); |
1133 | | m_heap_directory.unset_full(); |
1134 | | } else { |
1135 | | last_entry.next = r_read_next_ptr.read(); |
1136 | | } |
1137 | | m_heap_directory.write(r_read_ptr.read(),last_entry); |
1138 | | r_read_fsm = READ_RSP; |
1139 | | } else { |
1140 | | ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); |
1141 | | } |
1142 | | break; |
1143 | | } |
1144 | | ////////////// |
1145 | | case READ_RSP: // request the TGT_RSP FSM to return data |
1146 | | { |
1147 | | if( !r_read_to_tgt_rsp_req ) { |
1148 | | for ( size_t i=0 ; i<m_words ; i++ ) { |
1149 | | r_read_to_tgt_rsp_data[i] = r_read_data[i]; |
1150 | | } |
1151 | | r_read_to_tgt_rsp_word = m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; |
1152 | | r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); |
1153 | | cmd_read_fifo_get = true; |
1154 | | r_read_to_tgt_rsp_req = true; |
1155 | | r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); |
1156 | | r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); |
1157 | | r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); |
1158 | | r_read_fsm = READ_IDLE; |
1159 | | } |
1160 | | break; |
| 1380 | case READ_RSP: // request the TGT_RSP FSM to return data |
| 1381 | { |
| 1382 | if( !r_read_to_tgt_rsp_req ) |
| 1383 | { |
| 1384 | for ( size_t i=0 ; i<m_words ; i++ ) r_read_to_tgt_rsp_data[i] = r_read_data[i]; |
| 1385 | r_read_to_tgt_rsp_word = m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; |
| 1386 | r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); |
| 1387 | r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); |
| 1388 | r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); |
| 1389 | r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); |
| 1390 | cmd_read_fifo_get = true; |
| 1391 | r_read_to_tgt_rsp_req = true; |
| 1392 | r_read_fsm = READ_IDLE; |
| 1393 | |
| 1394 | #if DEBUG_MEMC_READ |
| 1395 | if( m_debug_read_fsm ) |
| 1396 | { |
| 1397 | std::cout << " <MEMC.READ_RSP> Request the TGT_RSP FSM to return data:" |
| 1398 | << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() |
| 1399 | << " / address = " << m_cmd_read_addr_fifo.read() |
| 1400 | << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; |
| 1401 | } |
| 1402 | #endif |
| 1403 | } |
| 1404 | break; |
1190 | | case READ_TRT_SET: |
1191 | | { |
1192 | | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { |
1193 | | m_transaction_tab.set(r_read_trt_index.read(), |
1194 | | true, |
1195 | | m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], |
1196 | | m_cmd_read_srcid_fifo.read(), |
1197 | | m_cmd_read_trdid_fifo.read(), |
1198 | | m_cmd_read_pktid_fifo.read(), |
1199 | | true, |
1200 | | m_cmd_read_length_fifo.read(), |
1201 | | m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], |
1202 | | std::vector<be_t>(m_words,0), |
1203 | | std::vector<data_t>(m_words,0)); |
1204 | | #ifdef TDEBUG |
1205 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1206 | | std::cout << sc_time_stamp() << " " << name() << " READ_TRT_SET transaction table : " << std::endl; |
1207 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
1208 | | m_transaction_tab.print(i); |
1209 | | } |
1210 | | #endif |
1211 | | |
1212 | | r_read_fsm = READ_XRAM_REQ; |
1213 | | } |
1214 | | break; |
1215 | | } |
1216 | | ///////////////////// |
1217 | | case READ_XRAM_REQ: |
1218 | | { |
1219 | | if( !r_read_to_ixr_cmd_req ) { |
1220 | | cmd_read_fifo_get = true; |
1221 | | r_read_to_ixr_cmd_req = true; |
1222 | | r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; |
1223 | | r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); |
1224 | | r_read_fsm = READ_IDLE; |
1225 | | } |
1226 | | break; |
| 1443 | case READ_TRT_SET: // register get transaction in TRT |
| 1444 | { |
| 1445 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) |
| 1446 | { |
| 1447 | m_transaction_tab.set(r_read_trt_index.read(), |
| 1448 | true, |
| 1449 | m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], |
| 1450 | m_cmd_read_srcid_fifo.read(), |
| 1451 | m_cmd_read_trdid_fifo.read(), |
| 1452 | m_cmd_read_pktid_fifo.read(), |
| 1453 | true, |
| 1454 | m_cmd_read_length_fifo.read(), |
| 1455 | m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], |
| 1456 | std::vector<be_t>(m_words,0), |
| 1457 | std::vector<data_t>(m_words,0)); |
| 1458 | #if DEBUG_MEMC_READ |
| 1459 | if( m_debug_read_fsm ) |
| 1460 | { |
| 1461 | std::cout << " <MEMC.READ_TRT_SET> Write in Transaction Table: " << std::hex |
| 1462 | << " address = " << m_cmd_read_addr_fifo.read() |
| 1463 | << " / srcid = " << m_cmd_read_srcid_fifo.read() << std::endl; |
| 1464 | } |
| 1465 | #endif |
| 1466 | r_read_fsm = READ_TRT_REQ; |
| 1467 | } |
| 1468 | break; |
| 1469 | } |
| 1470 | ////////////////// |
| 1471 | case READ_TRT_REQ: // consume the read request in the FIFO, |
| 1472 | // and send it to the ixr_cmd_fsm |
| 1473 | { |
| 1474 | if( not r_read_to_ixr_cmd_req ) |
| 1475 | { |
| 1476 | cmd_read_fifo_get = true; |
| 1477 | r_read_to_ixr_cmd_req = true; |
| 1478 | r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; |
| 1479 | r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); |
| 1480 | r_read_fsm = READ_IDLE; |
| 1481 | |
| 1482 | #if DEBUG_MEMC_READ |
| 1483 | if( m_debug_read_fsm ) |
| 1484 | { |
| 1485 | std::cout << " <MEMC.READ_TRT_REQ> Request GET transaction for address " |
| 1486 | << m_cmd_read_addr_fifo.read() << std::endl; |
| 1487 | } |
| 1488 | #endif |
| 1489 | } |
| 1490 | break; |
1294 | | case WRITE_NEXT: // copy next word of a write burst in local buffer |
1295 | | { |
1296 | | if ( m_cmd_write_addr_fifo.rok() ) { |
1297 | | m_cpt_write_cells++; |
1298 | | |
1299 | | // check that the next word is in the same cache line |
1300 | | ASSERT( (m_nline[(vci_addr_t)(r_write_address.read())] == m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) |
1301 | | ,"VCI_MEM_CACHE write error in vci_mem_cache : write burst over a line" ); |
1302 | | // consume a word in the FIFO & write it in the local buffer |
1303 | | cmd_write_fifo_get=true; |
1304 | | size_t index = r_write_word_index.read() + r_write_word_count.read(); |
1305 | | r_write_be[index] = m_cmd_write_be_fifo.read(); |
1306 | | r_write_data[index] = m_cmd_write_data_fifo.read(); |
1307 | | r_write_word_count = r_write_word_count.read() + 1; |
1308 | | if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) |
1309 | | r_write_byte=true; |
1310 | | if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; |
1311 | | } |
1312 | | break; |
| 1528 | case WRITE_IDLE: // copy first word of a write burst in local buffer |
| 1529 | { |
| 1530 | if ( m_cmd_write_addr_fifo.rok() ) |
| 1531 | { |
| 1532 | m_cpt_write++; |
| 1533 | m_cpt_write_cells++; |
| 1534 | |
| 1535 | // consume a word in the FIFO & write it in the local buffer |
| 1536 | cmd_write_fifo_get = true; |
| 1537 | size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; |
| 1538 | |
| 1539 | r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); |
| 1540 | r_write_word_index = index; |
| 1541 | r_write_word_count = 1; |
| 1542 | r_write_data[index] = m_cmd_write_data_fifo.read(); |
| 1543 | r_write_srcid = m_cmd_write_srcid_fifo.read(); |
| 1544 | r_write_trdid = m_cmd_write_trdid_fifo.read(); |
| 1545 | r_write_pktid = m_cmd_write_pktid_fifo.read(); |
| 1546 | |
| 1547 | // initialize the be field for all words |
| 1548 | for ( size_t i=0 ; i<m_words ; i++ ) |
| 1549 | { |
| 1550 | if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); |
| 1551 | else r_write_be[i] = 0x0; |
| 1552 | } |
| 1553 | |
| 1554 | if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) |
| 1555 | r_write_byte = true; |
| 1556 | else |
| 1557 | r_write_byte = false; |
| 1558 | |
| 1559 | if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; |
| 1560 | else r_write_fsm = WRITE_NEXT; |
| 1561 | |
| 1562 | #if DEBUG_MEMC_WRITE |
| 1563 | if( m_debug_write_fsm ) |
| 1564 | { |
| 1565 | std::cout << " <MEMC.WRITE_IDLE> Write request " |
| 1566 | << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() |
| 1567 | << " / address = " << m_cmd_write_addr_fifo.read() |
| 1568 | << " / data = " << m_cmd_write_data_fifo.read() << std::endl; |
| 1569 | } |
| 1570 | #endif |
| 1571 | } |
| 1572 | break; |
| 1573 | } |
| 1574 | //////////////// |
| 1575 | case WRITE_NEXT: // copy next word of a write burst in local buffer |
| 1576 | { |
| 1577 | if ( m_cmd_write_addr_fifo.rok() ) |
| 1578 | { |
| 1579 | |
| 1580 | #if DEBUG_MEMC_WRITE |
| 1581 | if( m_debug_write_fsm ) |
| 1582 | { |
| 1583 | std::cout << " <MEMC.WRITE_NEXT> Write another word in local buffer" << std::endl; |
| 1584 | } |
| 1585 | #endif |
| 1586 | m_cpt_write_cells++; |
| 1587 | |
| 1588 | // check that the next word is in the same cache line |
| 1589 | if ( (m_nline[(vci_addr_t)(r_write_address.read())] != |
| 1590 | m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) ) |
| 1591 | { |
| 1592 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl; |
| 1593 | std::cout << "all words in a write burst must be in same cache line" << std::endl; |
| 1594 | exit(0); |
| 1595 | } |
| 1596 | |
| 1597 | // consume a word in the FIFO & write it in the local buffer |
| 1598 | cmd_write_fifo_get=true; |
| 1599 | size_t index = r_write_word_index.read() + r_write_word_count.read(); |
| 1600 | |
| 1601 | r_write_be[index] = m_cmd_write_be_fifo.read(); |
| 1602 | r_write_data[index] = m_cmd_write_data_fifo.read(); |
| 1603 | r_write_word_count = r_write_word_count.read() + 1; |
| 1604 | |
| 1605 | if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) |
| 1606 | r_write_byte = true; |
| 1607 | |
| 1608 | if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; |
| 1609 | } |
| 1610 | break; |
1328 | | r_write_copy_cache= entry.owner.cache_id; |
1329 | | #endif |
1330 | | r_write_copy_inst = entry.owner.inst; |
1331 | | r_write_count = entry.count; |
1332 | | r_write_ptr = entry.ptr; |
1333 | | r_write_way = way; |
1334 | | if( entry.is_cnt && entry.count ) { |
1335 | | r_write_fsm = WRITE_DIR_HIT_READ; |
1336 | | } else { |
1337 | | if(r_write_byte.read()) |
1338 | | r_write_fsm = WRITE_DIR_HIT_READ; |
1339 | | else r_write_fsm = WRITE_DIR_HIT; |
1340 | | } |
1341 | | } else { |
1342 | | r_write_fsm = WRITE_TRT_LOCK; |
1343 | | } |
1344 | | } |
1345 | | break; |
| 1628 | r_write_copy_cache = entry.owner.cache_id; |
| 1629 | #endif |
| 1630 | r_write_copy_inst = entry.owner.inst; |
| 1631 | r_write_count = entry.count; |
| 1632 | r_write_ptr = entry.ptr; |
| 1633 | r_write_way = way; |
| 1634 | |
| 1635 | if( entry.is_cnt && entry.count ) |
| 1636 | { |
| 1637 | r_write_fsm = WRITE_DIR_HIT_READ; |
| 1638 | } |
| 1639 | else |
| 1640 | { |
| 1641 | if (r_write_byte.read()) r_write_fsm = WRITE_DIR_HIT_READ; |
| 1642 | else r_write_fsm = WRITE_DIR_HIT; |
| 1643 | } |
| 1644 | } |
| 1645 | else // miss |
| 1646 | { |
| 1647 | r_write_fsm = WRITE_TRT_LOCK; |
| 1648 | } |
| 1649 | |
| 1650 | #if DEBUG_MEMC_WRITE |
| 1651 | if( m_debug_write_fsm ) |
| 1652 | { |
| 1653 | std::cout << " <MEMC.WRITE_DIR_LOCK> Check the directory: " |
| 1654 | << " address = " << r_write_address.read() |
| 1655 | << " hit = " << entry.valid |
| 1656 | << " count = " << std::dec << entry.count |
| 1657 | << " is_cnt = " << entry.is_cnt << std::endl; |
| 1658 | } |
| 1659 | #endif |
| 1660 | } |
| 1661 | break; |
| 1662 | } |
| 1663 | //////////////////////// |
| 1664 | case WRITE_DIR_HIT_READ: // read the cache and complete the buffer when be!=0xF |
| 1665 | { |
| 1666 | // update local buffer |
| 1667 | size_t set = m_y[(vci_addr_t)(r_write_address.read())]; |
| 1668 | size_t way = r_write_way.read(); |
| 1669 | for(size_t i=0 ; i<m_words ; i++) |
| 1670 | { |
| 1671 | data_t mask = 0; |
| 1672 | if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; |
| 1673 | if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; |
| 1674 | if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; |
| 1675 | if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; |
| 1676 | |
| 1677 | // complete only if mask is not null (for energy consumption) |
| 1678 | if ( r_write_be[i].read() || r_write_is_cnt.read() ) |
| 1679 | { |
| 1680 | r_write_data[i] = (r_write_data[i].read() & mask) | |
| 1681 | (m_cache_data[way][set][i] & ~mask); |
| 1682 | } |
| 1683 | } // end for |
| 1684 | |
| 1685 | // test if a coherence broadcast is required |
| 1686 | if( r_write_is_cnt.read() && r_write_count.read() ) r_write_fsm = WRITE_TRT_WRITE_LOCK; |
| 1687 | else r_write_fsm = WRITE_DIR_HIT; |
| 1688 | |
| 1689 | #if DEBUG_MEMC_WRITE |
| 1690 | if( m_debug_write_fsm ) |
| 1691 | { |
| 1692 | if( r_write_is_cnt.read() && r_write_count.read() ) |
| 1693 | { |
| 1694 | std::cout << " <MEMC.WRITE_DIR_HIT_READ> Read the cache to complete local buffer /" |
| 1695 | << " coherence broadcast required" << std::endl; |
| 1696 | } |
| 1697 | else |
| 1698 | { |
| 1699 | std::cout << " <MEMC.WRITE_DIR_HIT_READ> Read the cache to complete local buffer" |
| 1700 | << std::endl; |
| 1701 | } |
| 1702 | } |
| 1703 | #endif |
| 1704 | break; |
1399 | | bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); |
1400 | | |
1401 | | if( no_update ) // no update |
1402 | | { |
1403 | | // write data in cache |
1404 | | for(size_t i=0 ; i<m_words ; i++) { |
1405 | | if ( r_write_be[i].read() ) { |
1406 | | m_cache_data[way][set][i] = r_write_data[i].read(); |
1407 | | } |
1408 | | } // end for |
1409 | | } |
1410 | | |
1411 | | size_t count_signal = r_write_count.read(); |
1412 | | if(owner){ |
1413 | | count_signal = count_signal - 1; |
1414 | | } |
1415 | | r_write_count = count_signal; |
1416 | | r_write_to_dec = false; |
1417 | | |
1418 | | if ( no_update ) r_write_fsm = WRITE_RSP; |
1419 | | else |
| 1736 | // no_update is true when there is no need for coherence transaction |
| 1737 | bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); |
| 1738 | |
| 1739 | // write data in the cache if no transaction on the coherence network |
| 1740 | if( no_update ) |
| 1741 | { |
| 1742 | for(size_t i=0 ; i<m_words ; i++) |
| 1743 | { |
| 1744 | if ( r_write_be[i].read() ) m_cache_data[way][set][i] = r_write_data[i].read(); |
| 1745 | } |
| 1746 | } |
| 1747 | |
| 1748 | if ( owner ) r_write_count = r_write_count.read() - 1; |
| 1749 | r_write_to_dec = false; |
| 1750 | |
| 1751 | if ( no_update ) // Write transaction completed |
| 1752 | { |
| 1753 | r_write_fsm = WRITE_RSP; |
| 1754 | } |
| 1755 | else // coherence update required |
| 1756 | { |
| 1757 | if( !r_write_to_init_cmd_multi_req.read() && |
| 1758 | !r_write_to_init_cmd_brdcast_req.read() ) r_write_fsm = WRITE_UPT_LOCK; |
| 1759 | else r_write_fsm = WRITE_WAIT; |
| 1760 | } |
| 1761 | |
| 1762 | #if DEBUG_MEMC_WRITE |
| 1763 | if( m_debug_write_fsm ) |
| 1764 | { |
| 1765 | if ( no_update ) |
| 1766 | { |
| 1767 | std::cout << " <MEMC.WRITE_DIR_HIT> Write into cache / No coherence transaction" |
| 1768 | << std::endl; |
| 1769 | } |
| 1770 | else |
| 1771 | { |
| 1772 | std::cout << " <MEMC.WRITE_DIR_HIT> Coherence update required:" |
| 1773 | << " is_cnt = " << r_write_is_cnt.read() |
| 1774 | << " count = " << std::dec << r_write_count.read() |
| 1775 | << std::endl; |
| 1776 | } |
| 1777 | } |
| 1778 | #endif |
| 1779 | break; |
| 1780 | } |
| 1781 | ///////////////////// |
| 1782 | case WRITE_UPT_LOCK: // Try to register the update request in UPT |
| 1783 | { |
| 1784 | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) |
| 1785 | { |
| 1786 | bool wok = false; |
| 1787 | size_t index = 0; |
| 1788 | size_t srcid = r_write_srcid.read(); |
| 1789 | size_t trdid = r_write_trdid.read(); |
| 1790 | size_t pktid = r_write_pktid.read(); |
| 1791 | addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
| 1792 | size_t nb_copies = r_write_count.read(); |
| 1793 | size_t set = m_y[(vci_addr_t)(r_write_address.read())]; |
| 1794 | size_t way = r_write_way.read(); |
| 1795 | |
| 1796 | wok = m_update_tab.set(true, // it's an update transaction |
| 1797 | false, // it's not a broadcast |
| 1798 | true, // it needs a response |
| 1799 | srcid, |
| 1800 | trdid, |
| 1801 | pktid, |
| 1802 | nline, |
| 1803 | nb_copies, |
| 1804 | index); |
| 1805 | if ( wok ) // write data in cache |
| 1806 | { |
| 1807 | for(size_t i=0 ; i<m_words ; i++) |
| 1808 | { |
| 1809 | if ( r_write_be[i].read() ) m_cache_data[way][set][i] = r_write_data[i].read(); |
| 1810 | } |
| 1811 | } |
| 1812 | |
| 1813 | #if DEBUG_MEMC_WRITE |
| 1814 | if( m_debug_write_fsm ) |
| 1815 | { |
| 1816 | if ( wok ) |
| 1817 | { |
| 1818 | std::cout << " <MEMC.WRITE_UPT_LOCK> Register the multicast update in UPT / " |
| 1819 | << " nb_copies = " << r_write_count.read() << std::endl; |
| 1820 | //m_update_tab.print(); |
| 1821 | } |
| 1822 | } |
| 1823 | #endif |
| 1824 | r_write_upt_index = index; |
| 1825 | // releases the lock protecting the Update Table and the Directory if no entry... |
| 1826 | if ( wok ) r_write_fsm = WRITE_HEAP_LOCK; |
| 1827 | else r_write_fsm = WRITE_WAIT; |
| 1828 | } |
| 1829 | break; |
| 1830 | } |
| 1831 | ///////////////////// |
| 1832 | case WRITE_HEAP_LOCK: |
| 1833 | { |
| 1834 | if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ) |
| 1835 | { |
| 1836 | |
| 1837 | #if DEBUG_MEMC_WRITE |
| 1838 | if( m_debug_write_fsm ) |
| 1839 | { |
| 1840 | std::cout << " <MEMC.WRITE_HEAP_LOCK> Get acces to the HEAP" << std::endl; |
| 1841 | } |
| 1842 | #endif |
| 1843 | r_write_fsm = WRITE_UPT_REQ; |
| 1844 | } |
| 1845 | break; |
| 1846 | } |
| 1847 | ////////////////// |
| 1848 | case WRITE_UPT_REQ: |
| 1849 | { |
| 1850 | if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_WRITE ) |
| 1851 | { |
| 1852 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_REQ state" << std::endl; |
| 1853 | std::cout << "bad HEAP allocation" << std::endl; |
| 1854 | exit(0); |
| 1855 | } |
| 1856 | |
1421 | | !r_write_to_init_cmd_brdcast_req.read() ) |
1422 | | r_write_fsm = WRITE_UPT_LOCK; |
1423 | | else |
1424 | | r_write_fsm = WRITE_WAIT; |
1425 | | break; |
| 1858 | !r_write_to_init_cmd_brdcast_req.read() ) |
| 1859 | { |
| 1860 | r_write_to_init_cmd_brdcast_req = false; |
| 1861 | r_write_to_init_cmd_trdid = r_write_upt_index.read(); |
| 1862 | r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
| 1863 | r_write_to_init_cmd_index = r_write_word_index.read(); |
| 1864 | r_write_to_init_cmd_count = r_write_word_count.read(); |
| 1865 | |
| 1866 | for(size_t i=0; i<m_words ; i++) |
| 1867 | { |
| 1868 | r_write_to_init_cmd_be[i]=r_write_be[i].read(); |
| 1869 | } |
| 1870 | |
| 1871 | size_t min = r_write_word_index.read(); |
| 1872 | size_t max = r_write_word_index.read() + r_write_word_count.read(); |
| 1873 | for (size_t i=min ; i<max ; i++) r_write_to_init_cmd_data[i] = r_write_data[i]; |
| 1874 | |
| 1875 | if( (r_write_copy.read() != r_write_srcid.read()) or |
| 1876 | #if L1_MULTI_CACHE |
| 1877 | (r_write_copy_cache.read() != r_write_pktid.read()) or |
| 1878 | #endif |
| 1879 | r_write_copy_inst.read() ) |
| 1880 | { |
| 1881 | // We put the first copy in the fifo |
| 1882 | write_to_init_cmd_fifo_put = true; |
| 1883 | write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); |
| 1884 | write_to_init_cmd_fifo_srcid = r_write_copy.read(); |
| 1885 | #if L1_MULTI_CACHE |
| 1886 | write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); |
| 1887 | #endif |
| 1888 | if(r_write_count.read() == 1) |
| 1889 | { |
| 1890 | r_write_fsm = WRITE_IDLE; |
| 1891 | r_write_to_init_cmd_multi_req = true; |
| 1892 | } |
| 1893 | else |
| 1894 | { |
| 1895 | r_write_fsm = WRITE_UPDATE; |
| 1896 | } |
| 1897 | } |
| 1898 | else |
| 1899 | { |
| 1900 | r_write_fsm = WRITE_UPDATE; |
| 1901 | } |
| 1902 | } |
| 1903 | break; |
| 1904 | } |
| 1905 | ////////////////// |
| 1906 | case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm |
| 1907 | { |
| 1908 | if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_WRITE ) |
| 1909 | { |
| 1910 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPDATE state" << std::endl; |
| 1911 | std::cout << "bad HEAP allocation" << std::endl; |
| 1912 | exit(0); |
| 1913 | } |
| 1914 | |
| 1915 | HeapEntry entry = m_heap.read(r_write_ptr.read()); |
| 1916 | write_to_init_cmd_fifo_inst = entry.owner.inst; |
| 1917 | write_to_init_cmd_fifo_srcid = entry.owner.srcid; |
| 1918 | #if L1_MULTI_CACHE |
| 1919 | write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; |
| 1920 | #endif |
| 1921 | |
| 1922 | bool dec_upt_counter = r_write_to_dec.read(); |
| 1923 | if( (entry.owner.srcid != r_write_srcid.read()) or |
| 1924 | #if L1_MULTI_CACHE |
| 1925 | (entry.owner.cache_id != r_write_pktid.read()) or |
| 1926 | #endif |
| 1927 | entry.owner.inst) |
| 1928 | { |
| 1929 | write_to_init_cmd_fifo_put = true; |
| 1930 | } |
| 1931 | else |
| 1932 | { |
| 1933 | dec_upt_counter = true; |
| 1934 | } |
| 1935 | r_write_to_dec = dec_upt_counter; |
| 1936 | |
| 1937 | if( m_write_to_init_cmd_inst_fifo.wok() ) |
| 1938 | { |
| 1939 | r_write_ptr = entry.next; |
| 1940 | if( entry.next == r_write_ptr.read() ) // last copy |
| 1941 | { |
| 1942 | r_write_to_init_cmd_multi_req = true; |
| 1943 | if(dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; |
| 1944 | else r_write_fsm = WRITE_IDLE; |
| 1945 | } |
| 1946 | else |
| 1947 | { |
| 1948 | r_write_fsm = WRITE_UPDATE; |
| 1949 | } |
| 1950 | } |
| 1951 | else |
| 1952 | { |
| 1953 | r_write_fsm = WRITE_UPDATE; |
| 1954 | } |
| 1955 | break; |
| 1956 | } |
| 1957 | ////////////////// |
| 1958 | case WRITE_UPT_DEC: // Post another coherence update request |
| 1959 | { |
| 1960 | if ( !r_write_to_init_rsp_req.read() ) |
| 1961 | { |
| 1962 | r_write_to_init_rsp_req = true; |
| 1963 | r_write_to_init_rsp_upt_index = r_write_upt_index.read(); |
| 1964 | r_write_fsm = WRITE_IDLE; |
| 1965 | } |
| 1966 | break; |
| 1967 | } |
| 1968 | /////////////// |
| 1969 | case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write |
| 1970 | { |
| 1971 | if ( !r_write_to_tgt_rsp_req.read() ) |
| 1972 | { |
| 1973 | |
| 1974 | #if DEBUG_MEMC_WRITE |
| 1975 | if( m_debug_write_fsm ) |
| 1976 | { |
| 1977 | std::cout << " <MEMC.WRITE_RSP> Post a request to TGT_RSP FSM: rsrcid = " |
| 1978 | << std::hex << r_write_srcid.read() << std:: endl; |
| 1979 | } |
| 1980 | #endif |
| 1981 | r_write_to_tgt_rsp_req = true; |
| 1982 | r_write_to_tgt_rsp_srcid = r_write_srcid.read(); |
| 1983 | r_write_to_tgt_rsp_trdid = r_write_trdid.read(); |
| 1984 | r_write_to_tgt_rsp_pktid = r_write_pktid.read(); |
| 1985 | r_write_fsm = WRITE_IDLE; |
| 1986 | } |
| 1987 | break; |
| 1988 | } |
| 1989 | //////////////////// |
| 1990 | case WRITE_TRT_LOCK: // Miss : check Transaction Table |
| 1991 | { |
| 1992 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) |
| 1993 | { |
| 1994 | |
| 1995 | #if DEBUG_MEMC_WRITE |
| 1996 | if( m_debug_write_fsm ) |
| 1997 | { |
| 1998 | std::cout << " <MEMC.WRITE_TRT_LOCK> Check the TRT" << std::endl; |
| 1999 | } |
| 2000 | #endif |
| 2001 | size_t hit_index = 0; |
| 2002 | size_t wok_index = 0; |
| 2003 | vci_addr_t addr = (vci_addr_t)r_write_address.read(); |
| 2004 | bool hit_read = m_transaction_tab.hit_read(m_nline[addr], hit_index); |
| 2005 | bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); |
| 2006 | bool wok = !m_transaction_tab.full(wok_index); |
| 2007 | |
| 2008 | if ( hit_read ) // register the modified data in TRT |
| 2009 | { |
| 2010 | r_write_trt_index = hit_index; |
| 2011 | r_write_fsm = WRITE_TRT_DATA; |
| 2012 | m_cpt_write_miss++; |
| 2013 | } |
| 2014 | else if ( wok && !hit_write ) // set a new entry in TRT |
| 2015 | { |
| 2016 | r_write_trt_index = wok_index; |
| 2017 | r_write_fsm = WRITE_TRT_SET; |
| 2018 | m_cpt_write_miss++; |
| 2019 | } |
| 2020 | else // wait an empty entry in TRT |
| 2021 | { |
| 2022 | r_write_fsm = WRITE_WAIT; |
| 2023 | m_cpt_trt_full++; |
| 2024 | } |
| 2025 | } |
| 2026 | break; |
| 2027 | } |
| 2028 | //////////////// |
| 2029 | case WRITE_WAIT: // release the locks protecting the shared ressources |
| 2030 | { |
| 2031 | |
| 2032 | #if DEBUG_MEMC_WRITE |
| 2033 | if( m_debug_write_fsm ) |
| 2034 | { |
| 2035 | std::cout << " <MEMC.WRITE_WAIT> Releases the locks before retry" << std::endl; |
| 2036 | } |
| 2037 | #endif |
| 2038 | r_write_fsm = WRITE_DIR_LOCK; |
| 2039 | break; |
| 2040 | } |
| 2041 | /////////////////// |
| 2042 | case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) |
| 2043 | { |
| 2044 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) |
| 2045 | { |
| 2046 | std::vector<be_t> be_vector; |
| 2047 | std::vector<data_t> data_vector; |
| 2048 | be_vector.clear(); |
| 2049 | data_vector.clear(); |
| 2050 | for ( size_t i=0; i<m_words; i++ ) |
| 2051 | { |
| 2052 | be_vector.push_back(r_write_be[i]); |
| 2053 | data_vector.push_back(r_write_data[i]); |
| 2054 | } |
| 2055 | m_transaction_tab.set(r_write_trt_index.read(), |
| 2056 | true, // read request to XRAM |
| 2057 | m_nline[(vci_addr_t)(r_write_address.read())], |
| 2058 | r_write_srcid.read(), |
| 2059 | r_write_trdid.read(), |
| 2060 | r_write_pktid.read(), |
| 2061 | false, // not a processor read |
| 2062 | 0, // not a single word |
| 2063 | 0, // word index |
| 2064 | be_vector, |
| 2065 | data_vector); |
| 2066 | r_write_fsm = WRITE_XRAM_REQ; |
| 2067 | |
| 2068 | #if DEBUG_MEMC_WRITE |
| 2069 | if( m_debug_write_fsm ) |
| 2070 | { |
| 2071 | std::cout << " <MEMC.WRITE_TRT_SET> Set a new entry in TRT" << std::endl; |
| 2072 | } |
| 2073 | #endif |
| 2074 | } |
| 2075 | break; |
| 2076 | } |
| 2077 | //////////////////// |
| 2078 | case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) |
| 2079 | { |
| 2080 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) |
| 2081 | { |
| 2082 | std::vector<be_t> be_vector; |
| 2083 | std::vector<data_t> data_vector; |
| 2084 | be_vector.clear(); |
| 2085 | data_vector.clear(); |
| 2086 | for ( size_t i=0; i<m_words; i++ ) |
| 2087 | { |
| 2088 | be_vector.push_back(r_write_be[i]); |
| 2089 | data_vector.push_back(r_write_data[i]); |
| 2090 | } |
| 2091 | m_transaction_tab.write_data_mask(r_write_trt_index.read(), |
| 2092 | be_vector, |
| 2093 | data_vector); |
| 2094 | r_write_fsm = WRITE_RSP; |
| 2095 | |
| 2096 | #if DEBUG_MEMC_WRITE |
| 2097 | if( m_debug_write_fsm ) |
| 2098 | { |
| 2099 | std::cout << " <MEMC.WRITE_TRT_DATA> Modify an existing entry in TRT" << std::endl; |
| 2100 | m_transaction_tab.print( r_write_trt_index.read() ); |
| 2101 | } |
| 2102 | #endif |
| 2103 | } |
| 2104 | break; |
| 2105 | } |
| 2106 | //////////////////// |
| 2107 | case WRITE_XRAM_REQ: // send a request to IXR_CMD FSM |
| 2108 | { |
| 2109 | if ( !r_write_to_ixr_cmd_req ) |
| 2110 | { |
| 2111 | r_write_to_ixr_cmd_req = true; |
| 2112 | r_write_to_ixr_cmd_write = false; |
| 2113 | r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
| 2114 | r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); |
| 2115 | r_write_fsm = WRITE_RSP; |
| 2116 | |
| 2117 | #if DEBUG_MEMC_WRITE |
| 2118 | if( m_debug_write_fsm ) |
| 2119 | { |
| 2120 | std::cout << " <MEMC.WRITE_XRAM_REQ> Post a request to the IXR_CMD FSM" << std::endl; |
| 2121 | } |
| 2122 | #endif |
| 2123 | } |
| 2124 | break; |
| 2125 | } |
| 2126 | ////////////////////////// |
| 2127 | case WRITE_TRT_WRITE_LOCK: // Check TRT not full |
| 2128 | { |
| 2129 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) |
| 2130 | { |
| 2131 | size_t wok_index = 0; |
| 2132 | bool wok = !m_transaction_tab.full( wok_index ); |
| 2133 | if ( wok ) // set a new entry in TRT |
| 2134 | { |
| 2135 | r_write_trt_index = wok_index; |
| 2136 | r_write_fsm = WRITE_INVAL_LOCK; |
| 2137 | } |
| 2138 | else // wait an empty entry in TRT |
| 2139 | { |
| 2140 | r_write_fsm = WRITE_WAIT; |
| 2141 | } |
| 2142 | |
| 2143 | #if DEBUG_MEMC_WRITE |
| 2144 | if( m_debug_write_fsm ) |
| 2145 | { |
| 2146 | std::cout << " <MEMC.WRITE_TRT_WRITE_LOCK> Check TRT : wok = " |
| 2147 | << wok << " index = " << wok_index << std::endl; |
| 2148 | } |
| 2149 | #endif |
| 2150 | } |
| 2151 | break; |
| 2152 | } |
| 2153 | ////////////////////// |
| 2154 | case WRITE_INVAL_LOCK: |
| 2155 | { |
| 2156 | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) |
| 2157 | { |
| 2158 | bool wok = false; |
| 2159 | size_t index = 0; |
| 2160 | size_t srcid = r_write_srcid.read(); |
| 2161 | size_t trdid = r_write_trdid.read(); |
| 2162 | size_t pktid = r_write_pktid.read(); |
| 2163 | addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
| 2164 | size_t nb_copies = r_write_count.read(); |
| 2165 | |
| 2166 | wok =m_update_tab.set(false, // it's an inval transaction |
| 2167 | true, // it's a broadcast |
| 2168 | true, // it needs a response |
| 2169 | srcid, |
| 2170 | trdid, |
| 2171 | pktid, |
| 2172 | nline, |
| 2173 | nb_copies, |
| 2174 | index); |
| 2175 | |
| 2176 | #if DEBUG_MEMC_WRITE |
| 2177 | if( m_debug_write_fsm ) |
| 2178 | { |
| 2179 | if ( wok ) |
| 2180 | { |
| 2181 | std::cout << " <MEMC.WRITE_INVAL_LOCK> Register the broadcast inval in UPT / " |
| 2182 | << " nb_copies = " << r_write_count.read() << std::endl; |
| 2183 | //m_update_tab.print(); |
| 2184 | } |
| 2185 | } |
| 2186 | #endif |
| 2187 | r_write_upt_index = index; |
| 2188 | |
| 2189 | if ( wok ) r_write_fsm = WRITE_DIR_INVAL; |
| 2190 | else r_write_fsm = WRITE_WAIT; |
| 2191 | } |
| 2192 | break; |
1428 | | case WRITE_UPT_LOCK: // Try to register the request in Update Table |
1429 | | { |
1430 | | |
1431 | | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { |
1432 | | bool wok = false; |
1433 | | size_t index = 0; |
1434 | | size_t srcid = r_write_srcid.read(); |
1435 | | size_t trdid = r_write_trdid.read(); |
1436 | | size_t pktid = r_write_pktid.read(); |
1437 | | addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
1438 | | size_t nb_copies = r_write_count.read(); |
1439 | | size_t set = m_y[(vci_addr_t)(r_write_address.read())]; |
1440 | | size_t way = r_write_way.read(); |
1441 | | |
1442 | | wok =m_update_tab.set(true, // it's an update transaction |
1443 | | false, // it's not a broadcast |
1444 | | true, // it needs a response |
1445 | | srcid, |
1446 | | trdid, |
1447 | | pktid, |
1448 | | nline, |
1449 | | nb_copies, |
1450 | | index); |
1451 | | if(wok){ |
1452 | | // write data in cache |
1453 | | for(size_t i=0 ; i<m_words ; i++) { |
1454 | | if ( r_write_be[i].read() ) { |
1455 | | m_cache_data[way][set][i] = r_write_data[i].read(); |
1456 | | } |
1457 | | } // end for |
1458 | | } |
1459 | | #ifdef IDEBUG |
1460 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1461 | | if(wok){ |
1462 | | std::cout << sc_time_stamp() << " " << name() << " WRITE_UPT_LOCK update table : " << std::endl; |
1463 | | m_update_tab.print(); |
1464 | | } |
1465 | | } |
1466 | | #endif |
1467 | | r_write_upt_index = index; |
1468 | | // releases the lock protecting the Update Table and the Directory if no entry... |
1469 | | if ( wok ) r_write_fsm = WRITE_HEAP_LOCK; |
1470 | | else r_write_fsm = WRITE_WAIT; |
1471 | | } |
1472 | | break; |
1473 | | } |
1474 | | ////////////////// |
1475 | | case WRITE_HEAP_LOCK: |
1476 | | { |
1477 | | if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ){ |
1478 | | r_write_fsm = WRITE_UPT_REQ; |
1479 | | } |
1480 | | break; |
1481 | | } |
1482 | | ////////////////// |
1483 | | case WRITE_UPT_REQ: |
1484 | | { |
1485 | | ASSERT( (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) |
1486 | | ,"MemCache ERROR : bad HEAP allocation"); |
1487 | | if( !r_write_to_init_cmd_multi_req.read() && |
1488 | | !r_write_to_init_cmd_brdcast_req.read() ){ |
1489 | | r_write_to_init_cmd_brdcast_req = false; |
1490 | | r_write_to_init_cmd_trdid = r_write_upt_index.read(); |
1491 | | r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
1492 | | r_write_to_init_cmd_index = r_write_word_index.read(); |
1493 | | r_write_to_init_cmd_count = r_write_word_count.read(); |
1494 | | |
1495 | | for(size_t i=0; i<m_words ; i++){ |
1496 | | r_write_to_init_cmd_be[i]=r_write_be[i].read(); |
1497 | | } |
1498 | | |
1499 | | size_t min = r_write_word_index.read(); |
1500 | | size_t max = r_write_word_index.read() + r_write_word_count.read(); |
1501 | | for (size_t i=min ; i<max ; i++) { |
1502 | | r_write_to_init_cmd_data[i] = r_write_data[i]; |
1503 | | } |
1504 | | |
1505 | | if((r_write_copy.read() != r_write_srcid.read()) or |
1506 | | #if L1_MULTI_CACHE |
1507 | | (r_write_copy_cache.read() != r_write_pktid.read()) or |
1508 | | #endif |
1509 | | r_write_copy_inst.read() ) { |
1510 | | // We put the first copy in the fifo |
1511 | | write_to_init_cmd_fifo_put = true; |
1512 | | write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); |
1513 | | write_to_init_cmd_fifo_srcid = r_write_copy.read(); |
1514 | | #if L1_MULTI_CACHE |
1515 | | write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); |
1516 | | #endif |
1517 | | if(r_write_count.read() == 1){ |
1518 | | r_write_fsm = WRITE_IDLE; |
1519 | | r_write_to_init_cmd_multi_req = true; |
1520 | | } else { |
1521 | | r_write_fsm = WRITE_UPDATE; |
1522 | | } |
1523 | | } else { |
1524 | | r_write_fsm = WRITE_UPDATE; |
1525 | | } |
1526 | | } |
1527 | | break; |
1528 | | } |
1529 | | ////////////////// |
1530 | | case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm |
1531 | | { |
1532 | | ASSERT( (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) |
1533 | | ,"MemCache ERROR : bad HEAP allocation"); |
1534 | | HeapEntry entry = m_heap_directory.read(r_write_ptr.read()); |
1535 | | write_to_init_cmd_fifo_inst = entry.owner.inst; |
1536 | | write_to_init_cmd_fifo_srcid = entry.owner.srcid; |
1537 | | #if L1_MULTI_CACHE |
1538 | | write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; |
1539 | | #endif |
1540 | | |
1541 | | bool dec_upt_counter = r_write_to_dec.read(); |
1542 | | if( (entry.owner.srcid != r_write_srcid.read()) or |
1543 | | #if L1_MULTI_CACHE |
1544 | | (entry.owner.cache_id != r_write_pktid.read()) or |
1545 | | #endif |
1546 | | entry.owner.inst){ |
1547 | | write_to_init_cmd_fifo_put = true; |
1548 | | } else { |
1549 | | dec_upt_counter = true; |
1550 | | } |
1551 | | r_write_to_dec = dec_upt_counter; |
1552 | | |
1553 | | if( m_write_to_init_cmd_inst_fifo.wok() ){ |
1554 | | r_write_ptr = entry.next; |
1555 | | if( entry.next == r_write_ptr.read() ) { // last copy |
1556 | | r_write_to_init_cmd_multi_req = true; |
1557 | | if(dec_upt_counter){ |
1558 | | r_write_fsm = WRITE_UPT_DEC; |
1559 | | } else { |
1560 | | r_write_fsm = WRITE_IDLE; |
1561 | | } |
1562 | | } else { |
1563 | | r_write_fsm = WRITE_UPDATE; |
1564 | | } |
1565 | | } else { |
1566 | | r_write_fsm = WRITE_UPDATE; |
1567 | | } |
1568 | | break; |
1569 | | } |
1570 | | ////////////////// |
1571 | | case WRITE_UPT_DEC: |
1572 | | { |
1573 | | if(!r_write_to_init_rsp_req.read()){ |
1574 | | r_write_to_init_rsp_req = true; |
1575 | | r_write_to_init_rsp_upt_index = r_write_upt_index.read(); |
1576 | | r_write_fsm = WRITE_IDLE; |
1577 | | } |
1578 | | break; |
1579 | | } |
1580 | | /////////////// |
1581 | | case WRITE_RSP: // send a request to TGT_RSP FSM to acknowledge the write |
1582 | | { |
1583 | | if ( !r_write_to_tgt_rsp_req.read() ) { |
1584 | | |
1585 | | PRINTF(" * <MEM_CACHE.WRITE> Request from %d.%d (%d)\n",(uint32_t)r_write_srcid.read(), (uint32_t)r_write_trdid.read(), (uint32_t)r_write_pktid.read()); |
1586 | | |
1587 | | r_write_to_tgt_rsp_req = true; |
1588 | | r_write_to_tgt_rsp_srcid = r_write_srcid.read(); |
1589 | | r_write_to_tgt_rsp_trdid = r_write_trdid.read(); |
1590 | | r_write_to_tgt_rsp_pktid = r_write_pktid.read(); |
1591 | | r_write_fsm = WRITE_IDLE; |
1592 | | } |
1593 | | break; |
1594 | | } |
1595 | | //////////////////// |
1596 | | case WRITE_TRT_LOCK: // Miss : check Transaction Table |
1597 | | { |
1598 | | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { |
1599 | | #ifdef TDEBUG |
1600 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1601 | | std::cout << sc_time_stamp() << " " << name() << " READ_TRT_LOCK " << std::endl; |
1602 | | } |
1603 | | #endif |
1604 | | size_t hit_index = 0; |
1605 | | size_t wok_index = 0; |
1606 | | bool hit_read = m_transaction_tab.hit_read(m_nline[(vci_addr_t)(r_write_address.read())],hit_index); |
1607 | | bool hit_write = m_transaction_tab.hit_write(m_nline[(vci_addr_t)(r_write_address.read())]); |
1608 | | bool wok = !m_transaction_tab.full(wok_index); |
1609 | | if ( hit_read ) { // register the modified data in TRT |
1610 | | r_write_trt_index = hit_index; |
1611 | | r_write_fsm = WRITE_TRT_DATA; |
1612 | | m_cpt_write_miss++; |
1613 | | } else if ( wok && !hit_write ) { // set a new entry in TRT |
1614 | | r_write_trt_index = wok_index; |
1615 | | r_write_fsm = WRITE_TRT_SET; |
1616 | | m_cpt_write_miss++; |
1617 | | } else { // wait an empty entry in TRT |
1618 | | r_write_fsm = WRITE_WAIT; |
1619 | | m_cpt_trt_full++; |
1620 | | } |
1621 | | } |
1622 | | break; |
1623 | | } |
1624 | | //////////////////// |
1625 | | case WRITE_WAIT: // release the lock protecting TRT |
1626 | | { |
1627 | | r_write_fsm = WRITE_DIR_LOCK; |
1628 | | break; |
1629 | | } |
1630 | | /////////////////// |
1631 | | case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) |
1632 | | { |
1633 | | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) |
1634 | | { |
1635 | | std::vector<be_t> be_vector; |
1636 | | std::vector<data_t> data_vector; |
1637 | | be_vector.clear(); |
1638 | | data_vector.clear(); |
1639 | | for ( size_t i=0; i<m_words; i++ ) |
1640 | | { |
1641 | | be_vector.push_back(r_write_be[i]); |
1642 | | data_vector.push_back(r_write_data[i]); |
1643 | | } |
| 2195 | case WRITE_DIR_INVAL: // Register a put transaction to XRAM in TRT |
| 2196 | // and invalidate the line in directory |
| 2197 | { |
| 2198 | if ( (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE ) || |
| 2199 | (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE ) || |
| 2200 | (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) ) |
| 2201 | { |
| 2202 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_DIR_INVAL state" << std::endl; |
| 2203 | std::cout << "bad TRT, DIR, or UPT allocation" << std::endl; |
| 2204 | exit(0); |
| 2205 | } |
| 2206 | |
| 2207 | // register a write request to XRAM in TRT |
1645 | | true, // read request to XRAM |
1646 | | m_nline[(vci_addr_t)(r_write_address.read())], |
1647 | | r_write_srcid.read(), |
1648 | | r_write_trdid.read(), |
1649 | | r_write_pktid.read(), |
1650 | | false, // not a processor read |
1651 | | 0, // not a single word |
1652 | | 0, // word index |
1653 | | be_vector, |
1654 | | data_vector); |
1655 | | #ifdef TDEBUG |
1656 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1657 | | std::cout << sc_time_stamp() << " " << name() << " WRITE_TRT_SET transaction table : " << std::endl; |
1658 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
1659 | | m_transaction_tab.print(i); |
1660 | | } |
1661 | | #endif |
1662 | | |
1663 | | r_write_fsm = WRITE_XRAM_REQ; |
1664 | | } |
1665 | | break; |
1666 | | } |
1667 | | /////////////////// |
1668 | | case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) |
1669 | | { |
1670 | | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { |
1671 | | std::vector<be_t> be_vector; |
1672 | | std::vector<data_t> data_vector; |
1673 | | be_vector.clear(); |
1674 | | data_vector.clear(); |
1675 | | for ( size_t i=0; i<m_words; i++ ) { |
1676 | | be_vector.push_back(r_write_be[i]); |
1677 | | data_vector.push_back(r_write_data[i]); |
1678 | | } |
1679 | | m_transaction_tab.write_data_mask(r_write_trt_index.read(), |
1680 | | be_vector, |
1681 | | data_vector); |
1682 | | r_write_fsm = WRITE_RSP; |
1683 | | #ifdef TDEBUG |
1684 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1685 | | std::cout << sc_time_stamp() << " " << name() << " WRITE_TRT_DATA transaction table : " << std::endl; |
1686 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
1687 | | m_transaction_tab.print(i); |
1688 | | } |
1689 | | #endif |
1690 | | |
1691 | | } |
1692 | | break; |
1693 | | } |
1694 | | //////////////////// |
1695 | | case WRITE_XRAM_REQ: // send a request to IXR_CMD FSM |
1696 | | { |
1697 | | |
1698 | | if ( !r_write_to_ixr_cmd_req ) { |
1699 | | r_write_to_ixr_cmd_req = true; |
1700 | | r_write_to_ixr_cmd_write = false; |
1701 | | r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
1702 | | r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); |
1703 | | r_write_fsm = WRITE_RSP; |
1704 | | } |
1705 | | break; |
1706 | | } |
1707 | | //////////////////// |
1708 | | case WRITE_TRT_WRITE_LOCK: |
1709 | | { |
1710 | | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { |
1711 | | size_t wok_index = 0; |
1712 | | bool wok = !m_transaction_tab.full(wok_index); |
1713 | | if ( wok ) { // set a new entry in TRT |
1714 | | r_write_trt_index = wok_index; |
1715 | | r_write_fsm = WRITE_INVAL_LOCK; |
1716 | | } else { // wait an empty entry in TRT |
1717 | | r_write_fsm = WRITE_WAIT; |
1718 | | } |
1719 | | } |
1720 | | |
1721 | | break; |
1722 | | } |
1723 | | //////////////////// |
1724 | | case WRITE_INVAL_LOCK: |
1725 | | { |
1726 | | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { |
1727 | | bool wok = false; |
1728 | | size_t index = 0; |
1729 | | size_t srcid = r_write_srcid.read(); |
1730 | | size_t trdid = r_write_trdid.read(); |
1731 | | size_t pktid = r_write_pktid.read(); |
1732 | | addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
1733 | | size_t nb_copies = r_write_count.read(); |
1734 | | |
1735 | | wok =m_update_tab.set(false, // it's an inval transaction |
1736 | | true, // it's a broadcast |
1737 | | true, // it needs a response |
1738 | | srcid, |
1739 | | trdid, |
1740 | | pktid, |
1741 | | nline, |
1742 | | nb_copies, |
1743 | | index); |
1744 | | #ifdef IDEBUG |
1745 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1746 | | if(wok){ |
1747 | | std::cout << sc_time_stamp() << " " << name() << " WRITE_INVAL_LOCK update table : " << std::endl; |
1748 | | m_update_tab.print(); |
1749 | | } |
1750 | | } |
1751 | | #endif |
1752 | | r_write_upt_index = index; |
1753 | | // releases the lock protecting Update Table if no entry... |
1754 | | if ( wok ) r_write_fsm = WRITE_DIR_INVAL; |
1755 | | else r_write_fsm = WRITE_WAIT; |
1756 | | } |
1757 | | |
1758 | | break; |
1759 | | } |
1760 | | //////////////////// |
1761 | | case WRITE_DIR_INVAL: |
1762 | | { |
1763 | | ASSERT(((r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) && |
1764 | | (r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) && |
1765 | | (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE )) |
1766 | | ,"MemCache ERROR : bad TRT,DIR or UPT allocation error"); |
1767 | | m_transaction_tab.set(r_write_trt_index.read(), |
1768 | | false, // write request to XRAM |
1769 | | m_nline[(vci_addr_t)(r_write_address.read())], |
1770 | | 0, |
1771 | | 0, |
1772 | | 0, |
1773 | | false, // not a processor read |
1774 | | 0, // not a single word |
1775 | | 0, // word index |
1776 | | std::vector<be_t>(m_words,0), |
1777 | | std::vector<data_t>(m_words,0)); |
1778 | | #ifdef TDEBUG |
1779 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
1780 | | std::cout << sc_time_stamp() << " " << name() << " WRITE_DIR_INVAL transaction table : " << std::endl; |
1781 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
1782 | | m_transaction_tab.print(i); |
1783 | | } |
1784 | | #endif |
1785 | | |
| 2209 | false, // write request to XRAM |
| 2210 | m_nline[(vci_addr_t)(r_write_address.read())], |
| 2211 | 0, |
| 2212 | 0, |
| 2213 | 0, |
| 2214 | false, // not a processor read |
| 2215 | 0, // not a single word |
| 2216 | 0, // word index |
| 2217 | std::vector<be_t>(m_words,0), |
| 2218 | std::vector<data_t>(m_words,0)); |
1807 | | //////////////////// |
1808 | | case WRITE_INVAL: |
1809 | | { |
1810 | | if ( !r_write_to_init_cmd_multi_req.read() && |
1811 | | !r_write_to_init_cmd_brdcast_req.read() ) { |
1812 | | r_write_to_init_cmd_multi_req = false; |
1813 | | r_write_to_init_cmd_brdcast_req = true; |
1814 | | r_write_to_init_cmd_trdid = r_write_upt_index.read(); |
1815 | | r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
1816 | | r_write_to_init_cmd_index = 0; |
1817 | | r_write_to_init_cmd_count = 0; |
1818 | | |
1819 | | for(size_t i=0; i<m_words ; i++){ |
1820 | | r_write_to_init_cmd_be[i]=0; |
1821 | | r_write_to_init_cmd_data[i] = 0; |
1822 | | } |
1823 | | r_write_fsm = WRITE_XRAM_SEND; |
1824 | | // all inval responses |
1825 | | } |
1826 | | |
1827 | | break; |
1828 | | } |
1829 | | //////////////////// |
1830 | | case WRITE_XRAM_SEND: |
1831 | | { |
1832 | | if ( !r_write_to_ixr_cmd_req ) { |
1833 | | r_write_to_ixr_cmd_req = true; |
1834 | | r_write_to_ixr_cmd_write = true; |
1835 | | r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
1836 | | r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); |
1837 | | for(size_t i=0; i<m_words; i++){ |
1838 | | r_write_to_ixr_cmd_data[i] = r_write_data[i]; |
1839 | | } |
1840 | | r_write_fsm = WRITE_IDLE; |
1841 | | } |
1842 | | break; |
| 2248 | ///////////////// |
| 2249 | case WRITE_INVAL: // Post a coherence broadcast request to INIT_CMD FSM |
| 2250 | { |
| 2251 | if ( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) |
| 2252 | { |
| 2253 | r_write_to_init_cmd_multi_req = false; |
| 2254 | r_write_to_init_cmd_brdcast_req = true; |
| 2255 | r_write_to_init_cmd_trdid = r_write_upt_index.read(); |
| 2256 | r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
| 2257 | r_write_to_init_cmd_index = 0; |
| 2258 | r_write_to_init_cmd_count = 0; |
| 2259 | |
| 2260 | for(size_t i=0; i<m_words ; i++) |
| 2261 | { |
| 2262 | r_write_to_init_cmd_be[i]=0; |
| 2263 | r_write_to_init_cmd_data[i] = 0; |
| 2264 | } |
| 2265 | r_write_fsm = WRITE_XRAM_SEND; |
| 2266 | |
| 2267 | #if DEBUG_MEMC_WRITE |
| 2268 | if( m_debug_write_fsm ) |
| 2269 | { |
| 2270 | std::cout << " <MEMC.WRITE_INVAL> Post a broadcast request to INIT_CMD FSM" << std::endl; |
| 2271 | } |
| 2272 | #endif |
| 2273 | } |
| 2274 | break; |
| 2275 | } |
| 2276 | ///////////////////// |
| 2277 | case WRITE_XRAM_SEND: // Post a put request to IXR_CMD FSM |
| 2278 | { |
| 2279 | if ( !r_write_to_ixr_cmd_req ) |
| 2280 | { |
| 2281 | r_write_to_ixr_cmd_req = true; |
| 2282 | r_write_to_ixr_cmd_write = true; |
| 2283 | r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; |
| 2284 | r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); |
| 2285 | |
| 2286 | for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; |
| 2287 | |
| 2288 | r_write_fsm = WRITE_IDLE; |
| 2289 | |
| 2290 | #if DEBUG_MEMC_WRITE |
| 2291 | if( m_debug_write_fsm ) |
| 2292 | { |
| 2293 | std::cout << " <MEMC.WRITE_XRAM_SEND> Post a put request to IXR_CMD FSM" << std::endl; |
| 2294 | } |
| 2295 | #endif |
| 2296 | } |
| 2297 | break; |
2006 | | case IXR_RSP_TRT_READ: // write data in the TRT |
2007 | | { |
2008 | | if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) { |
2009 | | bool eop = p_vci_ixr.reop.read(); |
2010 | | data_t data = p_vci_ixr.rdata.read(); |
2011 | | size_t index = r_ixr_rsp_trt_index.read(); |
2012 | | ASSERT(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || |
2013 | | p_vci_ixr.rerror.read()) |
2014 | | ,"Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); |
2015 | | m_transaction_tab.write_rsp(index, r_ixr_rsp_cpt.read(), data, p_vci_ixr.rerror.read()&0x1); |
2016 | | r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; |
2017 | | if ( eop ) { |
2018 | | #ifdef TDEBUG |
2019 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2020 | | std::cout << sc_time_stamp() << " " << name() << " IXR_RSP_TRT_READ transaction table : " << std::endl; |
2021 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
2022 | | m_transaction_tab.print(i); |
2023 | | } |
2024 | | #endif |
2025 | | |
2026 | | r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; |
2027 | | r_ixr_rsp_fsm = IXR_RSP_IDLE; |
2028 | | } |
2029 | | } |
2030 | | break; |
| 2549 | case IXR_RSP_TRT_READ: // write data in the TRT |
| 2550 | { |
| 2551 | if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) |
| 2552 | { |
| 2553 | size_t index = r_ixr_rsp_trt_index.read(); |
| 2554 | bool eop = p_vci_ixr.reop.read(); |
| 2555 | data_t data = p_vci_ixr.rdata.read(); |
| 2556 | bool error = (p_vci_ixr.rerror.read()&0x1 == 0); |
| 2557 | assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || p_vci_ixr.rerror.read()) |
| 2558 | and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); |
| 2559 | m_transaction_tab.write_rsp(index, |
| 2560 | r_ixr_rsp_cpt.read(), |
| 2561 | data, |
| 2562 | error); |
| 2563 | r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; |
| 2564 | if ( eop ) |
| 2565 | { |
| 2566 | r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; |
| 2567 | r_ixr_rsp_fsm = IXR_RSP_IDLE; |
| 2568 | } |
| 2569 | |
| 2570 | #if DEBUG_MEMC_IXR_RSP |
| 2571 | if( m_debug_ixr_rsp_fsm ) |
| 2572 | { |
| 2573 | std::cout << " <MEMC.IXR_RSP_TRT_READ> Writing a word in TRT : " |
| 2574 | << " index = " << std::dec << index |
| 2575 | << " / word = " << r_ixr_rsp_cpt.read() |
| 2576 | << " / data = " << std::hex << data << std::endl; |
| 2577 | } |
| 2578 | #endif |
| 2579 | } |
| 2580 | break; |
2079 | | case XRAM_RSP_DIR_LOCK: // Take the lock on the directory |
2080 | | { |
2081 | | if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) { |
2082 | | r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; |
2083 | | #ifdef TDEBUG |
2084 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2085 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_LOCK state" << std::endl; |
| 2636 | case XRAM_RSP_DIR_LOCK: // Takes the lock on the directory |
| 2637 | { |
| 2638 | if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) |
| 2639 | { |
| 2640 | r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; |
| 2641 | |
| 2642 | #if DEBUG_MEMC_XRAM_RSP |
| 2643 | if( m_debug_xram_rsp_fsm ) |
| 2644 | { |
| 2645 | std::cout << " <MEMC.XRAM_RSP_DIR_LOCK> Get access to directory" << std::endl; |
| 2646 | } |
| 2647 | #endif |
| 2648 | } |
| 2649 | break; |
| 2650 | } |
| 2651 | /////////////////////// |
| 2652 | case XRAM_RSP_TRT_COPY: // Takes the lock on TRT |
| 2653 | // Copy the TRT entry in a local buffer |
| 2654 | // and select a victim cache line |
| 2655 | { |
| 2656 | if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) |
| 2657 | { |
| 2658 | // copy the TRT entry in the r_xram_rsp_trt_buf local buffer |
| 2659 | size_t index = r_xram_rsp_trt_index.read(); |
| 2660 | TransactionTabEntry trt_entry(m_transaction_tab.read(index)); |
| 2661 | r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer |
| 2662 | |
| 2663 | // selects & extracts a victim line from cache |
| 2664 | size_t way = 0; |
| 2665 | size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)]; |
| 2666 | DirectoryEntry victim(m_cache_directory.select(set, way)); |
| 2667 | |
| 2668 | bool inval = (victim.count && victim.valid) ; |
| 2669 | |
| 2670 | // copy the victim line in a local buffer |
| 2671 | for (size_t i=0 ; i<m_words ; i++) |
| 2672 | r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; |
| 2673 | r_xram_rsp_victim_copy = victim.owner.srcid; |
| 2674 | #if L1_MULTI_CACHE |
| 2675 | r_xram_rsp_victim_copy_cache= victim.owner.cache_id; |
| 2676 | #endif |
| 2677 | r_xram_rsp_victim_copy_inst = victim.owner.inst; |
| 2678 | r_xram_rsp_victim_count = victim.count; |
| 2679 | r_xram_rsp_victim_ptr = victim.ptr; |
| 2680 | r_xram_rsp_victim_way = way; |
| 2681 | r_xram_rsp_victim_set = set; |
| 2682 | r_xram_rsp_victim_nline = victim.tag*m_sets + set; |
| 2683 | r_xram_rsp_victim_is_cnt = victim.is_cnt; |
| 2684 | r_xram_rsp_victim_inval = inval ; |
| 2685 | r_xram_rsp_victim_dirty = victim.dirty; |
| 2686 | |
| 2687 | if(!trt_entry.rerror) r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; |
| 2688 | else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; |
| 2689 | |
| 2690 | #if DEBUG_MEMC_XRAM_RSP |
| 2691 | if( m_debug_xram_rsp_fsm ) |
| 2692 | { |
| 2693 | std::cout << " <MEMC.XRAM_RSP_TRT_COPY> Select a slot: " |
| 2694 | << " way = " << std::dec << way |
| 2695 | << " / set = " << set |
| 2696 | << " / inval_required = " << inval << std::endl; |
| 2697 | } |
| 2698 | #endif |
| 2699 | } |
| 2700 | break; |
| 2701 | } |
| 2702 | ///////////////////////// |
| 2703 | case XRAM_RSP_INVAL_LOCK: // check a possible pending inval |
| 2704 | { |
| 2705 | if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) |
| 2706 | { |
| 2707 | size_t index; |
| 2708 | if (m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index)) |
| 2709 | { |
| 2710 | r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; |
| 2711 | |
| 2712 | #if DEBUG_MEMC_XRAM_RSP |
| 2713 | if( m_debug_xram_rsp_fsm ) |
| 2714 | { |
| 2715 | std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get acces to UPT," |
| 2716 | << " but an invalidation is already registered at this address" << std::endl; |
| 2717 | m_update_tab.print(); |
| 2718 | } |
| 2719 | #endif |
| 2720 | |
| 2721 | } |
| 2722 | else if (m_update_tab.is_full() && r_xram_rsp_victim_inval.read()) |
| 2723 | { |
| 2724 | r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; |
| 2725 | |
| 2726 | #if DEBUG_MEMC_XRAM_RSP |
| 2727 | if( m_debug_xram_rsp_fsm ) |
| 2728 | { |
| 2729 | std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get acces to UPT," |
| 2730 | << " but the table is full" << std::endl; |
| 2731 | m_update_tab.print(); |
| 2732 | } |
| 2733 | #endif |
| 2734 | } |
| 2735 | else |
| 2736 | { |
| 2737 | r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; |
| 2738 | |
| 2739 | #if DEBUG_MEMC_XRAM_RSP |
| 2740 | if( m_debug_xram_rsp_fsm ) |
| 2741 | { |
| 2742 | std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get acces to UPT" << std::endl; |
| 2743 | } |
| 2744 | #endif |
| 2745 | } |
| 2746 | } |
| 2747 | break; |
| 2748 | } |
| 2749 | ///////////////////////// |
| 2750 | case XRAM_RSP_INVAL_WAIT: // returns to DIR_LOCK to retry |
| 2751 | { |
| 2752 | r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; |
| 2753 | break; |
| 2754 | } |
| 2755 | /////////////////////// |
| 2756 | case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) |
| 2757 | // and possibly set an inval request in UPT |
| 2758 | { |
| 2759 | // signals generation |
| 2760 | bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; |
| 2761 | bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read; |
| 2762 | // update data |
| 2763 | size_t set = r_xram_rsp_victim_set.read(); |
| 2764 | size_t way = r_xram_rsp_victim_way.read(); |
| 2765 | for(size_t i=0; i<m_words ; i++) m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; |
| 2766 | // compute dirty |
| 2767 | bool dirty = false; |
| 2768 | for(size_t i=0; i<m_words;i++) dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); |
| 2769 | // update directory |
| 2770 | DirectoryEntry entry; |
| 2771 | entry.valid = true; |
| 2772 | entry.is_cnt = false; |
| 2773 | entry.lock = false; |
| 2774 | entry.dirty = dirty; |
| 2775 | entry.tag = r_xram_rsp_trt_buf.nline / m_sets; |
| 2776 | entry.ptr = 0; |
| 2777 | if(cached_read) |
| 2778 | { |
| 2779 | entry.owner.srcid = r_xram_rsp_trt_buf.srcid; |
| 2780 | #if L1_MULTI_CACHE |
| 2781 | entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; |
| 2782 | #endif |
| 2783 | entry.owner.inst = inst_read; |
| 2784 | entry.count = 1; |
| 2785 | } |
| 2786 | else |
| 2787 | { |
| 2788 | entry.owner.srcid = 0; |
| 2789 | #if L1_MULTI_CACHE |
| 2790 | entry.owner.cache_id = 0; |
| 2791 | #endif |
| 2792 | entry.owner.inst = 0; |
| 2793 | entry.count = 0; |
| 2794 | } |
| 2795 | m_cache_directory.write(set, way, entry); |
| 2796 | |
| 2797 | if (r_xram_rsp_victim_inval.read()) |
| 2798 | { |
| 2799 | bool brdcast = r_xram_rsp_victim_is_cnt.read(); |
| 2800 | size_t index = 0; |
| 2801 | size_t count_copies = r_xram_rsp_victim_count.read(); |
| 2802 | |
| 2803 | bool wok = m_update_tab.set( false, // it's an inval transaction |
| 2804 | brdcast, // set brdcast bit |
| 2805 | false, // it does not need a response |
| 2806 | 0, // srcid |
| 2807 | 0, // trdid |
| 2808 | 0, // pktid |
| 2809 | r_xram_rsp_victim_nline.read(), |
| 2810 | count_copies, |
| 2811 | index); |
| 2812 | r_xram_rsp_upt_index = index; |
| 2813 | |
| 2814 | if (!wok) |
| 2815 | { |
| 2816 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST state" << std::endl; |
| 2817 | std::cout << "an update_tab entry was free but write is unsuccessful" << std::endl; |
| 2818 | exit(0); |
| 2819 | } |
| 2820 | } |
| 2821 | |
| 2822 | #if DEBUG_MEMC_XRAM_RSP |
| 2823 | if( m_debug_xram_rsp_fsm ) |
| 2824 | { |
| 2825 | std::cout << " <MEMC.XRAM_RSP_DIR_UPDT> Directory update: " |
| 2826 | << " way = " << std::dec << way |
| 2827 | << " / set = " << set |
| 2828 | << " / count = " << entry.count |
| 2829 | << " / is_cnt = " << entry.is_cnt << std::endl; |
| 2830 | if (r_xram_rsp_victim_inval.read()) |
| 2831 | std::cout << " Invalidation request for victim line " |
| 2832 | << std::hex << r_xram_rsp_victim_nline.read() |
| 2833 | << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; |
| 2834 | } |
| 2835 | #endif |
| 2836 | |
| 2837 | // If the victim is not dirty, we don't need another XRAM put transaction, |
| 2838 |                 // and we can erase the TRT entry
| 2839 | if (!r_xram_rsp_victim_dirty.read()) m_transaction_tab.erase(r_xram_rsp_trt_index.read()); |
| 2840 | |
| 2841 | // Next state |
| 2842 | if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; |
| 2843 | else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; |
| 2844 | else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; |
| 2845 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
| 2846 | break; |
| 2847 | } |
| 2848 | //////////////////////// |
| 2849 | case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty |
| 2850 | { |
| 2851 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) |
| 2852 | { |
| 2853 | m_transaction_tab.set( r_xram_rsp_trt_index.read(), |
| 2854 | false, // write to XRAM |
| 2855 | r_xram_rsp_victim_nline.read(), // line index |
| 2856 | 0, |
| 2857 | 0, |
| 2858 | 0, |
| 2859 | false, |
| 2860 | 0, |
| 2861 | 0, |
| 2862 | std::vector<be_t>(m_words,0), |
| 2863 | std::vector<data_t>(m_words,0) ); |
| 2864 | |
| 2865 | #if DEBUG_MEMC_XRAM_RSP |
| 2866 | if( m_debug_xram_rsp_fsm ) |
| 2867 | { |
| 2868 | std::cout << " <MEMC.XRAM_RSP_TRT_DIRTY> Set TRT entry for the put transaction:" |
| 2869 | << " dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; |
| 2870 | } |
| 2871 | #endif |
| 2872 | if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; |
| 2873 | else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; |
| 2874 | else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
| 2875 | } |
| 2876 | break; |
| 2877 | } |
| 2878 | ////////////////////// |
| 2879 | case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM |
| 2880 | { |
| 2881 | if ( !r_xram_rsp_to_tgt_rsp_req.read() ) |
| 2882 | { |
| 2883 | r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; |
| 2884 | r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; |
| 2885 | r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; |
| 2886 | for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; |
| 2887 | r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; |
| 2888 | r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; |
| 2889 | r_xram_rsp_to_tgt_rsp_rerror = false; |
| 2890 | r_xram_rsp_to_tgt_rsp_req = true; |
| 2891 | |
| 2892 | if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; |
| 2893 | else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
| 2894 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
| 2895 | |
| 2896 | |
| 2897 | #if DEBUG_MEMC_XRAM_RSP |
| 2898 | if( m_debug_xram_rsp_fsm ) |
| 2899 | { |
| 2900 | std::cout << " <MEMC.XRAM_RSP_DIR_RSP> Request the TGT_RSP FSM to return data:" |
| 2901 | << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid |
| 2902 | << " / address = " << r_xram_rsp_trt_buf.nline*m_words*4 |
| 2903 | << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; |
| 2904 | } |
| 2905 | #endif |
| 2906 | } |
| 2907 | break; |
| 2908 | } |
| 2909 | //////////////////// |
| 2910 | case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM |
| 2911 | { |
| 2912 | if( !r_xram_rsp_to_init_cmd_multi_req.read() && |
| 2913 | !r_xram_rsp_to_init_cmd_brdcast_req.read() ) |
| 2914 | { |
| 2915 | bool multi_req = !r_xram_rsp_victim_is_cnt.read(); |
| 2916 | bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); |
| 2917 | bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); |
| 2918 | |
| 2919 | r_xram_rsp_to_init_cmd_multi_req = last_multi_req; |
| 2920 | r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); |
| 2921 | r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); |
| 2922 | r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; |
| 2923 | xram_rsp_to_init_cmd_fifo_srcid = r_xram_rsp_victim_copy.read(); |
| 2924 | xram_rsp_to_init_cmd_fifo_inst = r_xram_rsp_victim_copy_inst.read(); |
| 2925 | #if L1_MULTI_CACHE |
| 2926 | xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read(); |
| 2927 | #endif |
| 2928 | xram_rsp_to_init_cmd_fifo_put = multi_req; |
| 2929 | r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); |
| 2930 | |
| 2931 | if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
| 2932 | else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
| 2933 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
| 2934 | |
| 2935 | #if DEBUG_MEMC_XRAM_RSP |
| 2936 | if( m_debug_xram_rsp_fsm ) |
| 2937 | { |
| 2938 | std::cout << " <MEMC.XRAM_RSP_INVAL> Send an inval request to INIT_CMD FSM:" |
| 2939 | << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; |
2111 | | r_xram_rsp_victim_copy_cache= victim.owner.cache_id; |
2112 | | #endif |
2113 | | r_xram_rsp_victim_copy_inst = victim.owner.inst; |
2114 | | r_xram_rsp_victim_count = victim.count; |
2115 | | r_xram_rsp_victim_ptr = victim.ptr; |
2116 | | r_xram_rsp_victim_way = way; |
2117 | | r_xram_rsp_victim_set = set; |
2118 | | r_xram_rsp_victim_nline = victim.tag*m_sets + set; |
2119 | | r_xram_rsp_victim_is_cnt = victim.is_cnt; |
2120 | | r_xram_rsp_victim_inval = inval ; |
2121 | | r_xram_rsp_victim_dirty = victim.dirty; |
2122 | | |
2123 | | if(!trt_entry.rerror) |
2124 | | r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; |
2125 | | else |
2126 | | r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; |
2127 | | #ifdef TDEBUG |
2128 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2129 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_TRT_COPY state" << std::endl; |
2130 | | std::cout << "Victim way : " << std::hex << way << " set " << set << std::dec << std::endl; |
2131 | | victim.print(); |
2132 | | } |
2133 | | #endif |
2134 | | } |
2135 | | break; |
2136 | | } |
2137 | | /////////////////////// |
2138 | | case XRAM_RSP_INVAL_LOCK: |
2139 | | { |
2140 | | if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) { |
2141 | | #ifdef IDEBUG |
2142 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2143 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state" << std::endl; |
2144 | | } |
2145 | | #endif |
2146 | | size_t index; |
2147 | | if(m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index)){ |
2148 | | r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; |
2149 | | #ifdef IDEBUG |
2150 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2151 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_INVAL_WAIT state" << std::endl; |
2152 | | std::cout << "A invalidation is already registered at this address" << std::endl; |
2153 | | m_update_tab.print(); |
2154 | | } |
2155 | | #endif |
2156 | | |
2157 | | } |
2158 | | else if(m_update_tab.is_full() && r_xram_rsp_victim_inval.read()){ |
2159 | | r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; |
2160 | | #ifdef IDEBUG |
2161 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2162 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_INVAL_WAIT state" << std::endl; |
2163 | | std::cout << "The inval tab is full" << std::endl; |
2164 | | m_update_tab.print(); |
2165 | | } |
2166 | | #endif |
2167 | | } |
2168 | | else { |
2169 | | r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; |
2170 | | #ifdef IDEBUG |
2171 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2172 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_DIR_UPDT state" << std::endl; |
2173 | | m_update_tab.print(); |
2174 | | } |
2175 | | #endif |
2176 | | } |
2177 | | } |
2178 | | break; |
2179 | | } |
2180 | | /////////////////////// |
2181 | | case XRAM_RSP_INVAL_WAIT: |
2182 | | { |
2183 | | r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; |
2184 | | break; |
2185 | | #ifdef IDEBUG |
2186 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2187 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_WAIT state" << std::endl; |
2188 | | } |
2189 | | #endif |
2190 | | } |
2191 | | /////////////////////// |
2192 | | case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) |
2193 | | { |
2194 | | // signals generation |
2195 | | bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; // It is an instruction read |
2196 | | bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read ; |
2197 | | // update data |
2198 | | size_t set = r_xram_rsp_victim_set.read(); |
2199 | | size_t way = r_xram_rsp_victim_way.read(); |
2200 | | for(size_t i=0; i<m_words ; i++){ |
2201 | | m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; |
2202 | | } |
2203 | | // compute dirty |
2204 | | bool dirty = false; |
2205 | | for(size_t i=0; i<m_words;i++){ |
2206 | | dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); |
2207 | | } |
2208 | | |
2209 | | // update directory |
2210 | | DirectoryEntry entry; |
2211 | | entry.valid = true; |
2212 | | entry.is_cnt = false; |
2213 | | entry.lock = false; |
2214 | | entry.dirty = dirty; |
2215 | | entry.tag = r_xram_rsp_trt_buf.nline / m_sets; |
2216 | | entry.ptr = 0; |
2217 | | if(cached_read) { |
2218 | | entry.owner.srcid = r_xram_rsp_trt_buf.srcid; |
| 2980 | xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id; |
| 2981 | #endif |
| 2982 | xram_rsp_to_init_cmd_fifo_inst = entry.owner.inst; |
| 2983 | xram_rsp_to_init_cmd_fifo_put = true; |
| 2984 | if( m_xram_rsp_to_init_cmd_inst_fifo.wok() ) |
| 2985 | { |
| 2986 | r_xram_rsp_next_ptr = entry.next; |
| 2987 | if( entry.next == r_xram_rsp_next_ptr.read() ) // last copy |
| 2988 | { |
| 2989 | r_xram_rsp_to_init_cmd_multi_req = true; |
| 2990 | r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; |
| 2991 | } |
| 2992 | else |
| 2993 | { |
| 2994 | r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
| 2995 | } |
| 2996 | } |
| 2997 | else |
| 2998 | { |
| 2999 | r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
| 3000 | } |
| 3001 | |
| 3002 | #if DEBUG_MEMC_XRAM_RSP |
| 3003 | if( m_debug_xram_rsp_fsm ) |
| 3004 | { |
| 3005 | std::cout << " <MEMC.XRAM_RSP_HEAP_ERASE> Erase the list of copies:" |
| 3006 | << " srcid = " << entry.owner.srcid |
| 3007 | << " / inst = " << entry.owner.inst << std::endl; |
| 3008 | } |
| 3009 | #endif |
| 3010 | } |
| 3011 | break; |
| 3012 | } |
| 3013 | ///////////////////////// |
| 3014 | case XRAM_RSP_HEAP_LAST: // last member of the list |
| 3015 | { |
| 3016 | if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP ) |
| 3017 | { |
| 3018 | std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST state" << std::endl; |
| 3019 | std::cout << "bad HEAP allocation" << std::endl; |
| 3020 | exit(0); |
| 3021 | } |
| 3022 | size_t free_pointer = m_heap.next_free_ptr(); |
| 3023 | |
| 3024 | HeapEntry last_entry; |
| 3025 | last_entry.owner.srcid = 0; |
2220 | | entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; |
2221 | | #endif |
2222 | | entry.owner.inst = inst_read; |
2223 | | entry.count = 1; |
2224 | | } else { |
2225 | | entry.owner.srcid = 0; |
2226 | | #if L1_MULTI_CACHE |
2227 | | entry.owner.cache_id = 0; |
2228 | | #endif |
2229 | | entry.owner.inst = 0; |
2230 | | entry.count = 0; |
2231 | | } |
2232 | | m_cache_directory.write(set, way, entry); |
2233 | | #ifdef DDEBUG |
2234 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2235 | | std::cout << "printing the entry : " << std::endl; |
2236 | | entry.print(); |
2237 | | std::cout << "done" << std::endl; |
2238 | | } |
2239 | | #endif |
2240 | | |
2241 | | #ifdef TDEBUG |
2242 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2243 | | std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_DIR_UPDT transaction table : " << std::endl; |
2244 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
2245 | | m_transaction_tab.print(i); |
2246 | | } |
2247 | | #endif |
2248 | | |
2249 | | if(r_xram_rsp_victim_inval.read()){ |
2250 | | bool brdcast = r_xram_rsp_victim_is_cnt.read(); |
2251 | | size_t index = 0; |
2252 | | size_t count_copies = r_xram_rsp_victim_count.read(); |
2253 | | |
2254 | | //@@ |
2255 | | bool wok = m_update_tab.set(false, // it's an inval transaction |
2256 | | brdcast, // set brdcast bit |
2257 | | false, // it does not need a response |
2258 | | 0,//srcid |
2259 | | 0,//trdid |
2260 | | 0,//pktid |
2261 | | r_xram_rsp_victim_nline.read(), |
2262 | | count_copies, |
2263 | | index); |
2264 | | |
2265 | | #ifdef IDEBUG |
2266 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2267 | | std::cout << "xram_rsp : record invalidation, time = " << std::dec << m_cpt_cycles << std::endl; |
2268 | | m_update_tab.print(); |
2269 | | } |
2270 | | #endif |
2271 | | r_xram_rsp_upt_index = index; |
2272 | | if(!wok) { |
2273 | | ASSERT(false,"mem_cache error : xram_rsp_dir_upt, an update_tab entry was free but write unsuccessful"); |
2274 | | } |
2275 | | } |
2276 | | // If the victim is not dirty, we erase the entry in the TRT |
2277 | | if (!r_xram_rsp_victim_dirty.read()){ |
2278 | | m_transaction_tab.erase(r_xram_rsp_trt_index.read()); |
2279 | | |
2280 | | } |
2281 | | // Next state |
2282 | | if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; |
2283 | | else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; |
2284 | | else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; |
2285 | | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2286 | | break; |
| 3027 | last_entry.owner.cache_id = 0; |
| 3028 | #endif |
| 3029 | last_entry.owner.inst = false; |
| 3030 | if(m_heap.is_full()) |
| 3031 | { |
| 3032 | last_entry.next = r_xram_rsp_next_ptr.read(); |
| 3033 | m_heap.unset_full(); |
| 3034 | } |
| 3035 | else |
| 3036 | { |
| 3037 | last_entry.next = free_pointer; |
| 3038 | } |
| 3039 | |
| 3040 | m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); |
| 3041 | m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); |
| 3042 | |
| 3043 | r_xram_rsp_fsm = XRAM_RSP_IDLE; |
| 3044 | |
| 3045 | #if DEBUG_MEMC_XRAM_RSP |
| 3046 | if( m_debug_xram_rsp_fsm ) |
| 3047 | { |
| 3048 | std::cout << " <MEMC.XRAM_RSP_HEAP_LAST> Heap housekeeping" << std::endl; |
| 3049 | } |
| 3050 | #endif |
| 3051 | break; |
| 3052 | } |
| 3053 | // /////////////////////// |
| 3054 | case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error |
| 3055 | { |
| 3056 | m_transaction_tab.erase(r_xram_rsp_trt_index.read()); |
| 3057 | |
| 3058 | // Next state |
| 3059 | if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; |
| 3060 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
| 3061 | |
| 3062 | #if DEBUG_MEMC_XRAM_RSP |
| 3063 | if( m_debug_xram_rsp_fsm ) |
| 3064 | { |
| 3065 | std::cout << " <MEMC.XRAM_RSP_ERROR_ERASE> Error reported by XRAM / erase the TRT entry" << std::endl; |
| 3066 | } |
| 3067 | #endif |
| 3068 | break; |
2289 | | case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write line to XRAM) if the victim is dirty |
2290 | | { |
2291 | | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) { |
2292 | | m_transaction_tab.set(r_xram_rsp_trt_index.read(), |
2293 | | false, // write to XRAM |
2294 | | r_xram_rsp_victim_nline.read(), // line index |
2295 | | 0, |
2296 | | 0, |
2297 | | 0, |
2298 | | false, |
2299 | | 0, |
2300 | | 0, |
2301 | | std::vector<be_t>(m_words,0), |
2302 | | std::vector<data_t>(m_words,0) ); |
2303 | | #ifdef TDEBUG |
2304 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2305 | | std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_TRT_DIRTY transaction table : " << std::endl; |
2306 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
2307 | | m_transaction_tab.print(i); |
2308 | | } |
2309 | | #endif |
2310 | | |
2311 | | if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; |
2312 | | else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; |
2313 | | else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
2314 | | } |
2315 | | break; |
2316 | | } |
2317 | | ////////////////////// |
2318 | | case XRAM_RSP_DIR_RSP: // send a request to TGT_RSP FSM in case of read |
2319 | | { |
2320 | | if ( !r_xram_rsp_to_tgt_rsp_req.read() ) { |
2321 | | r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; |
2322 | | r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; |
2323 | | r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; |
2324 | | for (size_t i=0; i < m_words; i++) { |
2325 | | r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; |
2326 | | } |
2327 | | r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; |
2328 | | r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; |
2329 | | r_xram_rsp_to_tgt_rsp_rerror = false; |
2330 | | r_xram_rsp_to_tgt_rsp_req = true; |
2331 | | |
2332 | | if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; |
2333 | | else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
2334 | | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2335 | | |
2336 | | #ifdef DDEBUG |
2337 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2338 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_RSP state" << std::endl; |
2339 | | } |
2340 | | #endif |
2341 | | } |
2342 | | break; |
2343 | | } |
2344 | | //////////////////// |
2345 | | case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM |
2346 | | { |
2347 | | if( !r_xram_rsp_to_init_cmd_multi_req.read() && |
2348 | | !r_xram_rsp_to_init_cmd_brdcast_req.read() ) { |
2349 | | |
2350 | | bool multi_req = !r_xram_rsp_victim_is_cnt.read(); |
2351 | | bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); |
2352 | | bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); |
2353 | | |
2354 | | r_xram_rsp_to_init_cmd_multi_req = last_multi_req; |
2355 | | r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); |
2356 | | r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); |
2357 | | r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; |
2358 | | xram_rsp_to_init_cmd_fifo_srcid = r_xram_rsp_victim_copy.read(); |
2359 | | xram_rsp_to_init_cmd_fifo_inst = r_xram_rsp_victim_copy_inst.read(); |
2360 | | #if L1_MULTI_CACHE |
2361 | | xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read(); |
2362 | | #endif |
2363 | | xram_rsp_to_init_cmd_fifo_put = multi_req; |
2364 | | |
2365 | | r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); |
2366 | | |
2367 | | if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
2368 | | else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
2369 | | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2370 | | #ifdef IDEBUG |
2371 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2372 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL state" << std::endl; |
2373 | | } |
2374 | | #endif |
2375 | | } |
2376 | | break; |
2377 | | } |
2378 | | ////////////////////////// |
2379 | | case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM |
2380 | | { |
2381 | | if ( !r_xram_rsp_to_ixr_cmd_req.read() ) { |
2382 | | r_xram_rsp_to_ixr_cmd_req = true; |
2383 | | r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); |
2384 | | r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); |
2385 | | for(size_t i=0; i<m_words ; i++) { |
2386 | | r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; |
2387 | | } |
2388 | | m_cpt_write_dirty++; |
2389 | | bool multi_req = !r_xram_rsp_victim_is_cnt.read() && r_xram_rsp_victim_inval.read(); |
2390 | | bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); |
2391 | | if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
2392 | | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2393 | | #ifdef TDEBUG |
2394 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2395 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_WRITE_DIRTY state" << std::endl; |
2396 | | } |
2397 | | #endif |
2398 | | } |
2399 | | break; |
2400 | | } |
2401 | | ////////////////////////// |
2402 | | case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations |
2403 | | { |
2404 | | if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) { |
2405 | | HeapEntry entry = m_heap_directory.read(r_xram_rsp_next_ptr.read()); |
2406 | | xram_rsp_to_init_cmd_fifo_srcid = entry.owner.srcid; |
2407 | | #if L1_MULTI_CACHE |
2408 | | xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id; |
2409 | | #endif |
2410 | | xram_rsp_to_init_cmd_fifo_inst = entry.owner.inst; |
2411 | | xram_rsp_to_init_cmd_fifo_put = true; |
2412 | | if( m_xram_rsp_to_init_cmd_inst_fifo.wok() ){ |
2413 | | r_xram_rsp_next_ptr = entry.next; |
2414 | | if( entry.next == r_xram_rsp_next_ptr.read() ){ // last copy |
2415 | | r_xram_rsp_to_init_cmd_multi_req = true; |
2416 | | r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; |
2417 | | } else { |
2418 | | r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
2419 | | } |
2420 | | } else { |
2421 | | r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; |
2422 | | } |
2423 | | } |
2424 | | break; |
2425 | | } |
2426 | | ////////////////////////// |
2427 | | case XRAM_RSP_HEAP_LAST: // last member of the list |
2428 | | { |
2429 | | ASSERT((r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) |
2430 | | ,"MemCache ERROR : bad HEAP allocation"); |
2431 | | size_t free_pointer = m_heap_directory.next_free_ptr(); |
2432 | | |
2433 | | HeapEntry last_entry; |
2434 | | last_entry.owner.srcid = 0; |
2435 | | #if L1_MULTI_CACHE |
2436 | | last_entry.owner.cache_id = 0; |
2437 | | #endif |
2438 | | last_entry.owner.inst = false; |
2439 | | if(m_heap_directory.is_full()){ |
2440 | | last_entry.next = r_xram_rsp_next_ptr.read(); |
2441 | | m_heap_directory.unset_full(); |
2442 | | } else { |
2443 | | last_entry.next = free_pointer; |
2444 | | } |
2445 | | |
2446 | | m_heap_directory.write_free_ptr(r_xram_rsp_victim_ptr.read()); |
2447 | | m_heap_directory.write(r_xram_rsp_next_ptr.read(),last_entry); |
2448 | | |
2449 | | r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2450 | | |
2451 | | break; |
2452 | | } |
2453 | | /////////////////////// |
2454 | | case XRAM_RSP_ERROR_ERASE: // erase xram transaction |
2455 | | { |
2456 | | |
2457 | | #ifdef TDEBUG |
2458 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2459 | | std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_ERROR_ERASE transaction table : " << std::endl; |
2460 | | for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) |
2461 | | m_transaction_tab.print(i); |
2462 | | } |
2463 | | #endif |
2464 | | |
2465 | | m_transaction_tab.erase(r_xram_rsp_trt_index.read()); |
2466 | | |
2467 | | // Next state |
2468 | | if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; |
2469 | | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2470 | | break; |
2471 | | } |
2472 | | ////////////////////// |
2473 | | case XRAM_RSP_ERROR_RSP: // send a request to TGT_RSP FSM in case of read |
2474 | | { |
2475 | | if ( !r_xram_rsp_to_tgt_rsp_req.read() ) { |
2476 | | r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; |
2477 | | r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; |
2478 | | r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; |
2479 | | for (size_t i=0; i < m_words; i++) { |
2480 | | r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; |
2481 | | } |
2482 | | r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; |
2483 | | r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; |
2484 | | r_xram_rsp_to_tgt_rsp_rerror = true; |
2485 | | r_xram_rsp_to_tgt_rsp_req = true; |
2486 | | |
2487 | | r_xram_rsp_fsm = XRAM_RSP_IDLE; |
2488 | | |
2489 | | #ifdef DDEBUG |
2490 | | if(m_cpt_cycles > DEBUG_START_CYCLE){ |
2491 | | std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_RSP state" << std::endl; |
2492 | | } |
2493 | | #endif |
2494 | | } |
2495 | | break; |
| 3071 | case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM |
| 3072 | { |
| 3073 | if ( !r_xram_rsp_to_tgt_rsp_req.read() ) |
| 3074 | { |
| 3075 | r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; |
| 3076 | r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; |
| 3077 | r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; |
| 3078 | for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; |
| 3079 | r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; |
| 3080 | r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; |
| 3081 | r_xram_rsp_to_tgt_rsp_rerror = true; |
| 3082 | r_xram_rsp_to_tgt_rsp_req = true; |
| 3083 | |
| 3084 | r_xram_rsp_fsm = XRAM_RSP_IDLE; |
| 3085 | |
| 3086 | #if DEBUG_MEMC_XRAM_RSP |
| 3087 | if( m_debug_xram_rsp_fsm ) |
| 3088 | { |
| 3089 | std::cout << " <MEMC.XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" |
| 3090 | << " srcid = " << r_xram_rsp_trt_buf.srcid << std::endl; |
| 3091 | } |
| 3092 | #endif |
| 3093 | } |
| 3094 | break; |
2506 | | switch ( r_cleanup_fsm.read() ) { |
2507 | | |
2508 | | /////////////////// |
2509 | | case CLEANUP_IDLE: |
2510 | | { |
2511 | | if ( p_vci_tgt_cleanup.cmdval.read() ) { |
2512 | | ASSERT((p_vci_tgt_cleanup.srcid.read() < m_initiators) |
2513 | | ,"VCI_MEM_CACHE error in a cleanup request : received SRCID is larger than the number of initiators"); |
2514 | | |
2515 | | bool reached = false; |
2516 | | for ( size_t index = 0 ; index < ncseg && !reached ; index++ ){ |
2517 | | if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ){ |
2518 | | reached = true; |
2519 | | } |
2520 | | } |
2521 | | // only write request to a mapped address that are not broadcast are handled |
2522 | | if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) && |
2523 | | ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && |
2524 | | reached) |
2525 | | { |
2526 | | PRINTF(" * <MEM_CACHE.CLEANUP> Request from %d.%d at address %llx\n",(uint32_t)p_vci_tgt_cleanup.srcid.read(),(uint32_t)p_vci_tgt_cleanup.pktid.read(),(uint64_t)p_vci_tgt_cleanup.address.read()); |
2527 | | |
2528 | | m_cpt_cleanup++; |
2529 | | |
2530 | | r_cleanup_nline = (addr_t)(m_nline[(vci_addr_t)(p_vci_tgt_cleanup.address.read())]) ; |
2531 | | r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read(); |
2532 | | r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read(); |
2533 | | r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read(); |
2534 | | |
2535 | | r_cleanup_fsm = CLEANUP_DIR_LOCK; |
2536 | | } |
2537 | | } |
2538 | | break; |
| 3104 | |
| 3105 | |
| 3106 | switch ( r_cleanup_fsm.read() ) |
| 3107 | { |
| 3108 | ////////////////// |
| 3109 | case CLEANUP_IDLE: |
| 3110 | { |
| 3111 | if ( p_vci_tgt_cleanup.cmdval.read() ) |
| 3112 | { |
| 3113 | if (p_vci_tgt_cleanup.srcid.read() >= m_initiators ) |
| 3114 | { |
| 3115 | std::cout << "VCI_MEM_CACHE ERROR " << name() |
| 3116 | << " CLEANUP_IDLE state" << std::endl; |
| 3117 | std::cout << "illegal srcid for cleanup request" << std::endl; |
| 3118 | exit(0); |
| 3119 | } |
| 3120 | |
| 3121 | bool reached = false; |
| 3122 | for ( size_t index = 0 ; index < ncseg && !reached ; index++ ) |
| 3123 | { |
| 3124 | if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ) |
| 3125 | reached = true; |
| 3126 | } |
| 3127 | // only write request to a mapped address that are not broadcast are handled |
| 3128 | if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) && |
| 3129 | ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && reached) |
| 3130 | { |
| 3131 | addr_t line = (addr_t)(m_nline[(vci_addr_t)(p_vci_tgt_cleanup.address.read())]); |
| 3132 | |
| 3133 | r_cleanup_nline = line; |
| 3134 | r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read(); |
| 3135 | r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read(); |
| 3136 | r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read(); |
| 3137 | r_cleanup_fsm = CLEANUP_DIR_LOCK; |
| 3138 | |
| 3139 | #if DEBUG_MEMC_CLEANUP |
| 3140 | if( m_debug_cleanup_fsm ) |
| 3141 | { |
| 3142 | std::cout << " <MEMC.CLEANUP_IDLE> Cleanup request:" << std::hex |
| 3143 | << " line = " << line * m_words * 4 |
| 3144 | << " / owner_id = " << p_vci_tgt_cleanup.srcid.read() |
| 3145 | << " / owner_ins = " << (p_vci_tgt_cleanup.trdid.read()&0x1) |
| 3146 | << std::endl; |
| 3147 | } |
| 3148 | #endif |
| 3149 | m_cpt_cleanup++; |
| 3150 | } |
| 3151 | } |
| 3152 | break; |
2612 | | entry.owner.cache_id= 0; |
2613 | | #endif |
2614 | | entry.owner.inst = 0; |
2615 | | // response to the cache |
| 3245 | entry.owner.cache_id= 0; |
| 3246 | #endif |
| 3247 | entry.owner.inst = 0; |
| 3248 | // response to the cache |
| 3249 | r_cleanup_fsm = CLEANUP_RSP; |
| 3250 | } |
| 3251 | else // linked_list mode |
| 3252 | { |
| 3253 | if ( match ) // hit |
| 3254 | { |
| 3255 | entry.count = 0; // no more copy |
| 3256 | entry.owner.srcid = 0; |
| 3257 | #if L1_MULTI_CACHE |
| 3258 | entry.owner.cache_id=0; |
| 3259 | #endif |
| 3260 | entry.owner.inst = 0; |
| 3261 | r_cleanup_fsm = CLEANUP_RSP; |
| 3262 | } |
| 3263 | else // miss |
| 3264 | { |
| 3265 | entry.count = r_cleanup_count.read(); |
| 3266 | entry.owner.srcid = r_cleanup_copy.read(); |
| 3267 | #if L1_MULTI_CACHE |
| 3268 | entry.owner.cache_id = r_cleanup_copy_cache.read(); |
| 3269 | #endif |
| 3270 | entry.owner.inst = r_cleanup_copy_inst.read(); |
| 3271 | r_cleanup_fsm = CLEANUP_UPT_LOCK; |
| 3272 | } |
| 3273 | } |
| 3274 | m_cache_directory.write(set, way, entry); |
| 3275 | |
| 3276 | #if DEBUG_MEMC_CLEANUP |
| 3277 | if( m_debug_cleanup_fsm ) |
| 3278 | { |
| 3279 | std::cout << " <MEMC.CLEANUP_DIR_WRITE> Update directory:" << std::hex |
| 3280 | << " line = " << r_cleanup_nline.read() * m_words * 4 |
| 3281 | << " / dir_id = " << entry.owner.srcid |
| 3282 | << " / dir_ins = " << entry.owner.inst |
| 3283 | << " / count = " << entry.count |
| 3284 | << " / is_cnt = " << entry.is_cnt << std::endl; |
| 3285 | } |
| 3286 | #endif |
| 3287 | |
| 3288 | break; |
| 3289 | } |
| 3290 | /////////////////////// |
| 3291 | case CLEANUP_HEAP_LOCK: // two cases are handled in this state: |
| 3292 | // - the matching copy is directly in the directory |
| 3293 | // - the matching copy is the first copy in the heap |
| 3294 | { |
| 3295 | if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP ) |
| 3296 | { |
| 3297 | size_t way = r_cleanup_way.read(); |
| 3298 | size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; |
| 3299 | HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); |
| 3300 | bool last = (heap_entry.next == r_cleanup_ptr.read()); |
| 3301 | bool cleanup_inst = r_cleanup_trdid.read() & 0x1; |
| 3302 | |
| 3303 | // match_dir computation |
| 3304 | bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); |
| 3305 | bool match_dir_inst = (r_cleanup_copy_inst.read() == cleanup_inst); |
| 3306 | bool match_dir = match_dir_srcid and match_dir_inst; |
| 3307 | #if L1_MULTI_CACHE |
| 3308 | match_dir = match_dir and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read()); |
| 3309 | #endif |
| 3310 | |
| 3311 | // match_heap computation |
| 3312 | bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); |
| 3313 | bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); |