Changeset 545
- Timestamp: Oct 4, 2013, 4:31:55 PM
- Location: branches/RWT/modules/vci_mem_cache/caba/source
- Files: 2 edited
branches/RWT/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
Changes from r542 to r545:

- enum tgt_cmd_fsm_state_e: TGT_CMD_ERROR moves from the second position to just before TGT_CMD_CONFIG (after TGT_CMD_CAS).
- enum tgt_rsp_fsm_state_e: TGT_RSP_CONFIG_IDLE and TGT_RSP_TGT_CMD_IDLE move after TGT_RSP_CLEANUP_IDLE, and TGT_RSP_CONFIG and TGT_RSP_TGT_CMD move after TGT_RSP_CLEANUP, so the CONFIG and TGT_CMD states now close both halves of the enum.
- enum cc_send_fsm_state_e: CC_SEND_CONFIG_IDLE moves after CC_SEND_CAS_IDLE; CC_SEND_CONFIG_INVAL_HEADER, CC_SEND_CONFIG_INVAL_NLINE, CC_SEND_CONFIG_BRDCAST_HEADER and CC_SEND_CONFIG_BRDCAST_NLINE move to the end of the enum, after CC_SEND_CAS_UPDT_DATA_HIGH.
- enum alloc_dir_fsm_state_e: ALLOC_DIR_CONFIG moves to the end, after ALLOC_DIR_XRAM_RSP.
- The activity counters are reorganized into two groups. The first group, introduced by the comment "Counters accessible in software (not yet but eventually)", holds per-transaction-type counters split into local and remote variants plus a distance-weighted cost:

      uint32_t m_cpt_read_local;          // Number of local READ transactions
      uint32_t m_cpt_read_remote;         // Number of remote READ transactions
      uint32_t m_cpt_read_cost;           // Number of (flits * distance) for READs

      uint32_t m_cpt_write_local;         // Number of local WRITE transactions
      uint32_t m_cpt_write_remote;        // Number of remote WRITE transactions
      uint32_t m_cpt_write_flits_local;   // Number of flits for local WRITEs
      uint32_t m_cpt_write_flits_remote;  // Number of flits for remote WRITEs
      uint32_t m_cpt_write_cost;          // Number of (flits * distance) for WRITEs

  m_cpt_read and m_cpt_write are thus replaced by their _local variants, m_cpt_read_flits is dropped, and m_cpt_write_flits is split into local/remote. New local/remote/cost triples are added for LL, SC and CAS, together with m_cpt_update / m_cpt_update_local / m_cpt_update_remote / m_cpt_update_cost, m_cpt_m_inval / m_cpt_m_inval_local / m_cpt_m_inval_remote / m_cpt_m_inval_cost, m_cpt_br_inval (number of BROADCAST INVAL), and m_cpt_cleanup_local / m_cpt_cleanup_remote / m_cpt_cleanup_cost.
- The second group, introduced by "Counters not accessible by software", keeps m_cpt_read_miss, m_cpt_write_miss, m_cpt_write_dirty, m_cpt_trt_rb and m_cpt_trt_full, and adds m_cpt_write_broadcast (number of BROADCAST INVAL caused by a write). The old m_cpt_write_cells, m_cpt_update_mult, m_cpt_inval, m_cpt_inval_mult, m_cpt_inval_brdcast, m_cpt_cleanup, m_cpt_ll, m_cpt_sc and m_cpt_cas counters disappear, as do the now redundant m_cpt_cleanup_cost and m_cpt_update_cost declarations further down the class.
- New monitoring ports, compiled in only when MONITOR_MEMCACHE_FSM == 1, expose every FSM state as an sc_out<int>: p_read_fsm, p_write_fsm, p_xram_rsp_fsm, p_cas_fsm, p_cleanup_fsm, p_config_fsm, p_alloc_heap_fsm, p_alloc_dir_fsm, p_alloc_trt_fsm, p_alloc_upt_fsm, p_alloc_ivt_fsm, p_tgt_cmd_fsm, p_tgt_rsp_fsm, p_ixr_cmd_fsm, p_ixr_rsp_fsm, p_cc_send_fsm, p_cc_receive_fsm and p_multi_ack_fsm.
- The constructor takes two new parameters, x_width and y_width (X and Y widths in the platform), stored in the new const members m_xwidth and m_ywidth.
- clear_stats() is removed and print_stats() now takes two flags: void print_stats(bool activity_counters, bool stats).
- Two new helpers are declared: uint32_t req_distance(uint32_t req_srcid) and bool is_local_req(uint32_t req_srcid).
- Removed registers: r_tgt_cmd_srcid, r_tgt_cmd_trdid and r_tgt_cmd_pktid (srcid/trdid/pktid for the response to a config request), plus r_write_to_ixr_cmd_put and r_cas_to_ixr_cmd_put (GET/PUT request-type flags).
- In the WRITE FSM registers, the commented-out r_write_pending_sc line is deleted and r_write_bc_data_we (write enable for the data buffer) is added next to r_write_sc_key.
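The local/remote split and the (flits * distance) cost counters rely on the req_distance() / is_local_req() helpers declared in this same changeset: a request is local when its requester sits in the cluster of the memory cache (hop distance 0), and remote traffic is weighted by its hop count. The counter update sites live in the FSM transition code, which is not part of the hunks above, so the following is only a minimal, self-contained sketch of the intended accounting, with hypothetical names ReadActivity / account_read:

      #include <cstdint>

      // Sketch only (not the committed code): distance-weighted accounting for
      // one transaction type. The fields mirror the new header counters
      // (m_cpt_read_local, m_cpt_read_remote, m_cpt_read_cost); 'distance'
      // stands for req_distance(srcid) and 'flits' for the VCI packet length.
      struct ReadActivity
      {
          uint32_t m_cpt_read_local  = 0;  // requester in the same cluster (distance == 0)
          uint32_t m_cpt_read_remote = 0;  // requester in another cluster
          uint32_t m_cpt_read_cost   = 0;  // accumulated flits * distance
      };

      void account_read(ReadActivity &a, uint32_t distance, uint32_t flits)
      {
          if (distance == 0) a.m_cpt_read_local++;   // what is_local_req(srcid) tests
          else               a.m_cpt_read_remote++;
          a.m_cpt_read_cost += flits * distance;     // local requests add zero cost
      }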
branches/RWT/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
- Property svn:mergeinfo changed
/trunk/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp merged: 499,504-505,507,509,512,523-524,527,529-531,535,537
Changes from r540 to r545:

- The FSM state-name arrays (tgt_cmd_fsm_str, tgt_rsp_fsm_str, cc_receive_fsm_str, cc_send_fsm_str, multi_ack_fsm_str, config_fsm_str, read_fsm_str, write_fsm_str, ixr_rsp_fsm_str, xram_rsp_fsm_str, ixr_cmd_fsm_str, cas_fsm_str, cleanup_fsm_str, alloc_dir_fsm_str, alloc_trt_fsm_str, alloc_upt_fsm_str, alloc_ivt_fsm_str and alloc_heap_fsm_str) are re-indented, and the entries of tgt_cmd_fsm_str, tgt_rsp_fsm_str, cc_send_fsm_str and alloc_dir_fsm_str are reordered to follow the new enum order of vci_mem_cache.h.
- The constructor signature gains the x_width and y_width parameters, which initialize the new m_xwidth and m_ywidth members. When MONITOR_MEMCACHE_FSM == 1, the initializer list also constructs the new monitoring ports (p_read_fsm, p_write_fsm, p_xram_rsp_fsm, p_cas_fsm, p_cleanup_fsm, p_config_fsm, p_alloc_heap_fsm, p_alloc_dir_fsm, p_alloc_trt_fsm, p_alloc_upt_fsm, p_alloc_ivt_fsm, p_tgt_cmd_fsm, p_tgt_rsp_fsm, p_ixr_cmd_fsm, p_ixr_rsp_fsm, p_cc_send_fsm, p_cc_receive_fsm, p_multi_ack_fsm). The constructor body and cache_monitor() are only re-indented.
- Two new methods are added after cache_monitor():

      /////////////////////////////////////////////////////
      tmpl(uint32_t)::req_distance(uint32_t req_srcid)
      /////////////////////////////////////////////////////
      {
          const uint32_t srcid_width = vci_param_int::S;
          uint8_t self_x_srcid = m_cc_global_id >> (srcid_width - m_xwidth);
          uint8_t self_y_srcid = (m_cc_global_id >> (srcid_width - m_ywidth)) & ((1 << m_xwidth) - 1);

          uint8_t x_srcid = req_srcid >> (srcid_width - m_xwidth);
          uint8_t y_srcid = (req_srcid >> (srcid_width - m_ywidth - m_xwidth)) & ((1 << m_xwidth) - 1);
          return abs(self_x_srcid - x_srcid) + abs(self_y_srcid - y_srcid);
      }

      /////////////////////////////////////////////////////
      tmpl(bool)::is_local_req(uint32_t req_srcid)
      /////////////////////////////////////////////////////
      {
          return req_distance(req_srcid) == 0;
      }

- print_trace() is unchanged apart from indentation. clear_stats() is removed, and print_stats() is rewritten as print_stats(bool activity_counters = true, bool stats = true). When activity_counters is set, it dumps the raw counters under numbered labels, from "[001] NUMBER OF CYCLES" through the local/remote/cost counters for READ, WRITE (including the local and remote flit counts), LL, SC, CAS, UPDATE, M_INV and CLEANUP, the broadcast invalidations, and the miss/dirty/TRT/PUT/GET counters, up to "[038] WRITE BROADCAST". When stats is set, it prints derived statistics ("[100] READ TOTAL", "[101] READ RATE", local and remote read/write rates, miss rates, write burst averages, update averages per update request, and the multicast invalidation rate); the captured hunk stops in the middle of this listing, at "[119] AVE."
INVAL PER M_INV = " << (double) (m_cpt_m_inval_local + m_cpt_m_inval_remote) / m_cpt_m_inval << std::endl 797 << "[120] AV. LOC INV PER M_INV = " << (double) m_cpt_m_inval_local / m_cpt_m_inval << std::endl 798 << "[121] AV. REM INV PER M_INV = " << (double) m_cpt_m_inval_remote / m_cpt_m_inval << std::endl 799 << std::endl 800 << "[122] INVAL BROADCAST RATE = " << (double) m_cpt_br_inval / m_cpt_cycles << std::endl 801 << "[123] WRITE DIRTY RATE = " << (double) m_cpt_write_dirty / m_cpt_cycles << std::endl 802 << std::endl 803 << "[124] CLEANUP RATE = " << (double) (m_cpt_cleanup_local + m_cpt_cleanup_remote) / m_cpt_cycles << std::endl 804 << "[125] LOCAL CLEANUP RATE = " << (double) m_cpt_cleanup_local / m_cpt_cycles << std::endl 805 << "[126] REMOTE CLEANUP RATE = " << (double) m_cpt_cleanup_remote / m_cpt_cycles << std::endl 806 << "[127] LL RATE = " << (double) (m_cpt_ll_local + m_cpt_ll_remote) / m_cpt_cycles << std::endl 807 << "[128] LOCAL LL RATE = " << (double) m_cpt_ll_local / m_cpt_cycles << std::endl 808 << "[129] REMOTE LL RATE = " << (double) m_cpt_ll_remote / m_cpt_cycles << std::endl 809 << "[130] SC RATE = " << (double) (m_cpt_sc_local + m_cpt_sc_remote) / m_cpt_cycles << std::endl 810 << "[131] LOCAL SC RATE = " << (double) m_cpt_sc_local / m_cpt_cycles << std::endl 811 << "[132] REMOTE SC RATE = " << (double) m_cpt_sc_remote / m_cpt_cycles << std::endl 812 << "[133] CAS RATE = " << (double) (m_cpt_cas_local + m_cpt_cas_remote) / m_cpt_cycles << std::endl 813 << "[134] LOCAL CAS RATE = " << (double) m_cpt_cas_local / m_cpt_cycles << std::endl 814 << "[135] REMOTE CAS RATE = " << (double) m_cpt_cas_remote / m_cpt_cycles << std::endl 815 << std::endl 816 << std::endl; 817 } 818 } 819 820 821 ///////////////////////////////// 822 tmpl(/**/) ::~VciMemCache() 823 ///////////////////////////////// 824 { 825 delete [] m_seg; 826 827 delete [] r_ixr_rsp_to_xram_rsp_rok; 828 delete [] r_xram_rsp_victim_data; 829 delete [] r_xram_rsp_to_tgt_rsp_data; 830 831 delete [] r_read_data; 832 delete [] r_read_to_tgt_rsp_data; 833 834 delete [] r_write_data; 835 delete [] r_write_be; 836 delete [] r_write_to_cc_send_data; 837 delete [] r_write_to_cc_send_be; 838 delete [] r_cleanup_data; 839 delete [] r_cleanup_to_ixr_cmd_data; 840 delete [] r_cleanup_to_tgt_rsp_data; 841 delete [] r_cleanup_old_data; 842 843 delete [] r_cas_data; 844 delete [] r_cas_rdata; 845 846 delete [] r_ixr_cmd_wdata; 847 delete [] m_debug_previous_data; 848 delete [] m_debug_data; 849 850 print_stats(); 851 } 852 853 ////////////////////////////////// 854 tmpl(void) ::transition() 855 ////////////////////////////////// 856 { 857 using soclib::common::uint32_log2; 858 859 // RESET 860 if(! 
p_resetn.read()) 861 { 862 863 // Initializing FSMs 864 r_tgt_cmd_fsm = TGT_CMD_IDLE; 865 r_config_fsm = CONFIG_IDLE; 866 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 867 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 868 r_cc_receive_fsm = CC_RECEIVE_IDLE; 869 r_multi_ack_fsm = MULTI_ACK_IDLE; 870 r_read_fsm = READ_IDLE; 871 r_write_fsm = WRITE_IDLE; 872 r_cas_fsm = CAS_IDLE; 873 r_cleanup_fsm = CLEANUP_IDLE; 874 r_alloc_dir_fsm = ALLOC_DIR_RESET; 875 r_alloc_heap_fsm = ALLOC_HEAP_RESET; 876 r_alloc_trt_fsm = ALLOC_TRT_READ; 877 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 878 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 879 r_ixr_rsp_fsm = IXR_RSP_IDLE; 880 r_xram_rsp_fsm = XRAM_RSP_IDLE; 881 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 882 883 m_debug = false; 884 m_debug_previous_valid = false; 885 m_debug_previous_dirty = false; 886 m_debug_previous_count = 0; 887 888 // Initializing Tables 889 m_trt.init(); 890 m_upt.init(); 891 m_ivt.init(); 892 m_llsc_table.init(); 893 894 // initializing FIFOs and communication Buffers 895 896 m_cmd_read_addr_fifo.init(); 897 m_cmd_read_length_fifo.init(); 898 m_cmd_read_srcid_fifo.init(); 899 m_cmd_read_trdid_fifo.init(); 900 m_cmd_read_pktid_fifo.init(); 901 902 m_cmd_write_addr_fifo.init(); 903 m_cmd_write_eop_fifo.init(); 904 m_cmd_write_srcid_fifo.init(); 905 m_cmd_write_trdid_fifo.init(); 906 m_cmd_write_pktid_fifo.init(); 907 m_cmd_write_data_fifo.init(); 908 909 m_cmd_cas_addr_fifo.init() ; 910 m_cmd_cas_srcid_fifo.init() ; 911 m_cmd_cas_trdid_fifo.init() ; 912 m_cmd_cas_pktid_fifo.init() ; 913 m_cmd_cas_wdata_fifo.init() ; 914 m_cmd_cas_eop_fifo.init() ; 915 916 r_config_cmd = MEMC_CMD_NOP; 917 r_config_lock = false; 918 919 m_config_to_cc_send_inst_fifo.init(); 920 m_config_to_cc_send_srcid_fifo.init(); 921 922 r_tgt_cmd_to_tgt_rsp_req = false; 923 924 r_read_to_tgt_rsp_req = false; 925 r_read_to_ixr_cmd_req = false; 926 r_read_to_cc_send_req = false; 927 r_read_to_cleanup_req = false; 928 929 r_write_to_tgt_rsp_req = false; 930 r_write_to_ixr_cmd_req = false; 931 r_write_to_cc_send_multi_req = false; 932 r_write_to_cc_send_brdcast_req = false; 933 r_write_to_multi_ack_req = false; 934 935 m_write_to_cc_send_inst_fifo.init(); 936 m_write_to_cc_send_srcid_fifo.init(); 937 938 r_cleanup_to_tgt_rsp_req = false; 939 940 m_cc_receive_to_cleanup_fifo.init(); 941 942 r_multi_ack_to_tgt_rsp_req = false; 943 944 m_cc_receive_to_multi_ack_fifo.init(); 945 946 r_cas_to_tgt_rsp_req = false; 947 r_cas_cpt = 0 ; 948 r_cas_lfsr = -1 ; 949 r_cas_to_ixr_cmd_req = false; 950 r_cas_to_cc_send_multi_req = false; 951 r_cas_to_cc_send_brdcast_req = false; 952 953 m_cas_to_cc_send_inst_fifo.init(); 954 m_cas_to_cc_send_srcid_fifo.init(); 944 955 #if L1_MULTI_CACHE 945 m_cas_to_cc_send_cache_id_fifo.init(); 946 #endif 947 948 for(size_t i=0; i<m_trt_lines ; i++) 949 { 950 r_ixr_rsp_to_xram_rsp_rok[i] = false; 951 } 952 953 r_xram_rsp_to_tgt_rsp_req = false; 954 r_xram_rsp_to_cc_send_multi_req = false; 955 r_xram_rsp_to_cc_send_brdcast_req = false; 956 r_xram_rsp_to_ixr_cmd_req = false; 957 r_xram_rsp_trt_index = 0; 958 959 m_xram_rsp_to_cc_send_inst_fifo.init(); 960 m_xram_rsp_to_cc_send_srcid_fifo.init(); 961 962 r_alloc_dir_reset_cpt = 0; 963 r_alloc_heap_reset_cpt = 0; 964 965 r_tgt_rsp_key_sent = false; 966 967 // ODCCP 968 r_cleanup_data_index = 0; 969 r_cleanup_trdid = 0; 970 r_cleanup_pktid = 0; 971 r_cleanup_contains_data = false; 972 r_cleanup_ncc = false; 973 r_cleanup_to_ixr_cmd_ncc_l1_dirty = false; 974 r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 975 r_cleanup_to_ixr_cmd_req = false; 976 
r_cleanup_to_ixr_cmd_srcid = 0; 977 r_cleanup_to_ixr_cmd_pktid = 0; 978 r_cleanup_to_ixr_cmd_nline = 0; 979 for (size_t word = 0; word < m_words; word ++) 980 { 981 r_cleanup_to_ixr_cmd_data[word] = 0; 982 r_cleanup_data[word] = 0; 983 } 984 985 986 // Activity counters 987 m_cpt_cycles = 0; 988 m_cpt_read = 0; 989 m_cpt_read_miss = 0; 990 m_cpt_write = 0; 991 m_cpt_write_miss = 0; 992 m_cpt_write_cells = 0; 993 m_cpt_write_dirty = 0; 994 m_cpt_update = 0; 995 m_cpt_update_mult = 0; 996 m_cpt_inval_brdcast = 0; 997 m_cpt_inval = 0; 998 m_cpt_inval_mult = 0; 999 m_cpt_cleanup = 0; 1000 m_cpt_ll = 0; 1001 m_cpt_sc = 0; 1002 m_cpt_cas = 0; 1003 m_cpt_trt_full = 0; 1004 m_cpt_trt_rb = 0; 1005 m_cpt_dir_unused = 0; 1006 m_cpt_upt_unused = 0; 1007 m_cpt_ivt_unused = 0; 1008 m_cpt_heap_unused = 0; 1009 m_cpt_trt_unused = 0; 1010 m_cpt_read_fsm_n_dir_lock = 0; 1011 m_cpt_read_fsm_dir_lock = 0; 1012 m_cpt_read_fsm_dir_used = 0; 1013 m_cpt_read_fsm_trt_lock = 0; 1014 m_cpt_read_fsm_heap_lock = 0; 1015 m_cpt_write_fsm_dir_lock = 0; 1016 m_cpt_write_fsm_n_dir_lock = 0; 1017 m_cpt_write_fsm_upt_lock = 0; 1018 m_cpt_write_fsm_heap_lock = 0; 1019 m_cpt_write_fsm_dir_used = 0; 1020 m_cpt_write_fsm_trt_lock = 0; 1021 m_cpt_cas_fsm_n_dir_lock = 0; 1022 m_cpt_cas_fsm_dir_lock = 0; 1023 m_cpt_cas_fsm_upt_lock = 0; 1024 m_cpt_cas_fsm_heap_lock = 0; 1025 m_cpt_cas_fsm_trt_lock = 0; 1026 m_cpt_cas_fsm_dir_used = 0; 1027 m_cpt_xram_rsp_fsm_n_dir_lock = 0; 1028 m_cpt_xram_rsp_fsm_dir_lock = 0; 1029 m_cpt_xram_rsp_fsm_trt_lock = 0; 1030 m_cpt_xram_rsp_fsm_upt_lock = 0; 1031 m_cpt_xram_rsp_fsm_heap_lock = 0; 1032 m_cpt_xram_rsp_fsm_dir_used = 0; 1033 m_cpt_cleanup_fsm_dir_lock = 0; 1034 m_cpt_cleanup_fsm_n_dir_lock = 0; 1035 m_cpt_cleanup_fsm_heap_lock = 0; 1036 m_cpt_cleanup_fsm_ivt_lock = 0; 1037 m_cpt_cleanup_fsm_dir_used = 0; 1038 m_cpt_ixr_fsm_trt_lock = 0; 1039 m_cpt_multi_ack_fsm_upt_lock = 0; 1040 m_cpt_read_data_unc = 0; 1041 m_cpt_read_data_miss_CC = 0; 1042 m_cpt_read_ins_unc = 0; 1043 m_cpt_read_ins_miss = 0; 1044 m_cpt_read_ll_CC = 0; 1045 m_cpt_read_data_miss_NCC = 0; 1046 m_cpt_read_ll_NCC = 0; 1047 m_cpt_read_WTF = 0; 1048 m_cpt_cleanup_data = 0; 1049 m_cpt_ncc_to_cc_read = 0; 1050 m_cpt_ncc_to_cc_write = 0; 1051 m_cpt_ncc_to_cc = 0; 1052 return; 1053 } 1054 1055 bool cmd_read_fifo_put = false; 1056 bool cmd_read_fifo_get = false; 1057 1058 bool cmd_write_fifo_put = false; 1059 bool cmd_write_fifo_get = false; 1060 1061 bool cmd_cas_fifo_put = false; 1062 bool cmd_cas_fifo_get = false; 1063 1064 bool cc_receive_to_cleanup_fifo_get = false; 1065 bool cc_receive_to_cleanup_fifo_put = false; 1066 1067 bool cc_receive_to_multi_ack_fifo_get = false; 1068 bool cc_receive_to_multi_ack_fifo_put = false; 1069 1070 bool write_to_cc_send_fifo_put = false; 1071 bool write_to_cc_send_fifo_get = false; 1072 bool write_to_cc_send_fifo_inst = false; 1073 size_t write_to_cc_send_fifo_srcid = 0; 1074 1075 bool xram_rsp_to_cc_send_fifo_put = false; 1076 bool xram_rsp_to_cc_send_fifo_get = false; 1077 bool xram_rsp_to_cc_send_fifo_inst = false; 1078 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 1079 1080 bool config_to_cc_send_fifo_put = false; 1081 bool config_to_cc_send_fifo_get = false; 1082 bool config_to_cc_send_fifo_inst = false; 1083 size_t config_to_cc_send_fifo_srcid = 0; 1084 1085 bool cas_to_cc_send_fifo_put = false; 1086 bool cas_to_cc_send_fifo_get = false; 1087 bool cas_to_cc_send_fifo_inst = false; 1088 size_t cas_to_cc_send_fifo_srcid = 0; 1089 1090 m_debug = (m_cpt_cycles > m_debug_start_cycle) and 
m_debug_ok; 956 m_cas_to_cc_send_cache_id_fifo.init(); 957 #endif 958 959 for(size_t i=0; i<m_trt_lines ; i++) 960 { 961 r_ixr_rsp_to_xram_rsp_rok[i] = false; 962 } 963 964 r_xram_rsp_to_tgt_rsp_req = false; 965 r_xram_rsp_to_cc_send_multi_req = false; 966 r_xram_rsp_to_cc_send_brdcast_req = false; 967 r_xram_rsp_to_ixr_cmd_req = false; 968 r_xram_rsp_trt_index = 0; 969 970 m_xram_rsp_to_cc_send_inst_fifo.init(); 971 m_xram_rsp_to_cc_send_srcid_fifo.init(); 972 973 r_alloc_dir_reset_cpt = 0; 974 r_alloc_heap_reset_cpt = 0; 975 976 r_tgt_rsp_key_sent = false; 977 978 // ODCCP 979 r_cleanup_data_index = 0; 980 r_cleanup_trdid = 0; 981 r_cleanup_pktid = 0; 982 r_cleanup_contains_data = false; 983 r_cleanup_ncc = false; 984 r_cleanup_to_ixr_cmd_ncc_l1_dirty = false; 985 r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 986 r_cleanup_to_ixr_cmd_req = false; 987 r_cleanup_to_ixr_cmd_srcid = 0; 988 r_cleanup_to_ixr_cmd_pktid = 0; 989 r_cleanup_to_ixr_cmd_nline = 0; 990 for (size_t word = 0; word < m_words; word ++) 991 { 992 r_cleanup_to_ixr_cmd_data[word] = 0; 993 r_cleanup_data[word] = 0; 994 } 995 996 997 // Activity counters 998 m_cpt_cycles = 0; 999 m_cpt_read_local = 0; 1000 m_cpt_read_remote = 0; 1001 m_cpt_read_cost = 0; 1002 m_cpt_write_local = 0; 1003 m_cpt_write_remote = 0; 1004 m_cpt_write_flits_local = 0; 1005 m_cpt_write_flits_remote = 0; 1006 m_cpt_write_cost = 0; 1007 m_cpt_ll_local = 0; 1008 m_cpt_ll_remote = 0; 1009 m_cpt_ll_cost = 0; 1010 m_cpt_sc_local = 0; 1011 m_cpt_sc_remote = 0; 1012 m_cpt_sc_cost = 0; 1013 m_cpt_cas_local = 0; 1014 m_cpt_cas_remote = 0; 1015 m_cpt_cas_cost = 0; 1016 m_cpt_update = 0; 1017 m_cpt_update_local = 0; 1018 m_cpt_update_remote = 0; 1019 m_cpt_update_cost = 0; 1020 m_cpt_m_inval = 0; 1021 m_cpt_m_inval_local = 0; 1022 m_cpt_m_inval_remote = 0; 1023 m_cpt_m_inval_cost = 0; 1024 m_cpt_br_inval = 0; 1025 m_cpt_cleanup_local = 0; 1026 m_cpt_cleanup_remote = 0; 1027 m_cpt_cleanup_cost = 0; 1028 1029 m_cpt_read_miss = 0; 1030 m_cpt_write_miss = 0; 1031 m_cpt_write_dirty = 0; 1032 m_cpt_write_broadcast = 0; 1033 m_cpt_trt_rb = 0; 1034 m_cpt_trt_full = 0; 1035 m_cpt_get = 0; 1036 m_cpt_put = 0; 1037 m_cpt_dir_unused = 0; 1038 m_cpt_upt_unused = 0; 1039 m_cpt_ivt_unused = 0; 1040 m_cpt_heap_unused = 0; 1041 m_cpt_trt_unused = 0; 1042 m_cpt_read_fsm_n_dir_lock = 0; 1043 m_cpt_read_fsm_dir_lock = 0; 1044 m_cpt_read_fsm_dir_used = 0; 1045 m_cpt_read_fsm_trt_lock = 0; 1046 m_cpt_read_fsm_heap_lock = 0; 1047 m_cpt_write_fsm_dir_lock = 0; 1048 m_cpt_write_fsm_n_dir_lock = 0; 1049 m_cpt_write_fsm_upt_lock = 0; 1050 m_cpt_write_fsm_heap_lock = 0; 1051 m_cpt_write_fsm_dir_used = 0; 1052 m_cpt_write_fsm_trt_lock = 0; 1053 m_cpt_cas_fsm_n_dir_lock = 0; 1054 m_cpt_cas_fsm_dir_lock = 0; 1055 m_cpt_cas_fsm_upt_lock = 0; 1056 m_cpt_cas_fsm_heap_lock = 0; 1057 m_cpt_cas_fsm_trt_lock = 0; 1058 m_cpt_cas_fsm_dir_used = 0; 1059 m_cpt_xram_rsp_fsm_n_dir_lock = 0; 1060 m_cpt_xram_rsp_fsm_dir_lock = 0; 1061 m_cpt_xram_rsp_fsm_trt_lock = 0; 1062 m_cpt_xram_rsp_fsm_upt_lock = 0; 1063 m_cpt_xram_rsp_fsm_heap_lock = 0; 1064 m_cpt_xram_rsp_fsm_dir_used = 0; 1065 m_cpt_cleanup_fsm_dir_lock = 0; 1066 m_cpt_cleanup_fsm_n_dir_lock = 0; 1067 m_cpt_cleanup_fsm_heap_lock = 0; 1068 m_cpt_cleanup_fsm_ivt_lock = 0; 1069 m_cpt_cleanup_fsm_dir_used = 0; 1070 m_cpt_ixr_fsm_trt_lock = 0; 1071 m_cpt_multi_ack_fsm_upt_lock = 0; 1072 m_cpt_read_data_unc = 0; 1073 m_cpt_read_data_miss_CC = 0; 1074 m_cpt_read_ins_unc = 0; 1075 m_cpt_read_ins_miss = 0; 1076 m_cpt_read_ll_CC = 0; 1077 
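// [Editor's note -- illustrative sketch, not part of the changeset] The counters reset in
// this block are the ones printed by print_stats() above: each transaction type gets a
// local / remote pair plus a "cost" counter accumulating (number of flits) * (distance to
// the requester). The classification relies on is_local_req() and req_distance(), used in
// the TGT_CMD FSM below but not shown in this changeset; the helpers below are only a
// plausible stand-in, ASSUMING the upper srcid bits carry the (x,y) coordinates of the
// source cluster.
#include <cstddef>   // size_t (the sketch is self-contained)

// ASSUMED srcid layout: [ ... | x(4 bits) | y(4 bits) | local id(4 bits) ]
bool sketch_is_local(size_t srcid, size_t home_x, size_t home_y)
{
    size_t x = (srcid >> 8) & 0xF;
    size_t y = (srcid >> 4) & 0xF;
    return (x == home_x) and (y == home_y);
}
size_t sketch_distance(size_t srcid, size_t home_x, size_t home_y)
{
    size_t x = (srcid >> 8) & 0xF;
    size_t y = (srcid >> 4) & 0xF;
    return (x > home_x ? x - home_x : home_x - x)
         + (y > home_y ? y - home_y : home_y - y);   // Manhattan distance
}
// Counting pattern used in the TGT_CMD FSM below, e.g. for a cached read:
//   if (is_local_req(srcid)) m_cpt_read_local++; else m_cpt_read_remote++;
//   m_cpt_read_cost += m_words * req_distance(srcid);   // flits * distance
// [End of editor's sketch]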
m_cpt_read_data_miss_NCC = 0; 1078 m_cpt_read_ll_NCC = 0; 1079 m_cpt_read_WTF = 0; 1080 m_cpt_cleanup_data = 0; 1081 m_cpt_ncc_to_cc_read = 0; 1082 m_cpt_ncc_to_cc_write = 0; 1083 m_cpt_ncc_to_cc = 0; 1084 return; 1085 } 1086 1087 bool cmd_read_fifo_put = false; 1088 bool cmd_read_fifo_get = false; 1089 1090 bool cmd_write_fifo_put = false; 1091 bool cmd_write_fifo_get = false; 1092 1093 bool cmd_cas_fifo_put = false; 1094 bool cmd_cas_fifo_get = false; 1095 1096 bool cc_receive_to_cleanup_fifo_get = false; 1097 bool cc_receive_to_cleanup_fifo_put = false; 1098 1099 bool cc_receive_to_multi_ack_fifo_get = false; 1100 bool cc_receive_to_multi_ack_fifo_put = false; 1101 1102 bool write_to_cc_send_fifo_put = false; 1103 bool write_to_cc_send_fifo_get = false; 1104 bool write_to_cc_send_fifo_inst = false; 1105 size_t write_to_cc_send_fifo_srcid = 0; 1106 1107 bool xram_rsp_to_cc_send_fifo_put = false; 1108 bool xram_rsp_to_cc_send_fifo_get = false; 1109 bool xram_rsp_to_cc_send_fifo_inst = false; 1110 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 1111 1112 bool config_to_cc_send_fifo_put = false; 1113 bool config_to_cc_send_fifo_get = false; 1114 bool config_to_cc_send_fifo_inst = false; 1115 size_t config_to_cc_send_fifo_srcid = 0; 1116 1117 bool cas_to_cc_send_fifo_put = false; 1118 bool cas_to_cc_send_fifo_get = false; 1119 bool cas_to_cc_send_fifo_inst = false; 1120 size_t cas_to_cc_send_fifo_srcid = 0; 1121 1122 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 1091 1123 1092 1124 #if DEBUG_MEMC_GLOBAL 1093 if(m_debug) 1094 { 1095 std::cout 1096 << "---------------------------------------------" << std::dec << std::endl 1097 << "MEM_CACHE " << name() 1098 << " ; Time = " << m_cpt_cycles << std::endl 1099 << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] << std::endl 1100 << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] << std::endl 1101 << " - CC_SEND FSM = " << cc_send_fsm_str[r_cc_send_fsm.read()] << std::endl 1102 << " - CC_RECEIVE FSM = " << cc_receive_fsm_str[r_cc_receive_fsm.read()] << std::endl 1103 << " - MULTI_ACK FSM = " << multi_ack_fsm_str[r_multi_ack_fsm.read()] << std::endl 1104 << " - READ FSM = " << read_fsm_str[r_read_fsm.read()] << std::endl 1105 << " - WRITE FSM = " << write_fsm_str[r_write_fsm.read()] << std::endl 1106 << " - CAS FSM = " << cas_fsm_str[r_cas_fsm.read()] << std::endl 1107 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl 1108 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] << std::endl 1109 << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] << std::endl 1110 << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl 1111 << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] << std::endl 1112 << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] << std::endl 1113 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] << std::endl 1114 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 1115 } 1116 #endif 1117 1118 //////////////////////////////////////////////////////////////////////////////////// 1119 // TGT_CMD FSM 1120 //////////////////////////////////////////////////////////////////////////////////// 1121 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors, 1122 // and dispatch these commands to the proper FSM through dedicated FIFOs. 
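// [Editor's note -- illustrative sketch, not part of the changeset] The command-type list
// below and the pktid asserts in the TGT_CMD states further down define the TSAR routing
// rule: plain data/instruction reads keep pktid bit 2 at 0, TYPE_WRITE is X100, TYPE_LL is
// X110, TYPE_CAS is X101 and TYPE_SC is X111 (CAS goes to the CAS FSM, SC to the WRITE FSM).
// A self-contained rendition of that decoding, with hypothetical names; the real FSM
// asserts on a command/pktid mismatch instead of returning SK_BAD_TYPE:
#include <stdint.h>

enum vci_cmd_sketch { SK_CMD_READ, SK_CMD_WRITE, SK_CMD_LOCKED_READ, SK_CMD_NOP };
enum tgt_fsm_sketch { SK_TO_READ_FSM, SK_TO_WRITE_FSM, SK_TO_CAS_FSM, SK_BAD_TYPE };

tgt_fsm_sketch route_command(vci_cmd_sketch cmd, uint32_t pktid)
{
    switch (cmd)
    {
        case SK_CMD_READ:        // data/ins read, cached or not: bit 2 must be 0
            return ((pktid & 0x4) == 0x0) ? SK_TO_READ_FSM  : SK_BAD_TYPE;
        case SK_CMD_WRITE:       // TYPE_WRITE = X100
            return ((pktid & 0x7) == 0x4) ? SK_TO_WRITE_FSM : SK_BAD_TYPE;
        case SK_CMD_LOCKED_READ: // TYPE_LL = X110, handled by the READ FSM
            return ((pktid & 0x7) == 0x6) ? SK_TO_READ_FSM  : SK_BAD_TYPE;
        case SK_CMD_NOP:         // TYPE_CAS = X101 -> CAS FSM, TYPE_SC = X111 -> WRITE FSM
            if ((pktid & 0x5) != 0x5) return SK_BAD_TYPE;
            return ((pktid & 0x7) == 0x5) ? SK_TO_CAS_FSM : SK_TO_WRITE_FSM;
    }
    return SK_BAD_TYPE;
}
// [End of editor's sketch]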
1123 // 1124 // There are 5 types of commands accepted in the XRAM segment: 1125 // - READ : A READ request has a length of 1 VCI flit. It can be a single word 1126 // or an entire cache line, depending on the PLEN value => READ FSM 1127 // - WRITE : A WRITE request has a maximum length of 16 flits, and can only 1128 // concern words in a same line => WRITE FSM 1129 // - CAS : A CAS request has a length of 2 flits or 4 flits => CAS FSM 1130 // - LL : An LL request has a length of 1 flit => READ FSM 1131 // - SC : An SC request has a length of 2 flits. First flit contains the 1132 // acces key, second flit the data to write => WRITE FSM. 1133 // 1134 // The READ/WRITE commands accepted in the configuration segment are targeting 1135 // configuration or status registers. They must contain one single flit. 1136 // - For almost all addressable registers, the response is returned immediately. 1137 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 1138 //////////////////////////////////////////////////////////////////////////////////// 1139 1140 switch(r_tgt_cmd_fsm.read()) 1141 { 1142 ////////////////// 1143 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG) 1144 if(p_vci_tgt.cmdval) 1145 { 1125 if(m_debug) 1126 { 1127 std::cout 1128 << "---------------------------------------------" << std::dec << std::endl 1129 << "MEM_CACHE " << name() 1130 << " ; Time = " << m_cpt_cycles << std::endl 1131 << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] << std::endl 1132 << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] << std::endl 1133 << " - CC_SEND FSM = " << cc_send_fsm_str[r_cc_send_fsm.read()] << std::endl 1134 << " - CC_RECEIVE FSM = " << cc_receive_fsm_str[r_cc_receive_fsm.read()] << std::endl 1135 << " - MULTI_ACK FSM = " << multi_ack_fsm_str[r_multi_ack_fsm.read()] << std::endl 1136 << " - READ FSM = " << read_fsm_str[r_read_fsm.read()] << std::endl 1137 << " - WRITE FSM = " << write_fsm_str[r_write_fsm.read()] << std::endl 1138 << " - CAS FSM = " << cas_fsm_str[r_cas_fsm.read()] << std::endl 1139 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl 1140 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] << std::endl 1141 << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] << std::endl 1142 << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl 1143 << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] << std::endl 1144 << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] << std::endl 1145 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] << std::endl 1146 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 1147 } 1148 #endif 1149 1150 //////////////////////////////////////////////////////////////////////////////////// 1151 // TGT_CMD FSM 1152 //////////////////////////////////////////////////////////////////////////////////// 1153 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors, 1154 // and dispatch these commands to the proper FSM through dedicated FIFOs. 1155 // 1156 // There are 5 types of commands accepted in the XRAM segment: 1157 // - READ : A READ request has a length of 1 VCI flit. 
It can be a single word 1158 // or an entire cache line, depending on the PLEN value => READ FSM 1159 // - WRITE : A WRITE request has a maximum length of 16 flits, and can only 1160 // concern words in a same line => WRITE FSM 1161 // - CAS : A CAS request has a length of 2 flits or 4 flits => CAS FSM 1162 // - LL : An LL request has a length of 1 flit => READ FSM 1163 // - SC : An SC request has a length of 2 flits. First flit contains the 1164 // acces key, second flit the data to write => WRITE FSM. 1165 // 1166 // The READ/WRITE commands accepted in the configuration segment are targeting 1167 // configuration or status registers. They must contain one single flit. 1168 // - For almost all addressable registers, the response is returned immediately. 1169 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 1170 //////////////////////////////////////////////////////////////////////////////////// 1171 switch(r_tgt_cmd_fsm.read()) 1172 { 1173 ////////////////// 1174 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG) 1175 if(p_vci_tgt.cmdval) 1176 { 1146 1177 1147 1178 1148 1179 #if DEBUG_MEMC_TGT_CMD 1149 if(m_debug) 1150 std::cout << " <MEMC " << name() 1151 << " TGT_CMD_IDLE> Receive command from srcid " 1152 << std::hex << p_vci_tgt.srcid.read() 1153 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 1154 #endif 1155 // checking segmentation violation 1156 addr_t address = p_vci_tgt.address.read(); 1157 uint32_t plen = p_vci_tgt.plen.read(); 1158 bool found = false; 1159 bool config = false; 1160 1161 // register arguments for response (segmentation violation or config) 1162 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1163 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1164 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1165 1166 for(size_t seg_id = 0 ; (seg_id < m_nseg) and not found ; seg_id++) 1180 if(m_debug) 1181 std::cout << " <MEMC " << name() 1182 << " TGT_CMD_IDLE> Receive command from srcid " 1183 << std::hex << p_vci_tgt.srcid.read() 1184 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 1185 #endif 1186 // checking segmentation violation 1187 addr_t address = p_vci_tgt.address.read(); 1188 uint32_t plen = p_vci_tgt.plen.read(); 1189 bool found = false; 1190 bool config = false; 1191 1192 for (size_t seg_id = 0; (seg_id < m_nseg) && !found; seg_id++) 1193 { 1194 if (m_seg[seg_id]->contains(address) && 1195 m_seg[seg_id]->contains(address + plen - vci_param_int::B) ) 1196 { 1197 found = true; 1198 if ( m_seg[seg_id]->special() ) config = true; 1199 } 1200 } 1201 1202 if (!found) /////////// out of segment error 1203 { 1204 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1205 } 1206 else if ( config ) /////////// configuration command 1207 { 1208 if (!p_vci_tgt.eop.read()) r_tgt_cmd_fsm = TGT_CMD_ERROR; 1209 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 1210 } 1211 else //////////// memory access 1212 { 1213 if ( p_vci_tgt.cmd.read() == vci_param_int::CMD_READ ) 1214 { 1215 // check that the pktid is either : 1216 // TYPE_READ_DATA_UNC 1217 // TYPE_READ_DATA_MISS 1218 // TYPE_READ_INS_UNC 1219 // TYPE_READ_INS_MISS 1220 // ==> bit2 must be zero with the TSAR encoding 1221 // ==> mask = 0b0100 = 0x4 1222 assert( ((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 1223 "The type specified in the pktid field is incompatible with the READ CMD"); 1224 r_tgt_cmd_fsm = TGT_CMD_READ; 1225 } 1226 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1227 { 1228 // check that the pktid is TYPE_WRITE 1229 // ==> 
TYPE_WRITE = X100 with the TSAR encoding 1230 // ==> mask = 0b0111 = 0x7 1231 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x4) and 1232 "The type specified in the pktid field is incompatible with the WRITE CMD"); 1233 r_tgt_cmd_fsm = TGT_CMD_WRITE; 1234 } 1235 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1236 { 1237 // check that the pktid is TYPE_LL 1238 // ==> TYPE_LL = X110 with the TSAR encoding 1239 // ==> mask = 0b0111 = 0x7 1240 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x6) and 1241 "The type specified in the pktid field is incompatible with the LL CMD"); 1242 r_tgt_cmd_fsm = TGT_CMD_READ; 1243 } 1244 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1245 { 1246 // check that the pktid is either : 1247 // TYPE_CAS 1248 // TYPE_SC 1249 // ==> TYPE_CAS = X101 with the TSAR encoding 1250 // ==> TYPE_SC = X111 with the TSAR encoding 1251 // ==> mask = 0b0101 = 0x5 1252 assert(((p_vci_tgt.pktid.read() & 0x5) == 0x5) and 1253 "The type specified in the pktid field is incompatible with the NOP CMD"); 1254 1255 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 1256 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 1257 } 1258 else 1259 { 1260 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1261 } 1262 } 1263 } 1264 break; 1265 1266 /////////////////// 1267 case TGT_CMD_ERROR: // response error must be sent 1268 1269 // wait if pending request 1270 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1271 1272 // consume all the command packet flits before sending response error 1273 if ( p_vci_tgt.cmdval and p_vci_tgt.eop ) 1274 { 1275 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1276 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1277 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1278 r_tgt_cmd_to_tgt_rsp_req = true; 1279 r_tgt_cmd_to_tgt_rsp_error = 1; 1280 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1281 1282 #if DEBUG_MEMC_TGT_CMD 1283 if(m_debug) 1284 std::cout << " <MEMC " << name() 1285 << " TGT_CMD_ERROR> Segmentation violation:" 1286 << " address = " << std::hex << p_vci_tgt.address.read() 1287 << " / srcid = " << p_vci_tgt.srcid.read() 1288 << " / trdid = " << p_vci_tgt.trdid.read() 1289 << " / pktid = " << p_vci_tgt.pktid.read() 1290 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1291 #endif 1292 1293 } 1294 break; 1295 1296 //////////////////// 1297 case TGT_CMD_CONFIG: // execute config request and return response 1298 { 1299 addr_t seg_base = m_seg[m_seg_config]->baseAddress(); 1300 addr_t address = p_vci_tgt.address.read(); 1301 size_t cell = (address - seg_base)/vci_param_int::B; 1302 1303 bool need_rsp; 1304 size_t error; 1305 uint32_t rdata = 0; // default value 1306 uint32_t wdata = p_vci_tgt.wdata.read(); 1307 1308 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1309 and (cell == MEMC_LOCK) ) 1310 { 1311 rdata = (uint32_t)r_config_lock.read(); 1312 need_rsp = true; 1313 error = 0; 1314 r_config_lock = true; 1315 } 1316 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1317 and (cell == MEMC_LOCK)) 1318 { 1319 need_rsp = true; 1320 error = 0; 1321 r_config_lock = false; 1322 } 1323 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1324 and (cell == MEMC_ADDR_LO)) 1325 { 1326 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1327 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1328 1329 need_rsp = true; 1330 error = 0; 1331 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1332 ((addr_t)wdata); 1333 } 1334 else if 
( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1335 and (cell == MEMC_ADDR_HI)) 1336 { 1337 need_rsp = true; 1338 error = 0; 1339 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1340 (((addr_t) wdata) << 32); 1341 } 1342 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1343 and (cell == MEMC_BUF_LENGTH)) 1344 { 1345 need_rsp = true; 1346 error = 0; 1347 size_t lines = wdata / (m_words << 2); 1348 if (wdata % (m_words << 2)) lines++; 1349 r_config_cmd_lines = lines; 1350 r_config_rsp_lines = lines; 1351 } 1352 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1353 and (cell == MEMC_CMD_TYPE)) 1354 { 1355 need_rsp = false; 1356 error = 0; 1357 r_config_cmd = wdata; 1358 r_config_srcid = p_vci_tgt.srcid.read(); 1359 r_config_trdid = p_vci_tgt.trdid.read(); 1360 r_config_pktid = p_vci_tgt.pktid.read(); 1361 } 1362 else 1363 { 1364 need_rsp = true; 1365 error = 1; 1366 } 1367 1368 if ( need_rsp ) 1369 { 1370 // blocked if previous pending request to TGT_RSP FSM 1371 if ( r_tgt_cmd_to_tgt_rsp_req.read() ) break; 1372 1373 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1374 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1375 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1376 r_tgt_cmd_to_tgt_rsp_req = true; 1377 r_tgt_cmd_to_tgt_rsp_error = error; 1378 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1379 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1380 } 1381 else 1382 { 1383 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1384 } 1385 1386 #if DEBUG_MEMC_TGT_CMD 1387 if(m_debug) 1388 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1389 << " address = " << std::hex << p_vci_tgt.address.read() 1390 << " / wdata = " << p_vci_tgt.wdata.read() 1391 << " / need_rsp = " << need_rsp 1392 << " / error = " << error << std::endl; 1393 #endif 1394 break; 1395 } 1396 ////////////////// 1397 case TGT_CMD_READ: // Push a read request into read fifo 1398 1399 // check that the read does not cross a cache line limit. 
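// The check just below rejects a cached read whose last word would spill past its cache
// line: m_x[address] is the word index of the address within its line, plen>>2 converts
// the VCI plen (in bytes) into a word count, and the sum must not exceed the 16-word line
// used by this check. For example (hypothetical values), a read starting at word 14 with
// plen = 12 bytes (3 words) gives 14 + 3 = 17 > 16 and stops the simulation with an error,
// while a read starting at word 12 with plen = 16 bytes (4 words) gives exactly 16 and is
// accepted. LL commands are exempt from this check and are verified separately
// (their plen must be 8).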
1400 if ( ((m_x[(addr_t) p_vci_tgt.address.read()]+ (p_vci_tgt.plen.read() >>2)) > 16) and 1401 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1402 { 1403 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1404 << " illegal address/plen for VCI read command" << std::endl; 1405 exit(0); 1406 } 1407 // check single flit 1408 if(!p_vci_tgt.eop.read()) 1409 { 1410 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1411 << " read command packet must contain one single flit" << std::endl; 1412 exit(0); 1413 } 1414 // check plen for LL 1415 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1416 (p_vci_tgt.plen.read() != 8) ) 1417 { 1418 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1419 << " ll command packets must have a plen of 8" << std::endl; 1420 exit(0); 1421 } 1422 1423 if ( p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok() ) 1424 { 1425 1426 #if DEBUG_MEMC_TGT_CMD 1427 if(m_debug) 1428 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1429 << " address = " << std::hex << p_vci_tgt.address.read() 1430 << " / srcid = " << p_vci_tgt.srcid.read() 1431 << " / trdid = " << p_vci_tgt.trdid.read() 1432 << " / pktid = " << p_vci_tgt.pktid.read() 1433 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1434 #endif 1435 cmd_read_fifo_put = true; 1436 // <Activity counters> 1437 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) { 1438 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_ll_local++; 1439 else m_cpt_ll_remote++; 1440 m_cpt_ll_cost += req_distance(p_vci_tgt.srcid.read()); // LL on a single word 1441 } 1442 else { 1443 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_read_local++; 1444 else m_cpt_read_remote++; 1445 m_cpt_read_cost += m_words * req_distance(p_vci_tgt.srcid.read()); 1446 } 1447 // </Activity counters> 1448 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1449 } 1450 break; 1451 1452 /////////////////// 1453 case TGT_CMD_WRITE: 1454 if(p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1455 { 1456 1457 #if DEBUG_MEMC_TGT_CMD 1458 if(m_debug) 1459 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1460 << " address = " << std::hex << p_vci_tgt.address.read() 1461 << " / srcid = " << p_vci_tgt.srcid.read() 1462 << " / trdid = " << p_vci_tgt.trdid.read() 1463 << " / pktid = " << p_vci_tgt.pktid.read() 1464 << " / wdata = " << p_vci_tgt.wdata.read() 1465 << " / be = " << p_vci_tgt.be.read() 1466 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1467 #endif 1468 cmd_write_fifo_put = true; 1469 // <Activity counters> 1470 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) { 1471 m_cpt_sc_cost += req_distance(p_vci_tgt.srcid.read()); 1472 } 1473 else { 1474 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_write_flits_local++; 1475 else m_cpt_write_flits_remote++; 1476 m_cpt_write_cost += req_distance(p_vci_tgt.srcid.read()); 1477 } 1478 // </Activity counters> 1479 1480 if (p_vci_tgt.eop) { 1481 // <Activity counters> 1482 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) { 1483 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_sc_local++; 1484 else m_cpt_sc_remote++; 1485 1486 } 1487 else { 1488 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_write_local++; 1489 else m_cpt_write_remote++; 1490 } 1491 // </Activity counters> 1492 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1493 } 1494 } 1495 break; 1496 1497 ///////////////// 1498 case TGT_CMD_CAS: 1499 if((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1500 { 1501 std::cout 
<< "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" 1502 << "illegal format for CAS command " << std::endl; 1503 exit(0); 1504 } 1505 1506 if(p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1507 { 1508 1509 #if DEBUG_MEMC_TGT_CMD 1510 if(m_debug) 1511 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1512 << " address = " << std::hex << p_vci_tgt.address.read() 1513 << " srcid = " << p_vci_tgt.srcid.read() 1514 << " trdid = " << p_vci_tgt.trdid.read() 1515 << " pktid = " << p_vci_tgt.pktid.read() 1516 << " wdata = " << p_vci_tgt.wdata.read() 1517 << " be = " << p_vci_tgt.be.read() 1518 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1519 #endif 1520 cmd_cas_fifo_put = true; 1521 if (p_vci_tgt.eop) { 1522 // <Activity counters> 1523 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_cas_local++; 1524 else m_cpt_cas_remote++; 1525 m_cpt_cas_cost += req_distance(p_vci_tgt.srcid.read()); 1526 // </Activity counters> 1527 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1528 } 1529 } 1530 break; 1531 } // end switch tgt_cmd_fsm 1532 1533 ///////////////////////////////////////////////////////////////////////// 1534 // MULTI_ACK FSM 1535 ///////////////////////////////////////////////////////////////////////// 1536 // This FSM controls the response to the multicast update requests sent 1537 // by the memory cache to the L1 caches and update the UPT. 1538 // 1539 // - The FSM decrements the proper entry in UPT, 1540 // and clear the UPT entry when all responses have been received. 1541 // - If required, it sends a request to the TGT_RSP FSM to complete 1542 // a pending write transaction. 1543 // 1544 // All those multi-ack packets are one flit packet. 1545 // The index in the UPT is defined in the TRDID field. 1546 //////////////////////////////////////////////////////////////////////// 1547 1548 switch(r_multi_ack_fsm.read()) 1167 1549 { 1168 if( m_seg[seg_id]->contains(address) and 1169 m_seg[seg_id]->contains(address + plen - vci_param_int::B) ) 1550 //////////////////// 1551 case MULTI_ACK_IDLE: 1552 { 1553 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1554 1555 // No CC_RECEIVE FSM request and no WRITE FSM request 1556 if( not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1557 break; 1558 1559 uint8_t updt_index; 1560 1561 // handling WRITE FSM request to decrement update table response 1562 // counter if no CC_RECEIVE FSM request 1563 if(not multi_ack_fifo_rok) 1564 { 1565 updt_index = r_write_to_multi_ack_upt_index.read(); 1566 r_write_to_multi_ack_req = false; 1567 } 1568 // Handling CC_RECEIVE FSM request 1569 else 1570 { 1571 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1572 updt_index = DspinDhccpParam::dspin_get(flit, 1573 DspinDhccpParam::MULTI_ACK_UPDT_INDEX); 1574 1575 cc_receive_to_multi_ack_fifo_get = true; 1576 } 1577 1578 assert((updt_index < m_upt.size()) and 1579 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1580 "index too large for UPT"); 1581 1582 r_multi_ack_upt_index = updt_index; 1583 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1584 1585 #if DEBUG_MEMC_MULTI_ACK 1586 if(m_debug) 1587 { 1588 if (multi_ack_fifo_rok) 1589 { 1590 std::cout << " <MEMC " << name() 1591 << " MULTI_ACK_IDLE> Response for UPT entry " 1592 << (size_t)updt_index << std::endl; 1593 } 1594 else 1595 { 1596 std::cout << " <MEMC " << name() 1597 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1598 << updt_index << std::endl; 1599 } 1600 } 1601 #endif 1602 break; 1603 } 1604 1605 //////////////////////// 1606 case 
MULTI_ACK_UPT_LOCK: 1607 { 1608 m_cpt_multi_ack_fsm_upt_lock++; 1609 // get lock to the UPDATE table 1610 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1611 1612 // decrement the number of expected responses 1613 size_t count = 0; 1614 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 1615 1616 /*ODCCP*/ //m_upt.print(); 1617 1618 if(not valid) 1619 { 1620 std::cout << "VCI_MEM_CACHE ERROR " << name() 1621 << " MULTI_ACK_UPT_LOCK state" << std::endl 1622 << "unsuccessful access to decrement the UPT" << std::endl; 1623 exit(0); 1624 } 1625 1626 if(count == 0) 1627 { 1628 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1629 } 1630 else 1631 { 1632 r_multi_ack_fsm = MULTI_ACK_IDLE; 1633 } 1634 1635 #if DEBUG_MEMC_MULTI_ACK 1636 if(m_debug) 1637 std::cout << " <MEMC " << name() 1638 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1639 << " entry = " << r_multi_ack_upt_index.read() 1640 << " / rsp_count = " << std::dec << count << std::endl; 1641 m_cpt_multi_ack_fsm_n_upt_lock++; 1642 #endif 1643 break; 1644 } 1645 1646 ///////////////////////// 1647 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 1648 { 1649 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1650 { 1651 std::cout << "VCI_MEM_CACHE ERROR " << name() 1652 << " MULTI_ACK_UPT_CLEAR state" 1653 << " bad UPT allocation" << std::endl; 1654 exit(0); 1655 } 1656 1657 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 1658 r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read()); 1659 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 1660 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1661 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1662 1663 // clear the UPT entry 1664 m_upt.clear(r_multi_ack_upt_index.read()); 1665 1666 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1667 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1668 1669 #if DEBUG_MEMC_MULTI_ACK 1670 if(m_debug) 1671 std::cout << " <MEMC " << name() 1672 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 1673 << std::dec << r_multi_ack_upt_index.read() << std::endl; 1674 #endif 1675 break; 1676 } 1677 ///////////////////////// 1678 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 1679 // Wait if pending request 1680 { 1681 if ( r_multi_ack_to_tgt_rsp_req.read() ) break; 1682 1683 r_multi_ack_to_tgt_rsp_req = true; 1684 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 1685 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 1686 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 1687 r_multi_ack_fsm = MULTI_ACK_IDLE; 1688 1689 #if DEBUG_MEMC_MULTI_ACK 1690 if(m_debug) 1691 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 1692 << " Request TGT_RSP FSM to send a response to srcid " 1693 << std::hex << r_multi_ack_srcid.read() << std::endl; 1694 #endif 1695 break; 1696 } 1697 } // end switch r_multi_ack_fsm 1698 1699 //////////////////////////////////////////////////////////////////////////////////// 1700 // CONFIG FSM 1701 //////////////////////////////////////////////////////////////////////////////////// 1702 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1703 // The target buffer can have any size, and there is one single command for 1704 // all cache lines covered by the target buffer. 
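// [Editor's note -- illustrative sketch, not part of the changeset] The software sequence
// of six uncached accesses described at the end of this comment block (get MEMC_LOCK, set
// MEMC_ADDR_LO / MEMC_ADDR_HI / MEMC_BUF_LENGTH, write MEMC_CMD_TYPE, release the lock)
// can be pictured as the driver routine below. The MEMC_* cell names and MEMC_CMD_INVAL
// come from the component; their numeric values, the configuration segment mapping and
// the register width used here are assumptions of this sketch.
#include <stdint.h>

// word-cell indices and command code defined by the component's configuration
// interface (values not shown in this changeset)
extern const unsigned MEMC_LOCK, MEMC_ADDR_LO, MEMC_ADDR_HI, MEMC_BUF_LENGTH, MEMC_CMD_TYPE;
extern const uint32_t MEMC_CMD_INVAL;
extern volatile uint32_t memc_config[];   // hypothetical mapping of the config segment

void memc_inval(uint64_t buf_paddr, uint32_t buf_bytes)
{
    while (memc_config[MEMC_LOCK] != 0) ;                    // test-and-set: the read returns
                                                             // the previous lock value
    memc_config[MEMC_ADDR_LO]    = (uint32_t) buf_paddr;     // must be cache-line aligned
    memc_config[MEMC_ADDR_HI]    = (uint32_t)(buf_paddr >> 32);
    memc_config[MEMC_BUF_LENGTH] = buf_bytes;                // buffer length in bytes
    memc_config[MEMC_CMD_TYPE]   = MEMC_CMD_INVAL;           // launch; the VCI response comes
                                                             // back only when the last line
                                                             // has been invalidated
    memc_config[MEMC_LOCK]       = 0;                        // release the lock
}
// [End of editor's sketch]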
1705 // 1706 // An INVAL or SYNC configuration operation is defined by the following registers: 1707 // - bool r_config_cmd : INVAL / SYNC / NOP 1708 1709 // - uint64_t r_config_address : buffer base address 1710 // - uint32_t r_config_cmd_lines : number of lines to be handled 1711 // - uint32_t r_config_rsp_lines : number of lines not completed 1712 1713 // 1714 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1715 // 1716 // all cache lines covered by the buffer. The various lines of a given buffer 1717 // can be pipelined: the CONFIG FSM does not wait for the response to line (n) to send 1718 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1719 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1720 // 1721 // - INVAL request: 1722 // For each line, it accesses the DIR. 1723 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1724 // In case of hit, with no copies in L1 caches, the line is invalidated and 1725 // a response is requested to TGT_RSP FSM. 1726 // If there are copies, a multi-inval or a broadcast-inval coherence transaction 1727 // 1728 // is launched and registered in IVT. The multi-inval transaction completion 1729 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1730 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1731 // TODO : The target buffer address must be aligned on a cache line boundary. 1732 // This constraint can be released, but it requires making 2 PUT transactions 1733 // for the first and the last line... 1734 // 1735 // - SYNC request: 1736 // For each line, it accesses the DIR. 1737 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1738 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1739 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1740 // when a PUT response is received. 1741 // The CONFIG SYNC response is sent only when the last PUT response is received. 1742 // 1743 // From the software point of view, a configuration request is a sequence 1744 // of 6 atomic accesses in an uncached segment.
A dedicated lock is used 1745 // to handle only one configuration command at a given time: 1746 // - Read MEMC_LOCK : Get the lock 1747 // - Write MEMC_ADDR_LO : Set the buffer address LSB 1748 // - Write MEMC_ADDR_HI : Set the buffer address MSB 1749 // - Write MEMC_BUF_LENGTH : set buffer length (bytes) 1750 // - Write MEMC_CMD_TYPE : launch the actual operation 1751 // - WRITE MEMC_LOCK : release the lock 1752 //////////////////////////////////////////////////////////////////////////////////// 1753 1754 switch( r_config_fsm.read() ) 1755 { 1756 ///////////////// 1757 case CONFIG_IDLE: // waiting a config request 1758 { 1759 if ( r_config_cmd.read() != MEMC_CMD_NOP ) 1760 { 1761 r_config_fsm = CONFIG_LOOP; 1762 1763 #if DEBUG_MEMC_CONFIG 1764 if(m_debug) 1765 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1766 << " address = " << std::hex << r_config_address.read() 1767 << " / nlines = " << std::dec << r_config_cmd_lines.read() 1768 << " / type = " << r_config_cmd.read() << std::endl; 1769 #endif 1770 } 1771 break; 1772 } 1773 ///////////////// 1774 case CONFIG_LOOP: // test last line to be handled 1775 { 1776 if ( r_config_cmd_lines.read() == 0 ) 1777 { 1778 r_config_cmd = MEMC_CMD_NOP; 1779 r_config_fsm = CONFIG_WAIT; 1780 } 1781 else 1782 { 1783 r_config_fsm = CONFIG_DIR_REQ; 1784 } 1785 1786 #if DEBUG_MEMC_CONFIG 1787 if(m_debug) 1788 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1789 << " address = " << std::hex << r_config_address.read() 1790 << " / nlines = " << std::dec << r_config_cmd_lines.read() 1791 << " / command = " << r_config_cmd.read() << std::endl; 1792 #endif 1793 break; 1794 } 1795 ///////////////// 1796 case CONFIG_WAIT: // wait completion (last response) 1797 { 1798 if ( r_config_rsp_lines.read() == 0 ) // last response received 1799 { 1800 r_config_fsm = CONFIG_RSP; 1801 } 1802 1803 #if DEBUG_MEMC_CONFIG 1804 if(m_debug) 1805 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1806 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1807 #endif 1808 break; 1809 } 1810 //////////////// 1811 case CONFIG_RSP: // request TGT_RSP FSM to return response 1812 { 1813 if ( not r_config_to_tgt_rsp_req.read() ) 1814 { 1815 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1816 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1817 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1818 r_config_to_tgt_rsp_error = false; 1819 r_config_to_tgt_rsp_req = true; 1820 r_config_fsm = CONFIG_IDLE; 1821 1822 #if DEBUG_MEMC_CONFIG 1823 if(m_debug) 1824 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1825 << " error = " << r_config_to_tgt_rsp_error.read() 1826 << " / rsrcid = " << std::hex << r_config_srcid.read() 1827 << " / rtrdid = " << std::hex << r_config_trdid.read() 1828 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1829 #endif 1830 } 1831 break; 1832 1833 } 1834 1835 //////////////////// 1836 case CONFIG_DIR_REQ: // Request directory lock 1837 { 1838 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG ) 1839 { 1840 r_config_fsm = CONFIG_DIR_ACCESS; 1841 } 1842 1843 #if DEBUG_MEMC_CONFIG 1844 if(m_debug) 1845 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 1846 << " Request DIR access" << std::endl; 1847 #endif 1848 break; 1849 } 1850 /////////////////////// 1851 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1852 { 1853 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1854 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR 
allocation"); 1855 1856 size_t way = 0; 1857 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 1858 1859 r_config_dir_way = way; 1860 r_config_dir_copy_inst = entry.owner.inst; 1861 r_config_dir_copy_srcid = entry.owner.srcid; 1862 r_config_dir_is_cnt = entry.is_cnt; 1863 r_config_dir_count = entry.count; 1864 r_config_dir_lock = entry.lock; 1865 r_config_dir_ptr = entry.ptr; 1866 1867 if (entry.valid and // hit & inval command 1868 (r_config_cmd.read() == MEMC_CMD_INVAL)) 1869 { 1870 r_config_fsm = CONFIG_IVT_LOCK; 1871 } 1872 else if ( entry.valid and // hit & sync command 1873 entry.dirty and 1874 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1875 { 1876 r_config_fsm = CONFIG_TRT_LOCK; 1877 } 1878 else // return to LOOP 1879 { 1880 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1881 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1882 r_config_address = r_config_address.read() + (m_words<<2); 1883 r_config_fsm = CONFIG_LOOP; 1884 } 1885 1886 #if DEBUG_MEMC_CONFIG 1887 if(m_debug) 1888 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 1889 << " address = " << std::hex << r_config_address.read() 1890 << " / hit = " << std::dec << entry.valid 1891 << " / dirty = " << entry.dirty 1892 << " / count = " << entry.count 1893 << " / is_cnt = " << entry.is_cnt << std::endl; 1894 #endif 1895 break; 1896 } 1897 ///////////////////// 1898 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1899 // to a dirty cache line 1900 // keep DIR lock, and try to get TRT lock 1901 // return to LOOP state if TRT full 1902 // reset dirty bit in DIR and register a PUT 1903 // trabsaction in TRT if not full. 1904 { 1905 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1906 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1907 1908 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1909 { 1910 size_t index = 0; 1911 bool wok = not m_trt.full(index); 1912 1913 if ( not wok ) 1914 { 1915 r_config_fsm = CONFIG_LOOP; 1916 } 1917 else 1918 { 1919 size_t way = r_config_dir_way.read(); 1920 size_t set = m_y[r_config_address.read()]; 1921 1922 // reset dirty bit in DIR 1923 DirectoryEntry entry; 1924 entry.valid = true; 1925 entry.dirty = false; 1926 entry.tag = m_z[r_config_address.read()]; 1927 entry.is_cnt = r_config_dir_is_cnt.read(); 1928 entry.lock = r_config_dir_lock.read(); 1929 entry.ptr = r_config_dir_ptr.read(); 1930 entry.count = r_config_dir_count.read(); 1931 entry.owner.inst = r_config_dir_copy_inst.read(); 1932 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1933 m_cache_directory.write( set, way, entry ); 1934 1935 r_config_trt_index = index; 1936 r_config_fsm = CONFIG_TRT_SET; 1937 } 1938 1939 #if DEBUG_MEMC_CONFIG 1940 if(m_debug) 1941 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1942 << " wok = " << std::dec << wok 1943 << " index = " << index << std::endl; 1944 #endif 1945 } 1946 break; 1947 } 1948 //////////////////// 1949 case CONFIG_TRT_SET: // read data in cache 1950 // and post a PUT request in TRT 1951 { 1952 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1953 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1954 1955 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1956 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1957 1958 // read data into cache 1959 size_t way = r_config_dir_way.read(); 1960 size_t set = m_y[r_config_address.read()]; 1961 std::vector<data_t> data_vector; 1962 data_vector.clear(); 1963 for(size_t word=0; 
word<m_words; word++) 1964 { 1965 uint32_t data = m_cache_data.read( way, set, word ); 1966 data_vector.push_back( data ); 1967 } 1968 1969 // post the PUT request in TRT 1970 m_trt.set( r_config_trt_index.read(), 1971 false, // PUT transaction 1972 m_nline[r_config_address.read()], // line index 1973 0, // srcid: unused 1974 0, // trdid: unused 1975 0, // pktid: unused 1976 false, // not proc_read 1977 0, // read_length: unused 1978 0, // word_index: unused 1979 std::vector<be_t>(m_words,0xF), // byte-enable: unused 1980 data_vector, // data to be written 1981 0, // ll_key: unused 1982 true ); // requested by config FSM 1983 r_config_fsm = CONFIG_PUT_REQ; 1984 1985 #if DEBUG_MEMC_CONFIG 1986 if(m_debug) 1987 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1988 << " address = " << std::hex << r_config_address.read() 1989 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1990 #endif 1991 break; 1992 } 1993 //////////////////// 1994 case CONFIG_PUT_REQ: // post PUT request to IXR_CMD_FSM 1995 { 1996 if ( not r_config_to_ixr_cmd_req.read() ) 1997 { 1998 r_config_to_ixr_cmd_req = true; 1999 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 2000 2001 // prepare next iteration 2002 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2003 r_config_address = r_config_address.read() + (m_words<<2); 2004 r_config_fsm = CONFIG_LOOP; 2005 2006 #if DEBUG_MEMC_CONFIG 2007 if(m_debug) 2008 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> post PUT request to IXR_CMD_FSM" 2009 << " / address = " << std::hex << r_config_address.read() << std::endl; 2010 #endif 2011 } 2012 break; 2013 } 2014 ///////////////////// 2015 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 2016 // Keep DIR lock and Try to get IVT lock. 2017 // Return to LOOP state if IVT full. 2018 // Register inval in IVT, and invalidate the 2019 // directory if IVT not full. 
2020 { 2021 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2022 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 2023 2024 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 2025 { 2026 size_t set = m_y[(addr_t)(r_config_address.read())]; 2027 size_t way = r_config_dir_way.read(); 2028 2029 if ( r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 2030 { 2031 m_cache_directory.inval( way, set ); 2032 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2033 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 2034 r_config_address = r_config_address.read() + (m_words<<2); 2035 r_config_fsm = CONFIG_LOOP; 2036 2037 #if DEBUG_MEMC_CONFIG 2038 if(m_debug) 2039 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2040 << " No copies in L1 : inval DIR entry" << std::endl; 2041 #endif 2042 } 2043 else // try to register inval in IVT 2044 { 2045 bool wok = false; 2046 size_t index = 0; 2047 bool broadcast = r_config_dir_is_cnt.read(); 2048 size_t srcid = r_config_srcid.read(); 2049 size_t trdid = r_config_trdid.read(); 2050 size_t pktid = r_config_pktid.read(); 2051 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 2052 size_t nb_copies = r_config_dir_count.read(); 2053 2054 wok = m_ivt.set(false, // it's an inval transaction 2055 broadcast, 2056 false, // no response required 2057 true, // acknowledge required 2058 srcid, 2059 trdid, 2060 pktid, 2061 nline, 2062 nb_copies, 2063 index); 2064 2065 if ( wok ) // IVT success => inval DIR slot 2066 { 2067 m_cache_directory.inval( way, set ); 2068 r_config_ivt_index = index; 2069 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 2070 else r_config_fsm = CONFIG_INVAL_SEND; 2071 2072 #if DEBUG_MEMC_CONFIG 2073 if(m_debug) 2074 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2075 << " Inval DIR entry and register inval in IVT" 2076 << " : index = " << std::dec << index 2077 << " / broadcast = " << broadcast << std::endl; 2078 #endif 2079 } 2080 else // IVT full => release both DIR and IVT locks 2081 { 2082 r_config_fsm = CONFIG_LOOP; 2083 2084 #if DEBUG_MEMC_CONFIG 2085 if(m_debug) 2086 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2087 << " IVT full : release DIR & IVT locks and retry" << std::endl; 2088 #endif 2089 } 2090 } 2091 } 2092 break; 2093 } 2094 //////////////////// 2095 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 2096 { 2097 if( not r_config_to_cc_send_multi_req.read() and 2098 not r_config_to_cc_send_brdcast_req.read() ) 2099 { 2100 // post bc inval request 2101 r_config_to_cc_send_multi_req = false; 2102 r_config_to_cc_send_brdcast_req = true; 2103 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2104 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2105 2106 // prepare next iteration 2107 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2108 r_config_address = r_config_address.read() + (m_words<<2); 2109 r_config_fsm = CONFIG_LOOP; 2110 2111 #if DEBUG_MEMC_CONFIG 2112 if(m_debug) 2113 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 2114 << " Post a broadcast inval request to CC_SEND FSM" 2115 << " / address = " << r_config_address.read() <<std::endl; 2116 #endif 2117 } 2118 break; 2119 } 2120 /////////////////////// 2121 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 2122 { 2123 if( not r_config_to_cc_send_multi_req.read() and 2124 not r_config_to_cc_send_brdcast_req.read() ) 2125 { 2126 r_config_to_cc_send_multi_req = true; 2127 r_config_to_cc_send_brdcast_req = false; 2128 
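// Illustrative sketch (comments only, paraphrasing the code below): a multi
// inval must reach every L1 copy of the line. The first (srcid, inst) pair is
// taken from the owner field of the directory entry saved in CONFIG_DIR_ACCESS;
// the remaining copies, if any, are read from the HEAP linked list:
//
//   push(r_config_dir_copy_srcid, r_config_dir_copy_inst);   // DIR owner
//   if (r_config_dir_count == 1) go to next line (CONFIG_LOOP);
//   else                         go to CONFIG_HEAP_REQ and scan the HEAP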
r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2129 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2130 2131 // post data into FIFO 2132 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2133 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2134 config_to_cc_send_fifo_put = true; 2135 2136 if ( r_config_dir_count.read() == 1 ) // one copy 2137 { 2138 // prepare next iteration 2139 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2140 r_config_address = r_config_address.read() + (m_words<<2); 2141 r_config_fsm = CONFIG_LOOP; 2142 } 2143 else // several copies 2144 { 2145 r_config_fsm = CONFIG_HEAP_REQ; 2146 } 2147 2148 #if DEBUG_MEMC_CONFIG 2149 if(m_debug) 2150 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2151 << " Post multi inval request to CC_SEND FSM" 2152 << " / address = " << std::hex << r_config_address.read() 2153 << " / copy = " << r_config_dir_copy_srcid.read() 2154 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2155 #endif 2156 } 2157 break; 2158 } 2159 ///////////////////// 2160 case CONFIG_HEAP_REQ: // Try to get access to Heap 2161 { 2162 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 2163 { 2164 r_config_fsm = CONFIG_HEAP_SCAN; 2165 r_config_heap_next = r_config_dir_ptr.read(); 2166 } 2167 2168 #if DEBUG_MEMC_CONFIG 2169 if(m_debug) 2170 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2171 << " Requesting HEAP lock" << std::endl; 2172 #endif 2173 break; 2174 } 2175 ////////////////////// 2176 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2177 { 2178 HeapEntry entry = m_heap.read( r_config_heap_next.read() ); 2179 bool last_copy = (entry.next == r_config_heap_next.read()); 2180 2181 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2182 config_to_cc_send_fifo_inst = entry.owner.inst; 2183 // config_to_cc_send_fifo_last = last_copy; 2184 config_to_cc_send_fifo_put = true; 2185 2186 if ( m_config_to_cc_send_inst_fifo.wok() ) // inval request accepted 2187 { 2188 r_config_heap_next = entry.next; 2189 if ( last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 2190 } 2191 2192 #if DEBUG_MEMC_CONFIG 2193 if(m_debug) 2194 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2195 << " Post multi inval request to CC_SEND FSM" 2196 << " / address = " << std::hex << r_config_address.read() 2197 << " / copy = " << entry.owner.srcid 2198 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2199 #endif 2200 break; 2201 } 2202 ////////////////////// 2203 case CONFIG_HEAP_LAST: // HEAP housekeeping 2204 { 2205 size_t free_pointer = m_heap.next_free_ptr(); 2206 HeapEntry last_entry; 2207 last_entry.owner.srcid = 0; 2208 last_entry.owner.inst = false; 2209 2210 if ( m_heap.is_full() ) 2211 { 2212 last_entry.next = r_config_dir_ptr.read(); 2213 m_heap.unset_full(); 2214 } 2215 else 2216 { 2217 last_entry.next = free_pointer; 2218 } 2219 2220 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 2221 m_heap.write( r_config_heap_next.read(), last_entry ); 2222 2223 // prepare next iteration 2224 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2225 r_config_address = r_config_address.read() + (m_words<<2); 2226 r_config_fsm = CONFIG_LOOP; 2227 2228 #if DEBUG_MEMC_CONFIG 2229 if(m_debug) 2230 std::cout << " <MEMC " << name() << " CONFIG_HEAP_LAST>" 2231 << " Heap housekeeping" << std::endl; 2232 #endif 2233 break; 2234 } 2235 } // end switch r_config_fsm 2236 2237 //////////////////////////////////////////////////////////////////////////////////// 2238 // READ 
FSM 2239 //////////////////////////////////////////////////////////////////////////////////// 2240 // The READ FSM controls the VCI read and ll requests. 2241 // It takes the lock protecting the cache directory to check the cache line status: 2242 // - In case of HIT 2243 // The fsm copies the data (one line, or one single word) 2244 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 2245 // The requesting initiator is registered in the cache directory. 2246 // If the number of copy is larger than 1, the new copy is registered 2247 // in the HEAP. 2248 // If the number of copy is larger than the threshold, the HEAP is cleared, 2249 // and the corresponding line switches to the counter mode. 2250 // - In case of MISS 2251 // The READ fsm takes the lock protecting the transaction tab. 2252 // If a read transaction to the XRAM for this line already exists, 2253 // or if the transaction tab is full, the fsm is stalled. 2254 // If a TRT entry is free, the READ request is registered in TRT, 2255 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 2256 // The READ FSM returns in the IDLE state as the read transaction will be 2257 // completed when the missing line will be received. 2258 //////////////////////////////////////////////////////////////////////////////////// 2259 2260 switch(r_read_fsm.read()) 2261 { 2262 /////////////// 2263 case READ_IDLE: // waiting a read request 2264 { 2265 if(m_cmd_read_addr_fifo.rok()) 2266 { 2267 2268 #if DEBUG_MEMC_READ 2269 if(m_debug) 2270 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2271 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2272 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2273 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2274 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2275 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2276 #endif 2277 r_read_coherent = false; //WB by default 2278 r_read_ll_done = false; 2279 r_read_fsm = READ_DIR_REQ; 2280 } 2281 break; 2282 } 2283 2284 ////////////////// 2285 case READ_DIR_REQ: // Get the lock to the directory 2286 { 2287 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2288 { 2289 r_read_fsm = READ_DIR_LOCK; 2290 m_cpt_read_fsm_n_dir_lock++; 2291 } 2292 2293 #if DEBUG_MEMC_READ 2294 if(m_debug) 2295 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 2296 #endif 2297 2298 m_cpt_read_fsm_dir_lock++; 2299 2300 break; 2301 } 2302 2303 /////////////////// 2304 case READ_DIR_LOCK: // check directory for hit / miss 2305 { 2306 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2307 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2308 2309 size_t way = 0; 2310 DirectoryEntry entry = 2311 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2312 if(((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) and not r_read_ll_done.read()) // access the global table ONLY when we have an LL cmd 2313 { 2314 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2315 /**//*std::cout << "MEMCACHE : from proc " << m_cmd_read_srcid_fifo.read() 2316 << " | @ " << std::hex << m_cmd_read_addr_fifo.read() 2317 << " | LL" << std::endl;*/ 2318 r_read_ll_done = true; 2319 } 2320 r_read_is_cnt = entry.is_cnt; 2321 r_read_dirty = entry.dirty; 2322 r_read_lock = entry.lock; 2323 r_read_tag = entry.tag; 2324 r_read_way = way; 2325 r_read_count = entry.count; 2326 r_read_copy = entry.owner.srcid; 2327 2328 r_read_copy_inst = entry.owner.inst; 2329 r_read_ptr = 
entry.ptr; // pointer to the heap 2330 2331 // check if this is a cached read, this means pktid is either 2332 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2333 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2334 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2335 2336 if(entry.valid) // hit 2337 { 2338 r_read_coherent = entry.cache_coherent; 2339 if (entry.cache_coherent or (entry.count == 0))// or (entry.owner.srcid == m_cmd_read_srcid_fifo.read())) //hit on a WT line or the owner has no more copy (if LL, the owner must be invalidated even if he made the request) 2340 { 2341 // test if we need to register a new copy in the heap 2342 if(entry.is_cnt || (entry.count == 0) || !cached_read) 2343 { 2344 r_read_fsm = READ_DIR_HIT; 2345 } 2346 else 2347 { 2348 //std::cout << "is LL = " << ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) << std::endl; 2349 //std::cout << "coherent = " << entry.cache_coherent << " | count = " << std::dec << entry.count << " | cached = " << cached_read << std::endl; 2350 r_read_fsm = READ_HEAP_REQ; 2351 } 2352 } 2353 else //hit on a WB line owned by an other proc 2354 { 2355 r_read_fsm = READ_IVT_LOCK; 2356 } 2357 } 2358 else // miss 2359 { 2360 r_read_fsm = READ_TRT_LOCK; 2361 } 2362 2363 #if DEBUG_MEMC_READ 2364 if(m_debug) 2365 { 2366 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2367 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2368 << " / hit = " << std::dec << entry.valid 2369 << " / count = " <<std::dec << entry.count 2370 << " / is_cnt = " << entry.is_cnt 2371 << " / is_coherent = " << entry.cache_coherent; 2372 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2373 else std::cout << std::endl; 2374 } 2375 #endif 2376 break; 2377 } 2378 2379 /////////////////// 2380 case READ_IVT_LOCK: 2381 { 2382 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_READ) 2383 { 2384 size_t index; 2385 addr_t nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2386 /*std::cout << "nline = " << std::dec << nline << std::endl 2387 << "inval en cours sur la ligne = " << m_upt.search_inval(nline, index) << std::endl 2388 << "UPT full = " << m_upt.is_full() << std::endl 2389 << "CC_SEND req = " << r_read_to_cc_send_req.read() << std::endl 2390 << "CLENAUP req = " <<r_read_to_cleanup_req.read() << std::endl;*/ 2391 if(m_ivt.search_inval(nline, index) or m_ivt.is_full() or r_read_to_cc_send_req.read() or r_read_to_cleanup_req.read()) //Check pending inval 2392 { 2393 r_read_fsm = READ_WAIT; 2394 #if DEBUG_MEMC_READ 2395 if(m_debug) 2396 { 2397 std::cout 2398 << " <MEMC " << name() << " READ_IVT_LOCK>" 2399 << " Wait cleanup completion" 2400 << std::endl; 2401 } 2402 #endif 2403 } 2404 else 2405 { 2406 r_read_to_cc_send_req = true; 2407 r_read_to_cc_send_dest = r_read_copy.read(); 2408 r_read_to_cc_send_nline = nline; 2409 r_read_to_cc_send_inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2410 r_read_to_cleanup_req = true; 2411 r_read_to_cleanup_nline = nline; 2412 r_read_to_cleanup_srcid = m_cmd_read_srcid_fifo.read(); 2413 r_read_to_cleanup_length = m_cmd_read_length_fifo.read(); 2414 r_read_to_cleanup_first_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2415 r_read_to_cleanup_cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2416 r_read_to_cleanup_addr = m_cmd_read_addr_fifo.read(); 2417 r_read_to_cleanup_is_ll= ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL); 2418 r_read_to_cleanup_ll_key = r_read_ll_key.read(); 2419 //std::cout << "cleanup req (read) on line " << nline << 
" /on proc " << r_read_copy.read() << std::endl; 2420 2421 m_ivt.set(false, // it's an inval transaction 2422 false, // it's not a broadcast 2423 false, // it needs a read response 2424 false, // no acknowledge required 2425 m_cmd_read_srcid_fifo.read(), 2426 m_cmd_read_trdid_fifo.read(), 2427 m_cmd_read_pktid_fifo.read(), 2428 nline, 2429 0x1, //Expect only one answer 2430 index); 2431 2432 cmd_read_fifo_get = true; 2433 r_read_fsm = READ_IDLE; 2434 #if DEBUG_MEMC_READ 2435 if(m_debug) 2436 { 2437 std::cout 2438 << " <MEMC " << name() << " READ_IVT_LOCK>" 2439 << " Inval req on an NCC line" 2440 << std::endl; 2441 } 2442 #endif 2443 } 2444 } 2445 2446 2447 break; 2448 } 2449 2450 ////////////////// 2451 case READ_WAIT://Release the locks 2452 { 2453 r_read_fsm = READ_DIR_REQ; 2454 #if DEBUG_MEMC_READ 2455 if(m_debug) 2456 { 2457 std::cout 2458 << " <MEMC " << name() << " READ_WAIT>" << std::endl; 2459 } 2460 #endif 2461 break; 2462 } 2463 /////////////////// 2464 case READ_DIR_HIT: // read data in cache & update the directory 2465 // we enter this state in 3 cases: 2466 // - the read request is uncachable 2467 // - the cache line is in counter mode 2468 // - the cache line is valid but not replicated 2469 2470 { 2471 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2472 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2473 // check if this is an instruction read, this means pktid is either 2474 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2475 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2476 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2477 // check if this is a cached read, this means pktid is either 2478 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2479 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2480 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2481 bool is_cnt = r_read_is_cnt.read(); 2482 2483 // read data in the cache 2484 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2485 size_t way = r_read_way.read(); 2486 2487 m_cache_data.read_line(way, set, r_read_data); 2488 2489 // update the cache directory 2490 DirectoryEntry entry; 2491 entry.valid = true; 2492 entry.cache_coherent = r_read_coherent.read() or inst_read or (!(cached_read)) or (r_read_copy.read() != m_cmd_read_srcid_fifo.read()); 2493 r_read_coherent = r_read_coherent.read() or inst_read or (!(cached_read)) or (r_read_copy.read() != m_cmd_read_srcid_fifo.read()); 2494 entry.is_cnt = is_cnt; 2495 entry.dirty = r_read_dirty.read(); 2496 entry.tag = r_read_tag.read(); 2497 entry.lock = r_read_lock.read(); 2498 entry.ptr = r_read_ptr.read(); 2499 if(cached_read) // Cached read => we must update the copies 2500 { 2501 if(!is_cnt) // Not counter mode 2502 { 2503 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2504 entry.owner.inst = inst_read; 2505 entry.count = r_read_count.read() + 1; 2506 } 2507 else // Counter mode 2508 { 2509 entry.owner.srcid = 0; 2510 entry.owner.inst = false; 2511 entry.count = r_read_count.read() + 1; 2512 } 2513 } 2514 else // Uncached read 2515 { 2516 entry.owner.srcid = r_read_copy.read(); 2517 entry.owner.inst = r_read_copy_inst.read(); 2518 entry.count = r_read_count.read(); 2519 } 2520 2521 #if DEBUG_MEMC_READ 2522 if(m_debug) 2523 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2524 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2525 << " / set = " << std::dec << set 2526 << " / way = " << way 2527 << " / owner_id = " << std::hex << entry.owner.srcid 2528 << " / owner_ins = " << std::dec << 
entry.owner.inst 2529 << " / coherent = " << entry.cache_coherent 2530 << " / count = " << entry.count 2531 << " / is_cnt = " << entry.is_cnt << std::endl; 2532 #endif 2533 2534 m_cache_directory.write(set, way, entry); 2535 r_read_fsm = READ_RSP; 2536 break; 2537 } 2538 /////////////////// 2539 case READ_HEAP_REQ: // Get the lock to the HEAP directory 2540 { 2541 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2542 { 2543 r_read_fsm = READ_HEAP_LOCK; 2544 m_cpt_read_fsm_n_heap_lock++; 2545 } 2546 2547 #if DEBUG_MEMC_READ 2548 if(m_debug) 2549 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 2550 << " Requesting HEAP lock " << std::endl; 2551 #endif 2552 2553 m_cpt_read_fsm_heap_lock++; 2554 2555 break; 2556 } 2557 2558 //////////////////// 2559 case READ_HEAP_LOCK: // read data in cache, update the directory 2560 // and prepare the HEAP update 2561 { 2562 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2563 { 2564 // enter counter mode when we reach the limit of copies or the heap is full 2565 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 2566 2567 if (!r_read_coherent.read()) 2568 { 2569 std::cout << "Address = " << std::hex << (m_cmd_read_addr_fifo.read()) << std::dec << " |count = " << r_read_count.read() << std::endl; 2570 } 2571 assert (r_read_coherent.read() && "accÚs au heap sur ncc"); 2572 // read data in the cache 2573 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2574 size_t way = r_read_way.read(); 2575 2576 m_cache_data.read_line(way, set, r_read_data); 2577 2578 // update the cache directory 2579 DirectoryEntry entry; 2580 entry.valid = true; 2581 entry.cache_coherent = r_read_coherent.read(); 2582 entry.is_cnt = go_cnt; 2583 entry.dirty = r_read_dirty.read(); 2584 entry.tag = r_read_tag.read(); 2585 entry.lock = r_read_lock.read(); 2586 entry.count = r_read_count.read() + 1; 2587 2588 if(not go_cnt) // Not entering counter mode 2589 { 2590 entry.owner.srcid = r_read_copy.read(); 2591 entry.owner.inst = r_read_copy_inst.read(); 2592 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 2593 } 2594 else // Entering Counter mode 2595 { 2596 entry.owner.srcid = 0; 2597 entry.owner.inst = false; 2598 entry.ptr = 0; 2599 } 2600 2601 m_cache_directory.write(set, way, entry); 2602 2603 // prepare the heap update (add an entry, or clear the linked list) 2604 if(not go_cnt) // not switching to counter mode 2605 { 2606 // We test if the next free entry in the heap is the last 2607 HeapEntry heap_entry = m_heap.next_free_entry(); 2608 r_read_next_ptr = heap_entry.next; 2609 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 2610 2611 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 2612 } 2613 else // switching to counter mode 2614 { 2615 if(r_read_count.read() >1) // heap must be cleared 2616 { 2617 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2618 r_read_next_ptr = m_heap.next_free_ptr(); 2619 m_heap.write_free_ptr(r_read_ptr.read()); 2620 2621 if(next_entry.next == r_read_ptr.read()) // last entry 2622 { 2623 r_read_fsm = READ_HEAP_LAST; // erase the entry 2624 } 2625 else // not the last entry 2626 { 2627 r_read_ptr = next_entry.next; 2628 r_read_fsm = READ_HEAP_ERASE; // erase the list 2629 } 2630 } 2631 else // the heap is not used / nothing to do 2632 { 2633 r_read_fsm = READ_RSP; 2634 } 2635 } 2636 2637 #if DEBUG_MEMC_READ 2638 if(m_debug) 2639 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 2640 << " tag = " << std::hex << entry.tag 2641 << " set = " << std::dec << set 
2642 << " way = " << way 2643 << " count = " << entry.count 2644 << " is_cnt = " << entry.is_cnt << std::endl; 2645 #endif 2646 } 2647 else 2648 { 2649 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK" 2650 << "Bad HEAP allocation" << std::endl; 2651 exit(0); 2652 } 2653 break; 2654 } 2655 ///////////////////// 2656 case READ_HEAP_WRITE: // add an entry in the heap 2657 { 2658 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2659 { 2660 HeapEntry heap_entry; 2661 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2662 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2663 2664 if(r_read_count.read() == 1) // creation of a new linked list 2665 { 2666 heap_entry.next = m_heap.next_free_ptr(); 2667 } 2668 else // head insertion in existing list 2669 { 2670 heap_entry.next = r_read_ptr.read(); 2671 } 2672 m_heap.write_free_entry(heap_entry); 2673 m_heap.write_free_ptr(r_read_next_ptr.read()); 2674 if(r_read_last_free.read()) m_heap.set_full(); 2675 2676 r_read_fsm = READ_RSP; 2677 2678 #if DEBUG_MEMC_READ 2679 if(m_debug) 2680 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 2681 << " owner_id = " << std::hex << heap_entry.owner.srcid 2682 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 2683 #endif 2684 } 2685 else 2686 { 2687 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 2688 << "Bad HEAP allocation" << std::endl; 2689 exit(0); 2690 } 2691 break; 2692 } 2693 ///////////////////// 2694 case READ_HEAP_ERASE: 2695 { 2696 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2697 { 2698 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2699 if(next_entry.next == r_read_ptr.read()) 2700 { 2701 r_read_fsm = READ_HEAP_LAST; 2702 } 2703 else 2704 { 2705 r_read_ptr = next_entry.next; 2706 r_read_fsm = READ_HEAP_ERASE; 2707 } 2708 } 2709 else 2710 { 2711 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 2712 << "Bad HEAP allocation" << std::endl; 2713 exit(0); 2714 } 2715 break; 2716 } 2717 2718 //////////////////// 2719 case READ_HEAP_LAST: 2720 { 2721 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2722 { 2723 HeapEntry last_entry; 2724 last_entry.owner.srcid = 0; 2725 last_entry.owner.inst = false; 2726 2727 if(m_heap.is_full()) 2728 { 2729 last_entry.next = r_read_ptr.read(); 2730 m_heap.unset_full(); 2731 } 2732 else 2733 { 2734 last_entry.next = r_read_next_ptr.read(); 2735 } 2736 m_heap.write(r_read_ptr.read(),last_entry); 2737 r_read_fsm = READ_RSP; 2738 } 2739 else 2740 { 2741 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 2742 << "Bad HEAP allocation" << std::endl; 2743 exit(0); 2744 } 2745 break; 2746 } 2747 ////////////// 2748 case READ_RSP: // request the TGT_RSP FSM to return data 2749 { 2750 if(!r_read_to_tgt_rsp_req) 2751 { 2752 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2753 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2754 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2755 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2756 /*RWT*/ 2757 //BUG pktid 2758 if (r_read_coherent.read()) 2759 { 2760 r_read_to_tgt_rsp_pktid = 0x0 + m_cmd_read_pktid_fifo.read(); 2761 //std::cout << "READ RSP COHERENT on word" << std::hex << m_x[(addr_t) m_cmd_read_addr_fifo.read()] << std::dec << std::endl; 2762 } 2763 else 2764 { 2765 r_read_to_tgt_rsp_pktid = 0x8 + m_cmd_read_pktid_fifo.read(); 2766 } 2767 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2768 
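// Illustrative note (assumption, inferred from the two assignments just above):
// in this RWT branch the response pktid appears to carry an extra flag in bit 3,
// OR-ed in when the line is served in non-coherent (write-back / NCC) mode, so
// the requesting L1 cache can distinguish a WB copy from a WT one:
//
//   pktid = (coherent ? 0x0 : 0x8) | original_pktid;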
r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2769 cmd_read_fifo_get = true; 2770 r_read_to_tgt_rsp_req = true; 2771 r_read_fsm = READ_IDLE; 2772 2773 #if DEBUG_MEMC_READ 2774 if(m_debug) 2775 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 2776 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 2777 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 2778 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2779 #endif 2780 } 2781 break; 2782 } 2783 /////////////////// 2784 case READ_TRT_LOCK: // read miss : check the Transaction Table 2785 { 2786 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2787 { 2788 size_t index = 0; 2789 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2790 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2791 bool hit_write = m_trt.hit_write(m_nline[addr]); 2792 bool wok = !m_trt.full(index); 2793 2794 if(hit_read or !wok or hit_write) // missing line already requested or no space 2795 { 2796 if(!wok) 2797 { 2798 m_cpt_trt_full++; 2799 } 2800 if(hit_read or hit_write) m_cpt_trt_rb++; 2801 r_read_fsm = READ_IDLE; 2802 } 2803 else // missing line is requested to the XRAM 2804 { 2805 m_cpt_read_miss++; 2806 r_read_trt_index = index; 2807 r_read_fsm = READ_TRT_SET; 2808 } 2809 2810 #if DEBUG_MEMC_READ 2811 if(m_debug) 2812 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 2813 << " hit_read = " << hit_read 2814 << " / hit_write = " << hit_write 2815 << " / full = " << !wok << std::endl; 2816 m_cpt_read_fsm_n_trt_lock++; 2817 #endif 2818 } 2819 2820 m_cpt_read_fsm_trt_lock++; 2821 2822 break; 2823 } 2824 2825 ////////////////// 2826 case READ_TRT_SET: // register get transaction in TRT 2827 { 2828 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2829 { 2830 m_trt.set(r_read_trt_index.read(), 2831 true, 2832 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2833 m_cmd_read_srcid_fifo.read(), 2834 m_cmd_read_trdid_fifo.read(), 2835 m_cmd_read_pktid_fifo.read(), 2836 true, 2837 m_cmd_read_length_fifo.read(), 2838 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2839 std::vector<be_t> (m_words,0), 2840 std::vector<data_t> (m_words,0), 2841 r_read_ll_key.read()); 2842 2843 #if DEBUG_MEMC_READ 2844 if(m_debug) 2845 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TGT:" 2846 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2847 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2848 #endif 2849 r_read_fsm = READ_TRT_REQ; 2850 } 2851 break; 2852 } 2853 2854 ////////////////// 2855 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2856 { 2857 if(not r_read_to_ixr_cmd_req) 2858 { 2859 cmd_read_fifo_get = true; 2860 r_read_to_ixr_cmd_req = true; 2861 //r_read_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2862 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2863 r_read_fsm = READ_IDLE; 2864 2865 #if DEBUG_MEMC_READ 2866 if(m_debug) 2867 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 2868 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2869 #endif 2870 } 2871 break; 2872 } 2873 } // end switch read_fsm 2874 2875 /////////////////////////////////////////////////////////////////////////////////// 2876 // WRITE FSM 2877 /////////////////////////////////////////////////////////////////////////////////// 2878 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 
2879 // All addresses in a burst must be in the same cache line. 2880 // A complete write burst is consumed in the FIFO & copied to a local buffer. 2881 // Then the FSM takes the lock protecting the cache directory, to check 2882 // if the line is in the cache. 2883 // 2884 // - In case of HIT, the cache is updated. 2885 // If there is no other copy, an acknowledge response is immediately 2886 // returned to the writing processor. 2887 // If the data is cached by other processors, a coherence transaction must 2888 // be launched (sc requests always require a coherence transaction): 2889 // It is a multicast update if the line is not in counter mode: the processor 2890 // takes the lock protecting the Update Table (UPT) to register this transaction. 2891 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2892 // a multi-update request to all owners of the line (but the writer), 2893 // through the CC_SEND FSM. In case of coherence transaction, the WRITE FSM 2894 // does not respond to the writing processor, as this response will be sent by 2895 // the MULTI_ACK FSM when all update responses have been received. 2896 // It is a broadcast invalidate if the line is in counter mode: The line 2897 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2898 // after registration in TRT. 2899 // 2900 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2901 // table (TRT). If a read transaction to the XRAM for this line already exists, 2902 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2903 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2904 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2905 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 
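// Illustrative sketch (comments only, derived from the WRITE_DIR_LOCK and
// WRITE_DIR_HIT states below; not part of the original source): on a write hit
// the choice of coherence action reduces to two predicates,
//
//   bool owner     = (entry.owner.srcid == writer_srcid) and not entry.owner.inst;
//   bool no_update = (entry.count == 0) or
//                    (owner and (entry.count == 1) and not is_sc);
//
//   if (entry.is_cnt and entry.count) -> broadcast inval + PUT to XRAM (WRITE_BC_*)
//   else if (no_update)               -> write the cache and answer the writer at once
//   else                              -> multicast update via UPT / CC_SEND / MULTI_ACK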
2906 ///////////////////////////////////////////////////////////////////////////////////// 2907 2908 switch(r_write_fsm.read()) 2909 { 2910 //////////////// 2911 case WRITE_IDLE: // copy first word of a write burst in local buffer 2912 { 2913 if (not m_cmd_write_addr_fifo.rok()) break; 2914 // consume a word in the FIFO & write it in the local buffer 2915 cmd_write_fifo_get = true; 2916 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2917 2918 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2919 r_write_word_index = index; 2920 r_write_word_count = 0; 2921 r_write_data[index] = m_cmd_write_data_fifo.read(); 2922 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2923 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2924 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2925 2926 // if SC command, get the SC key 2927 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2928 { 2929 assert( not m_cmd_write_eop_fifo.read() && 2930 "MEMC ERROR in WRITE_IDLE state: " 2931 "invalid packet format for SC command"); 2932 2933 r_write_sc_key = m_cmd_write_data_fifo.read(); 2934 } 2935 // initialize the be field for all words 2936 for(size_t word=0 ; word<m_words ; word++) 2937 { 2938 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2939 else r_write_be[word] = 0x0; 2940 } 2941 2942 if (m_cmd_write_eop_fifo.read()) 2943 { 2944 r_write_fsm = WRITE_DIR_REQ; 2945 } 2946 else 2947 { 2948 r_write_fsm = WRITE_NEXT; 2949 } 2950 2951 #if DEBUG_MEMC_WRITE 2952 if(m_debug) 2953 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request " 2954 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 2955 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 2956 << " / data = " << m_cmd_write_data_fifo.read() 2957 << " / pktid = " << m_cmd_write_pktid_fifo.read() 2958 << std::endl; 2959 #endif 2960 break; 2961 } 2962 2963 //////////////// 2964 case WRITE_NEXT: // copy next word of a write burst in local buffer 2965 { 2966 if (not m_cmd_write_addr_fifo.rok()) break; 2967 2968 // check that the next word is in the same cache line 2969 assert((m_nline[(addr_t)(r_write_address.read())] == 2970 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) && 2971 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2972 2973 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2974 bool is_sc = ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC); 2975 2976 // check that SC command has constant address 2977 assert((not is_sc or (index == r_write_word_index)) && 2978 "MEMC ERROR in WRITE_NEXT state: " 2979 "the address must be constant on a SC command"); 2980 2981 // check that SC command has two flits 2982 assert((not is_sc or m_cmd_write_eop_fifo.read()) && 2983 "MEMC ERROR in WRITE_NEXT state: " 2984 "invalid packet format for SC command"); 2985 // consume a word in the FIFO & write it in the local buffer 2986 cmd_write_fifo_get = true; 2987 2988 r_write_be[index] = m_cmd_write_be_fifo.read(); 2989 r_write_data[index] = m_cmd_write_data_fifo.read(); 2990 2991 // the first flit of a SC command is the reservation key and 2992 // therefore it must not be counted as a data to write 2993 if (not is_sc) 2994 { 2995 r_write_word_count = r_write_word_count.read() + 1; 2996 } 2997 2998 if (m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2999 3000 #if DEBUG_MEMC_WRITE 3001 if (m_debug) 3002 std::cout << " <MEMC " << name() 3003 << " WRITE_NEXT> Write another word in local buffer" 3004 << std::endl; 3005 #endif 3006 break; 3007 } 3008 3009 //////////////////// 3010 case 
WRITE_DIR_REQ: 3011 { 3012 // Get the lock to the directory 3013 // and access the llsc_global_table 3014 if (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) break; 3015 3016 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3017 { 3018 // test address and key match of the SC command on the 3019 // LL/SC table without removing reservation. The reservation 3020 // will be erased after in this FSM. 3021 bool sc_success = m_llsc_table.check(r_write_address.read(), 3022 r_write_sc_key.read()); 3023 3024 r_write_sc_fail = not sc_success; 3025 3026 if (not sc_success) r_write_fsm = WRITE_RSP; 3027 else r_write_fsm = WRITE_DIR_LOCK; 3028 } 3029 else 3030 { 3031 // write burst 3032 #define L2 soclib::common::uint32_log2 3033 addr_t min = r_write_address.read(); 3034 addr_t max = r_write_address.read() + 3035 (r_write_word_count.read() << L2(vci_param_int::B)); 3036 #undef L2 3037 3038 m_llsc_table.sw(min, max); 3039 3040 r_write_fsm = WRITE_DIR_LOCK; 3041 } 3042 3043 #if DEBUG_MEMC_WRITE 3044 if(m_debug) 3045 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock " 3046 << std::endl; 3047 #endif 3048 break; 3049 } 3050 3051 //////////////////// 3052 case WRITE_DIR_LOCK: // access directory to check hit/miss 3053 { 3054 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3055 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 3056 size_t way = 0; 3057 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 3058 3059 if(entry.valid) // hit 3060 { 3061 // copy directory entry in local buffer in case of hit 3062 r_write_is_cnt = entry.is_cnt; 3063 r_write_lock = entry.lock; 3064 r_write_tag = entry.tag; 3065 r_write_copy = entry.owner.srcid; 3066 r_write_copy_inst = entry.owner.inst; 3067 r_write_count = entry.count; 3068 r_write_ptr = entry.ptr; 3069 r_write_way = way; 3070 3071 r_write_coherent = entry.cache_coherent; 3072 3073 if (entry.cache_coherent or (entry.owner.srcid == r_write_srcid.read()) or (entry.count == 0)) // hit WT 3074 { 3075 if(entry.is_cnt && entry.count) 3076 { 3077 r_write_fsm = WRITE_BC_DIR_READ; 3078 } 3079 else 3080 { 3081 r_write_fsm = WRITE_DIR_HIT; 3082 } 3083 } 3084 else 3085 { 3086 if (r_write_to_cleanup_req.read())//inval already sent 3087 { 3088 r_write_fsm = WRITE_WAIT; 3089 } 3090 else // hit on a NCC line with a different owner 3091 { 3092 r_write_fsm = WRITE_IVT_LOCK_HIT_WB; 3093 // if(r_write_pktid.read() == TYPE_SC) 3094 // { 3095 // r_write_sc_fail = true; 3096 // } 3097 } 3098 } 3099 } 3100 else // miss 3101 { 3102 r_write_fsm = WRITE_MISS_IVT_LOCK; 3103 } 3104 3105 #if DEBUG_MEMC_WRITE 3106 if(m_debug) 3107 { 3108 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: " 3109 << " address = " << std::hex << r_write_address.read() 3110 << " / hit = " << std::dec << entry.valid 3111 << " / count = " << entry.count 3112 << " / is_cnt = " << entry.is_cnt ; 3113 if((r_write_pktid.read() & 0x7) == TYPE_SC) 3114 std::cout << " / SC access" << std::endl; 3115 else 3116 std::cout << " / SW access" << std::endl; 3117 } 3118 #endif 3119 break; 3120 } 3121 //////////////////// 3122 case WRITE_IVT_LOCK_HIT_WB: 3123 { 3124 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3125 { 3126 3127 size_t index = 0; 3128 bool match_inval; 3129 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3130 3131 //std::cout << "WRITE on NCC on line" << std::hex << nline << std::dec << std::endl; 3132 //if there is a matched updt req, we should wait until it is over. Because 3133 //we need the lastest updt data. 
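// Illustrative sketch (assumption, paraphrasing the state body below): a write
// hit on an NCC (write-back) line owned by another processor cannot be performed
// in place, because that owner may hold a more recent copy. At most once per
// line the FSM therefore:
//   1. posts an inval request to the owner through CC_SEND,
//   2. registers the expected cleanup in the IVT (exactly one answer expected),
//   3. goes to WRITE_WAIT and replays the directory access,
// so the write completes only after the owner's cleanup has updated the line.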
3134 match_inval = m_ivt.search_inval(nline, index); 3135 3136 assert ((r_write_count.read() == 1) and "NCC to CC req without copy"); 3137 if(!match_inval and !r_write_to_cc_send_req.read()) 3138 { 3139 r_write_to_cc_send_req = true; 3140 r_write_to_cc_send_dest = r_write_copy; 3141 r_write_to_cc_send_nline = nline; 3142 r_write_to_cleanup_req = true; 3143 r_write_to_cleanup_nline = nline; 3144 3145 m_ivt.set(false, // it's an inval transaction 3146 false, // it's not a broadcast 3147 true, // it needs no read response 3148 false, // no acknowledge required 3149 m_cmd_write_srcid_fifo.read(), //never read, used for debug 3150 m_cmd_write_trdid_fifo.read(), //never read, used for debug 3151 m_cmd_write_pktid_fifo.read(), //never read, used for debug 3152 nline, 3153 0x1, //Expect only one answer 3154 index); 3155 } 3156 r_write_fsm = WRITE_WAIT; 3157 #if DEBUG_MEMC_WRITE 3158 if(m_debug) 3159 { 3160 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> get access to the UPT: " 3161 << " Inval requested = " << (!match_inval and !r_write_to_cc_send_req.read()) 3162 << std::endl; 3163 } 3164 #endif 3165 } 3166 #if DEBUG_MEMC_WRITE 3167 if(m_debug) 3168 { 3169 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> failed to access to the UPT: " 3170 << std::endl; 3171 } 3172 #endif 3173 break; 3174 } 3175 3176 /////////////////// 3177 case WRITE_DIR_HIT: 3178 { 3179 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3180 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 3181 3182 // update the cache directory 3183 // update directory with Dirty bit 3184 DirectoryEntry entry; 3185 entry.valid = true; 3186 entry.cache_coherent = r_write_coherent.read(); 3187 entry.dirty = true; 3188 entry.tag = r_write_tag.read(); 3189 entry.is_cnt = r_write_is_cnt.read(); 3190 entry.lock = r_write_lock.read(); 3191 entry.owner.srcid = r_write_copy.read(); 3192 entry.owner.inst = r_write_copy_inst.read(); 3193 entry.count = r_write_count.read(); 3194 entry.ptr = r_write_ptr.read(); 3195 3196 size_t set = m_y[(addr_t)(r_write_address.read())]; 3197 size_t way = r_write_way.read(); 3198 3199 // update directory 3200 m_cache_directory.write(set, way, entry); 3201 3202 // owner is true when the the first registered copy is the writer itself 3203 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 3204 ) and not r_write_copy_inst.read()); 3205 3206 // no_update is true when there is no need for coherence transaction 3207 bool no_update = ( (r_write_count.read() == 0) or 3208 (owner and (r_write_count.read() == 1) and 3209 ((r_write_pktid.read() & 0x7) != TYPE_SC))); 3210 3211 // write data in the cache if no coherence transaction 3212 if(no_update) 3213 { 3214 // SC command but zero copies 3215 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3216 { 3217 m_llsc_table.sc(r_write_address.read(), 3218 r_write_sc_key.read()); 3219 } 3220 3221 for(size_t word=0 ; word<m_words ; word++) 3222 { 3223 m_cache_data.write(way, set, word, r_write_data[word].read(), r_write_be[word].read()); 3224 3225 } 3226 } 3227 3228 if (owner and not no_update and ((r_write_pktid.read() & 0x7) != TYPE_SC)) 3229 { 3230 r_write_count = r_write_count.read() - 1; 3231 } 3232 3233 if(no_update) 3234 // Write transaction completed 3235 { 3236 r_write_fsm = WRITE_RSP; 3237 } 3238 else 3239 // coherence update required 3240 { 3241 if(!r_write_to_cc_send_multi_req.read() and 3242 !r_write_to_cc_send_brdcast_req.read()) 3243 { 3244 r_write_fsm = WRITE_UPT_LOCK; 3245 } 3246 else 3247 { 3248 r_write_fsm = WRITE_WAIT; 3249 } 
3250 } 3251 3252 #if DEBUG_MEMC_WRITE 3253 if(m_debug) 3254 { 3255 if(no_update) 3256 { 3257 std::cout << " <MEMC " << name() 3258 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 3259 << std::endl; 3260 } 3261 else 3262 { 3263 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 3264 << " is_cnt = " << r_write_is_cnt.read() 3265 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 3266 if(owner) std::cout << " ... but the first copy is the writer" << std::endl; 3267 } 3268 } 3269 #endif 3270 break; 3271 } 3272 //////////////////// 3273 case WRITE_UPT_LOCK: // Try to register the update request in UPT 3274 { 3275 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 3276 { 3277 bool wok = false; 3278 size_t index = 0; 3279 size_t srcid = r_write_srcid.read(); 3280 size_t trdid = r_write_trdid.read(); 3281 size_t pktid = r_write_pktid.read(); 3282 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3283 size_t nb_copies = r_write_count.read(); 3284 size_t set = m_y[(addr_t)(r_write_address.read())]; 3285 size_t way = r_write_way.read(); 3286 3287 3288 wok = m_upt.set(true, // it's an update transaction 3289 false, // it's not a broadcast 3290 true, // response required 3291 false, // no acknowledge required 3292 srcid, 3293 trdid, 3294 pktid, 3295 nline, 3296 nb_copies, 3297 index); 3298 if(wok) // write data in cache 3299 { 3300 3301 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3302 { 3303 m_llsc_table.sc(r_write_address.read(), 3304 r_write_sc_key.read()); 3305 } 3306 3307 for(size_t word=0 ; word<m_words ; word++) 3308 { 3309 m_cache_data.write(way, 3310 set, 3311 word, 3312 r_write_data[word].read(), 3313 r_write_be[word].read()); 3314 3315 } 3316 } 3317 3318 #if DEBUG_MEMC_WRITE 3319 if(m_debug and wok) 3320 { 3321 if(wok) 3322 { 3323 std::cout << " <MEMC " << name() 3324 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3325 << " nb_copies = " << r_write_count.read() << std::endl; 3326 } 3327 } 3328 #endif 3329 r_write_upt_index = index; 3330 // releases the lock protecting UPT and the DIR if no entry... 
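// Illustrative sketch (assumption, based on the surrounding states): the UPT
// entry acts as an acknowledge counter for the multicast update. It is created
// here with nb_copies = r_write_count; CC_SEND then sends one update per copy
// other than the writer, and the MULTI_ACK FSM decrements the entry on each
// update acknowledge, posting the response to the writer through TGT_RSP only
// when the counter reaches zero.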
3331 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3332 else r_write_fsm = WRITE_WAIT; 3333 m_cpt_write_fsm_n_upt_lock++; 3334 } 3335 3336 m_cpt_write_fsm_upt_lock++; 3337 3338 break; 3339 } 3340 3341 ///////////////////////// 3342 case WRITE_UPT_HEAP_LOCK: // get access to heap 3343 { 3344 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 3345 { 3346 3347 #if DEBUG_MEMC_WRITE 3348 if(m_debug) 3349 std::cout << " <MEMC " << name() 3350 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 3351 #endif 3352 r_write_fsm = WRITE_UPT_REQ; 3353 m_cpt_write_fsm_n_heap_lock++; 3354 } 3355 3356 m_cpt_write_fsm_heap_lock++; 3357 3358 break; 3359 } 3360 3361 ////////////////// 3362 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 3363 // and write the first copy in the FIFO 3364 // send the request if only one copy 3365 { 3366 assert(not r_write_to_cc_send_multi_req.read() and 3367 not r_write_to_cc_send_brdcast_req.read() and 3368 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 3369 "transaction in WRITE_UPT_REQ state" 3370 ); 3371 3372 3373 r_write_to_cc_send_brdcast_req = false; 3374 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3375 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3376 r_write_to_cc_send_index = r_write_word_index.read(); 3377 r_write_to_cc_send_count = r_write_word_count.read(); 3378 3379 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 3380 3381 size_t min = r_write_word_index.read(); 3382 size_t max = r_write_word_index.read() + r_write_word_count.read(); 3383 for(size_t i=min ; i<=max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 3384 3385 if ((r_write_copy.read() != r_write_srcid.read()) or 3386 ((r_write_pktid.read() & 0x7) == TYPE_SC) or 3387 r_write_copy_inst.read()) 3388 { 3389 // put the first srcid in the fifo 3390 write_to_cc_send_fifo_put = true; 3391 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3392 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3393 if(r_write_count.read() == 1) 3394 { 3395 r_write_fsm = WRITE_IDLE; 3396 r_write_to_cc_send_multi_req = true; 3397 } 3398 else 3399 { 3400 r_write_fsm = WRITE_UPT_NEXT; 3401 r_write_to_dec = false; 3402 3403 } 3404 } 3405 else 3406 { 3407 r_write_fsm = WRITE_UPT_NEXT; 3408 r_write_to_dec = false; 3409 } 3410 3411 #if DEBUG_MEMC_WRITE 3412 if(m_debug) 3413 { 3414 std::cout 3415 << " <MEMC " << name() 3416 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3417 << " / srcid = " << std::dec << r_write_copy.read() 3418 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3419 3420 if(r_write_count.read() == 1) 3421 std::cout << " ... and this is the last" << std::endl; 3422 } 3423 #endif 3424 break; 3425 } 3426 3427 /////////////////// 3428 case WRITE_UPT_NEXT: 3429 { 3430 // continue the multi-update request to CC_SEND fsm 3431 // when there is copies in the heap. 3432 // if one copy in the heap is the writer itself 3433 // the corresponding SRCID should not be written in the fifo, 3434 // but the UPT counter must be decremented. 3435 // As this decrement is done in the WRITE_UPT_DEC state, 3436 // after the last copy has been found, the decrement request 3437 // must be registered in the r_write_to_dec flip-flop. 
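// Illustrative sketch of the HEAP walk performed below (comments only; in the
// RTL one heap entry is visited per cycle):
//
//   ptr = r_write_ptr;                         // head set in WRITE_UPT_REQ
//   do {
//       entry = m_heap.read(ptr);
//       if (entry.owner is not the writer) push (srcid, inst) into the CC_SEND fifo;
//       else                               remember that UPT must be decremented;
//       last = (entry.next == ptr);            // the last element points to itself
//       ptr  = entry.next;
//   } while (not last);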
3438 3439 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3440 3441 bool dec_upt_counter; 3442 3443 // put the next srcid in the fifo 3444 if ((entry.owner.srcid != r_write_srcid.read()) or 3445 ((r_write_pktid.read() & 0x7) == TYPE_SC) or 3446 entry.owner.inst) 3447 { 3448 dec_upt_counter = false; 3449 write_to_cc_send_fifo_put = true; 3450 write_to_cc_send_fifo_inst = entry.owner.inst; 3451 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3452 3453 #if DEBUG_MEMC_WRITE 3454 if(m_debug) 3455 { 3456 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3457 << " / heap_index = " << std::dec << r_write_ptr.read() 3458 << " / srcid = " << std::dec << r_write_copy.read() 3459 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3460 if(entry.next == r_write_ptr.read()) 3461 std::cout << " ... and this is the last" << std::endl; 3462 } 3463 #endif 3464 } 3465 else // the UPT counter must be decremented 3466 { 3467 dec_upt_counter = true; 3468 3469 #if DEBUG_MEMC_WRITE 3470 if(m_debug) 3471 { 3472 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3473 << " / heap_index = " << std::dec << r_write_ptr.read() 3474 << " / srcid = " << std::dec << r_write_copy.read() 3475 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3476 if(entry.next == r_write_ptr.read()) 3477 std::cout << " ... and this is the last" << std::endl; 3478 } 3479 #endif 3480 } 3481 3482 // register the possible UPT decrement request 3483 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3484 3485 if(not m_write_to_cc_send_inst_fifo.wok()) 3486 { 3487 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3488 << "The write_to_cc_send_fifo should not be full" << std::endl 3489 << "as the depth should be larger than the max number of copies" << std::endl; 3490 exit(0); 3491 } 3492 3493 r_write_ptr = entry.next; 3494 3495 if(entry.next == r_write_ptr.read()) // last copy 3496 { 3497 r_write_to_cc_send_multi_req = true; 3498 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3499 else r_write_fsm = WRITE_IDLE; 3500 } 3501 break; 3502 } 3503 3504 ////////////////// 3505 case WRITE_UPT_DEC: 3506 { 3507 // If the initial writer has a copy, it should not 3508 // receive an update request, but the counter in the 3509 // update table must be decremented by the MULTI_ACK FSM. 
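// Illustrative note (assumption): the UPT entry was created with the full copy
// count, writer included, but no update packet is sent to the writer itself.
// Asking MULTI_ACK for one extra decrement keeps the acknowledge counter
// consistent, e.g. count = 3 copies -> 2 updates sent + 1 local decrement = 3
// expected decrements before the response is returned.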
3510 3511 if(!r_write_to_multi_ack_req.read()) 3512 { 3513 r_write_to_multi_ack_req = true; 3514 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3515 r_write_fsm = WRITE_IDLE; 3516 } 3517 break; 3518 } 3519 3520 /////////////// 3521 case WRITE_RSP: 3522 { 3523 // Post a request to TGT_RSP FSM to acknowledge the write 3524 // In order to increase the Write requests throughput, 3525 // we don't wait to return in the IDLE state to consume 3526 // a new request in the write FIFO 3527 3528 if (not r_write_to_tgt_rsp_req.read()) 3529 { 3530 // post the request to TGT_RSP_FSM 3531 r_write_to_tgt_rsp_req = true; 3532 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3533 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3534 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3535 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3536 3537 // try to get a new write request from the FIFO 3538 if (not m_cmd_write_addr_fifo.rok()) 3539 { 3540 r_write_fsm = WRITE_IDLE; 3541 } 3542 else 3543 { 3544 // consume a word in the FIFO & write it in the local buffer 3545 cmd_write_fifo_get = true; 3546 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3547 3548 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 3549 r_write_word_index = index; 3550 r_write_word_count = 0; 3551 r_write_data[index] = m_cmd_write_data_fifo.read(); 3552 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3553 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3554 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3555 3556 // if SC command, get the SC key 3557 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3558 { 3559 assert( not m_cmd_write_eop_fifo.read() && 3560 "MEMC ERROR in WRITE_RSP state: " 3561 "invalid packet format for SC command"); 3562 3563 r_write_sc_key = m_cmd_write_data_fifo.read(); 3564 } 3565 3566 // initialize the be field for all words 3567 for(size_t word=0 ; word<m_words ; word++) 3568 { 3569 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3570 else r_write_be[word] = 0x0; 3571 } 3572 3573 if( m_cmd_write_eop_fifo.read()) 3574 { 3575 r_write_fsm = WRITE_DIR_REQ; 3576 } 3577 else 3578 { 3579 r_write_fsm = WRITE_NEXT; 3580 } 3581 } 3582 3583 #if DEBUG_MEMC_WRITE 3584 if(m_debug) 3585 { 3586 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3587 << " : rsrcid = " << std::hex << r_write_srcid.read() 3588 << " : rpktid = " << std::hex << r_write_pktid.read() 3589 << " : sc_fail= " << std::hex << r_write_sc_fail.read() 3590 << std::endl; 3591 if(m_cmd_write_addr_fifo.rok()) 3592 { 3593 std::cout << " New Write request: " 3594 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3595 << " / address = " << m_cmd_write_addr_fifo.read() 3596 << " / data = " << m_cmd_write_data_fifo.read() 3597 << " / pktid = " << m_cmd_write_pktid_fifo.read() 3598 << std::endl; 3599 } 3600 } 3601 #endif 3602 } 3603 break; 3604 } 3605 ///////////////////////// RWT 3606 case WRITE_MISS_IVT_LOCK: 3607 { 3608 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3609 { 3610 size_t index; 3611 if(m_ivt.search_inval(m_nline[(addr_t)(r_write_address.read())], index)) 3612 { 3613 r_write_fsm = WRITE_WAIT; 3614 } 3615 else 3616 { 3617 r_write_fsm = WRITE_MISS_TRT_LOCK; 3618 } 3619 } 3620 break; 3621 } 3622 3623 ///////////////////////// 3624 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 3625 { 3626 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3627 { 3628 3629 #if DEBUG_MEMC_WRITE 3630 if(m_debug) 3631 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> 
Check the TRT" << std::endl; 3632 #endif 3633 size_t hit_index = 0; 3634 size_t wok_index = 0; 3635 addr_t addr = (addr_t) r_write_address.read(); 3636 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3637 bool hit_write = m_trt.hit_write(m_nline[addr]); 3638 bool wok = not m_trt.full(wok_index); 3639 3640 // wait an empty entry in TRT 3641 if(not hit_read and (not wok or hit_write)) 3642 { 3643 r_write_fsm = WRITE_WAIT; 3644 m_cpt_trt_full++; 3645 3646 break; 3647 } 3648 3649 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3650 { 3651 m_llsc_table.sc(r_write_address.read(), 3652 r_write_sc_key.read()); 3653 } 3654 3655 // register the modified data in TRT 3656 if (hit_read) 3657 { 3658 r_write_trt_index = hit_index; 3659 r_write_fsm = WRITE_MISS_TRT_DATA; 3660 m_cpt_write_miss++; 3661 break; 3662 } 3663 // set a new entry in TRT 3664 if (wok and not hit_write) 3665 { 3666 r_write_trt_index = wok_index; 3667 r_write_fsm = WRITE_MISS_TRT_SET; 3668 m_cpt_write_miss++; 3669 break; 3670 } 3671 3672 assert(false && "VCI_MEM_CACHE ERROR: this part must not be reached"); 3673 } 3674 break; 3675 } 3676 3677 //////////////// 3678 case WRITE_WAIT: // release the locks protecting the shared ressources 3679 { 3680 3681 #if DEBUG_MEMC_WRITE 3682 if(m_debug) 3683 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 3684 #endif 3685 r_write_fsm = WRITE_DIR_REQ; 3686 break; 3687 } 3688 3689 //////////////////////// 3690 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 3691 { 3692 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3693 { 3694 std::vector<be_t> be_vector; 3695 std::vector<data_t> data_vector; 3696 be_vector.clear(); 3697 data_vector.clear(); 3698 for(size_t i=0; i<m_words; i++) 3699 { 3700 be_vector.push_back(r_write_be[i]); 3701 data_vector.push_back(r_write_data[i]); 3702 } 3703 m_trt.set(r_write_trt_index.read(), 3704 true, // read request to XRAM 3705 m_nline[(addr_t)(r_write_address.read())], 3706 r_write_srcid.read(), 3707 r_write_trdid.read(), 3708 r_write_pktid.read(), 3709 false, // not a processor read 3710 0, // not a single word 3711 0, // word index 3712 be_vector, 3713 data_vector); 3714 r_write_fsm = WRITE_MISS_XRAM_REQ; 3715 3716 #if DEBUG_MEMC_WRITE 3717 if(m_debug) 3718 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 3719 #endif 3720 } 3721 break; 3722 } 3723 3724 ///////////////////////// 3725 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 3726 { 3727 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3728 { 3729 std::vector<be_t> be_vector; 3730 std::vector<data_t> data_vector; 3731 be_vector.clear(); 3732 data_vector.clear(); 3733 for(size_t i=0; i<m_words; i++) 3734 { 3735 be_vector.push_back(r_write_be[i]); 3736 data_vector.push_back(r_write_data[i]); 3737 } 3738 m_trt.write_data_mask(r_write_trt_index.read(), 3739 be_vector, 3740 data_vector); 3741 r_write_fsm = WRITE_RSP; 3742 3743 #if DEBUG_MEMC_WRITE 3744 if(m_debug) 3745 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 3746 #endif 3747 } 3748 break; 3749 } 3750 3751 ///////////////////////// 3752 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3753 { 3754 if(not r_write_to_ixr_cmd_req.read()) 3755 { 3756 r_write_to_ixr_cmd_req = true; 3757 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3758 r_write_fsm = WRITE_RSP; 3759 3760 #if DEBUG_MEMC_WRITE 3761 if(m_debug) 3762 std::cout << " <MEMC " << 
name() 3763 << " WRITE_MISS_XRAM_REQ> Post a GET request to the" 3764 << " IXR_CMD FSM" << std::endl; 3765 #endif 3766 } 3767 break; 3768 } 3769 3770 /////////////////////// 3771 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3772 // the cache line must be erased in mem-cache, and written 3773 // into XRAM. 3774 { 3775 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3776 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3777 3778 m_cpt_write_broadcast++; 3779 3780 // write enable signal for data buffer. 3781 r_write_bc_data_we = true; 3782 3783 r_write_fsm = WRITE_BC_TRT_LOCK; 3784 3785 #if DEBUG_MEMC_WRITE 3786 if (m_debug) 3787 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3788 << " Read the cache to complete local buffer" << std::endl; 3789 #endif 3790 break; 3791 } 3792 /////////////////////// 3793 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3794 { 3795 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3796 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3797 3798 // We read the cache and complete the buffer. As the DATA cache uses a 3799 // synchronous RAM, the read DATA request has been performed in the 3800 // WRITE_BC_DIR_READ state but the data is available in this state. 3801 if (r_write_bc_data_we.read()) 3802 { 3803 size_t set = m_y[(addr_t)(r_write_address.read())]; 3804 size_t way = r_write_way.read(); 3805 for(size_t word=0 ; word<m_words ; word++) 3806 { 3807 data_t mask = 0; 3808 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3809 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3810 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3811 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3812 3813 // complete only if mask is not null (for energy consumption) 3814 r_write_data[word] = 3815 (r_write_data[word].read() & mask) | 3816 (m_cache_data.read(way, set, word) & ~mask); 3817 } 3818 #if DEBUG_MEMC_WRITE 3819 if(m_debug) 3820 std::cout 3821 << " <MEMC " << name() 3822 << " WRITE_BC_TRT_LOCK> Complete data buffer" << std::endl; 3823 #endif 3824 } 3825 3826 if (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) 3827 { 3828 // if we loop in this state, the data does not need to be 3829 // rewritten (for energy consuption) 3830 r_write_bc_data_we = false; 3831 break; 3832 } 3833 3834 size_t wok_index = 0; 3835 bool wok = not m_trt.full(wok_index); 3836 if(wok) // set a new entry in TRT 3837 { 3838 r_write_trt_index = wok_index; 3839 r_write_fsm = WRITE_BC_IVT_LOCK; 3840 } 3841 else // wait an empty entry in TRT 3842 { 3843 r_write_fsm = WRITE_WAIT; 3844 } 3845 3846 #if DEBUG_MEMC_WRITE 3847 if(m_debug) 3848 std::cout << " <MEMC " << name() 3849 << " WRITE_BC_TRT_LOCK> Check TRT : wok = " << wok 3850 << " / index = " << wok_index << std::endl; 3851 #endif 3852 3853 m_cpt_write_fsm_trt_lock++; 3854 3855 break; 3856 } 3857 3858 ////////////////////// 3859 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 3860 { 3861 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3862 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3863 3864 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3865 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3866 3867 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3868 { 3869 bool wok = false; 3870 size_t index = 0; 3871 size_t srcid = r_write_srcid.read(); 3872 size_t trdid = r_write_trdid.read(); 3873 size_t pktid = r_write_pktid.read(); 3874 addr_t nline = 
m_nline[(addr_t)(r_write_address.read())]; 3875 size_t nb_copies = r_write_count.read(); 3876 3877 wok = m_ivt.set(false, // it's an inval transaction 3878 true, // it's a broadcast 3879 true, // response required 3880 false, // no acknowledge required 3881 srcid, 3882 trdid, 3883 pktid, 3884 nline, 3885 nb_copies, 3886 index); 3887 /*ODCCP*/ //m_upt.print(); 3888 #if DEBUG_MEMC_WRITE 3889 if( m_debug and wok ) 3890 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3891 << " / nb_copies = " << r_write_count.read() << std::endl; 3892 #endif 3893 r_write_upt_index = index; 3894 3895 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL; 3896 else r_write_fsm = WRITE_WAIT; 3897 m_cpt_write_fsm_n_upt_lock++; 3898 } 3899 3900 m_cpt_write_fsm_upt_lock++; 3901 3902 break; 3903 } 3904 3905 //////////////////////// 3906 case WRITE_BC_DIR_INVAL: 3907 { 3908 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3909 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3910 3911 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3912 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3913 3914 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3915 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3916 3917 // register PUT request in TRT 3918 std::vector<data_t> data_vector; 3919 data_vector.clear(); 3920 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3921 m_trt.set( r_write_trt_index.read(), 3922 false, // PUT request 3923 m_nline[(addr_t)(r_write_address.read())], 3924 0, // unused 3925 0, // unused 3926 0, // unused 3927 false, // not a processor read 3928 0, // unused 3929 0, // unused 3930 std::vector<be_t> (m_words,0), 3931 data_vector ); 3932 3933 // invalidate directory entry 3934 DirectoryEntry entry; 3935 entry.valid = false; 3936 entry.cache_coherent= false; 3937 entry.dirty = false; 3938 entry.tag = 0; 3939 entry.is_cnt = false; 3940 entry.lock = false; 3941 entry.owner.srcid = 0; 3942 entry.owner.inst = false; 3943 entry.ptr = 0; 3944 entry.count = 0; 3945 size_t set = m_y[(addr_t)(r_write_address.read())]; 3946 size_t way = r_write_way.read(); 3947 3948 m_cache_directory.write(set, way, entry); 3949 3950 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3951 { 3952 m_llsc_table.sc(r_write_address.read(), 3953 r_write_sc_key.read()); 3954 } 3955 3956 #if DEBUG_MEMC_WRITE 3957 if(m_debug) 3958 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ = " 3959 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 3960 #endif 3961 r_write_fsm = WRITE_BC_CC_SEND; 3962 break; 3963 } 3964 3965 ////////////////////// 3966 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3967 { 3968 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3969 { 3970 r_write_to_cc_send_multi_req = false; 3971 r_write_to_cc_send_brdcast_req = true; 3972 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3973 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3974 r_write_to_cc_send_index = 0; 3975 r_write_to_cc_send_count = 0; 3976 3977 for(size_t i=0; i<m_words ; i++) 3978 { 3979 r_write_to_cc_send_be[i]=0; 3980 r_write_to_cc_send_data[i] = 0; 3981 } 3982 r_write_fsm = WRITE_BC_XRAM_REQ; 3983 3984 #if DEBUG_MEMC_WRITE 3985 if(m_debug) 3986 std::cout << " <MEMC " << name() 3987 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3988 #endif 3989 } 3990 
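// Note: the byte-enable and data buffers passed to the CC_SEND FSM are
// cleared, because a broadcast invalidation carries no data; the up-to-date
// line is written back to the XRAM by the following PUT (WRITE_BC_XRAM_REQ).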
break; 3991 } 3992 3993 /////////////////////// 3994 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 3995 { 3996 if( not r_write_to_ixr_cmd_req.read() ) 3997 { 3998 r_write_to_ixr_cmd_req = true; 3999 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 4000 r_write_fsm = WRITE_IDLE; 4001 4002 #if DEBUG_MEMC_WRITE 4003 if(m_debug) 4004 std::cout << " <MEMC " << name() 4005 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 4006 #endif 4007 } 4008 break; 4009 } 4010 } // end switch r_write_fsm 4011 4012 /////////////////////////////////////////////////////////////////////// 4013 // IXR_CMD FSM 4014 /////////////////////////////////////////////////////////////////////// 4015 // The IXR_CMD fsm controls the command packets to the XRAM: 4016 // It handles requests from 6 FSMs with a round-robin priority: 4017 // READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 4018 // 4019 // - It sends a single flit VCI read to the XRAM in case of 4020 // GET request posted by the READ, WRITE or CAS FSMs. 4021 // - It sends a multi-flit VCI write in case of PUT request posted by 4022 // the XRAM_RSP, WRITE, CAS, CLEANUP, or CONFIG FSMs. 4023 // 4024 // For each client, there are three steps: 4025 // - IXR_CMD_*_IDLE : round-robin allocation to a client 4026 // - IXR_CMD_*_TRT : access to TRT for address and data 4027 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 4028 // 4029 // The address and data to be written (for a PUT) are stored in TRT. 4030 // The trdid field always contains the TRT entry index. 4031 //////////////////////////////////////////////////////////////////////// 4032 4033 switch(r_ixr_cmd_fsm.read()) 4034 { 4035 //////////////////////// 4036 case IXR_CMD_READ_IDLE: 4037 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4038 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4039 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4040 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4041 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4042 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4043 break; 4044 //////////////////////// 4045 case IXR_CMD_WRITE_IDLE: 4046 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4047 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4048 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4049 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4050 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4051 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4052 break; 4053 //////////////////////// 4054 case IXR_CMD_CAS_IDLE: 4055 if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4056 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4057 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4058 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4059 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4060 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4061 break; 4062 //////////////////////// 4063 case IXR_CMD_XRAM_IDLE: 4064 if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4065 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4066 else if(r_write_to_ixr_cmd_req.read())
r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4067 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4068 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4069 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4070 break; 4071 //////////////////////// 4072 case IXR_CMD_CLEANUP_IDLE: 4073 /*ODCCP*///std::cout << "IXR_CMD_CLEANUP_IDLE" << std::endl; 4074 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4075 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4076 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4077 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4078 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4079 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4080 break; 4081 ///////////////////////// 4082 case IXR_CMD_CONFIG_IDLE: 4083 { 4084 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4085 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4086 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4087 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4088 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4089 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4090 break; 4091 } 4092 4093 ////////////////////// 4094 case IXR_CMD_READ_TRT: // access TRT for a GET 4095 { 4096 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4097 { 4098 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 4099 r_ixr_cmd_address = entry.nline * (m_words<<2); 4100 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 4101 r_ixr_cmd_get = true; 4102 r_ixr_cmd_word = 0; 4103 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 4104 4105 #if DEBUG_MEMC_IXR_CMD 4106 if(m_debug) 4107 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 4108 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 4109 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4110 #endif 4111 } 4112 break; 4113 } 4114 /////////////////////// 4115 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 4116 { 4117 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4118 { 4119 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 4120 r_ixr_cmd_address = entry.nline * (m_words<<2); 4121 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 4122 r_ixr_cmd_get = entry.xram_read; 4123 r_ixr_cmd_word = 0; 4124 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 4125 4126 // Read data from TRT if PUT transaction 4127 if (not entry.xram_read) 4128 { 4129 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4130 } 4131 4132 #if DEBUG_MEMC_IXR_CMD 4133 if(m_debug) 4134 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 4135 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 4136 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4137 #endif 4138 } 4139 break; 4140 } 4141 ///////////////////// 4142 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 4143 { 4144 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4145 { 4146 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 4147 r_ixr_cmd_address = entry.nline * (m_words<<2); 4148 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 4149 r_ixr_cmd_get = entry.xram_read; 4150 r_ixr_cmd_word = 0; 4151 r_ixr_cmd_fsm = 
IXR_CMD_CAS_SEND; 4152 4153 // Read data from TRT if PUT transaction 4154 if (not entry.xram_read) 4155 { 4156 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4157 } 4158 4159 #if DEBUG_MEMC_IXR_CMD 4160 if(m_debug) 4161 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 4162 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 4163 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4164 #endif 4165 } 4166 break; 4167 } 4168 ////////////////////// 4169 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 4170 { 4171 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4172 { 4173 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 4174 r_ixr_cmd_address = entry.nline * (m_words<<2); 4175 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 4176 r_ixr_cmd_get = false; 4177 r_ixr_cmd_word = 0; 4178 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 4179 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4180 4181 #if DEBUG_MEMC_IXR_CMD 4182 if(m_debug) 4183 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 4184 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 4185 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4186 #endif 4187 } 4188 break; 4189 } 4190 ////////////////////// 4191 case IXR_CMD_CLEANUP_TRT: // access TRT for a PUT 4192 { 4193 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4194 { 4195 4196 TransactionTabEntry entry = m_trt.read( r_cleanup_to_ixr_cmd_index.read() ); 4197 r_ixr_cmd_address = entry.nline * (m_words<<2); 4198 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 4199 r_ixr_cmd_get = false; 4200 r_ixr_cmd_word = 0; 4201 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 4202 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4203 4204 #if DEBUG_MEMC_IXR_CMD 4205 if(m_debug) 4206 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 4207 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 4208 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4209 #endif 4210 } 4211 break; 4212 } 4213 //////////////////////// 4214 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 4215 { 4216 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4217 { 4218 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 4219 r_ixr_cmd_address = entry.nline * (m_words<<2); 4220 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 4221 r_ixr_cmd_get = false; 4222 r_ixr_cmd_word = 0; 4223 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 4224 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4225 4226 #if DEBUG_MEMC_IXR_CMD 4227 if(m_debug) 4228 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 4229 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 4230 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4231 #endif 4232 } 4233 break; 4234 } 4235 4236 /////////////////////// 4237 case IXR_CMD_READ_SEND: // send a get from READ FSM 4238 { 4239 if(p_vci_ixr.cmdack) 4240 { 4241 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 4242 r_read_to_ixr_cmd_req = false; 4243 4244 #if DEBUG_MEMC_IXR_CMD 4245 if(m_debug) 4246 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 4247 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4248 #endif 4249 } 4250 break; 4251 } 4252 //////////////////////// 4253 case IXR_CMD_WRITE_SEND: // send a put or get 
from WRITE FSM 4254 { 4255 if(p_vci_ixr.cmdack) 4256 { 4257 if (not r_ixr_cmd_get.read()) // PUT 4258 { 4259 if(r_ixr_cmd_word.read() == (m_words - 2)) 4260 { 4261 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4262 r_write_to_ixr_cmd_req = false; 4263 } 4264 else 4265 { 4266 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4267 } 4268 4269 #if DEBUG_MEMC_IXR_CMD 4270 if(m_debug) 4271 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 4272 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4273 #endif 4274 } 4275 else // GET 4276 { 4277 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4278 r_write_to_ixr_cmd_req = false; 4279 4280 #if DEBUG_MEMC_IXR_CMD 4281 if(m_debug) 4282 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 4283 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4284 #endif 4285 } 4286 } 4287 break; 4288 } 4289 ////////////////////// 4290 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 4291 { 4292 if(p_vci_ixr.cmdack) 4293 { 4294 if (not r_ixr_cmd_get.read()) // PUT 4295 { 4296 if(r_ixr_cmd_word.read() == (m_words - 2)) 4297 { 4298 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 4299 r_cas_to_ixr_cmd_req = false; 4300 } 4301 else 4302 { 4303 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4304 } 4305 4306 #if DEBUG_MEMC_IXR_CMD 4307 if(m_debug) 4308 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 4309 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4310 #endif 4311 } 4312 else // GET 4313 { 4314 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 4315 r_cas_to_ixr_cmd_req = false; 4316 4317 #if DEBUG_MEMC_IXR_CMD 4318 if(m_debug) 4319 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 4320 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4321 #endif 4322 } 4323 } 4324 break; 4325 } 4326 /////////////////////// 4327 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 4328 { 4329 if(p_vci_ixr.cmdack.read()) 4330 { 4331 if(r_ixr_cmd_word.read() == (m_words - 2)) 4332 { 4333 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 4334 r_xram_rsp_to_ixr_cmd_req = false; 4335 } 4336 else 4337 { 4338 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4339 } 4340 #if DEBUG_MEMC_IXR_CMD 4341 if(m_debug) 4342 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 4343 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4344 #endif 4345 } 4346 break; 4347 } 4348 4349 //////////////////////// 4350 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4351 { 4352 if(p_vci_ixr.cmdack.read()) 4353 { 4354 /*ODCCP*/ //std::cout << "IXR_CMD_CLEANUP_DATA_SEND STATE at cycle : " << std::dec << m_cpt_cycles << std::endl; 4355 if(r_ixr_cmd_word.read() == (m_words - 2)) 4356 { 4357 /*ODCCP*/ //std::cout << "IXR_CMD_CLEANUP_DATA_SEND GO TO IXR_CMD_CLEANUP_IDLE" << std::endl; 4358 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 4359 r_cleanup_to_ixr_cmd_req = false; 4360 //r_ixr_cmd_word = 0; 4361 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 4362 } 4363 else 4364 { 4365 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4366 } 4367 4368 #if DEBUG_MEMC_IXR_CMD 4369 if(m_debug) 4370 { 4371 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::endl; 4372 } 4373 #endif 4374 } 4375 break; 4376 } 4377 4378 ///////////////////////// 4379 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG 
FSM 4380 { 4381 if(p_vci_ixr.cmdack.read()) 4382 { 4383 if(r_ixr_cmd_word.read() == (m_words - 2)) 4384 { 4385 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 4386 r_config_to_ixr_cmd_req = false; 4387 } 4388 else 4389 { 4390 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4391 } 4392 4393 #if DEBUG_MEMC_IXR_CMD 4394 if(m_debug) 4395 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 4396 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4397 #endif 4398 } 4399 break; 4400 } 4401 } // end switch r_ixr_cmd_fsm 4402 4403 //////////////////////////////////////////////////////////////////////////// 4404 // IXR_RSP FSM 4405 //////////////////////////////////////////////////////////////////////////// 4406 // The IXR_RSP FSM receives the response packets from the XRAM, 4407 // for both PUT transaction, and GET transaction. 4408 // 4409 // - A response to a PUT request is a single-cell VCI packet. 4410 // The TRT index is contained in the RTRDID field. 4411 // The FSM takes the lock protecting the TRT, and the corresponding 4412 // entry is erased. If an acknowledge was required (in case of software SYNC) 4413 // the r_config_rsp_lines counter is decremented. 4414 // 4415 // - A response to a GET request is a multi-cell VCI packet. 4416 // The TRT index is contained in the RTRDID field. 4417 // The N cells contain the N words of the cache line in the RDATA field. 4418 // The FSM takes the lock protecting the TRT to store the line in the TRT 4419 // (taking into account the write requests already stored in the TRT). 4420 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 4421 // signal is set to inform the XRAM_RSP FSM. 4422 /////////////////////////////////////////////////////////////////////////////// 4423 4424 switch(r_ixr_rsp_fsm.read()) 4425 { 4426 ////////////////// 4427 case IXR_RSP_IDLE: // test transaction type: PUT/GET 4428 { 4429 if(p_vci_ixr.rspval.read()) 4430 { 4431 r_ixr_rsp_cpt = 0; 4432 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 4433 4434 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 4435 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 4436 4437 if(p_vci_ixr.reop.read()) // PUT 4438 { 4439 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4440 } 4441 4442 else // GET transaction 4443 { 4444 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 4445 4446 #if DEBUG_MEMC_IXR_RSP 4447 if(m_debug) 4448 std::cout << " <MEMC " << name() 4449 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 4450 #endif 4451 } 4452 } 4453 break; 4454 } 4455 //////////////////////// 4456 case IXR_RSP_ACK: // Acknowledge PUT transaction 4457 { 4458 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4459 break; 4460 } 4461 4462 //////////////////////// 4463 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 4464 // decrease the line counter if config request 4465 { 4466 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4467 { 4468 size_t index = r_ixr_rsp_trt_index.read(); 4469 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4470 m_trt.erase(index); 4471 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4472 4473 // std::cout << "remove a valid slot in trt index = " << r_ixr_rsp_trt_index.read()<< std::endl; 4474 #if DEBUG_MEMC_IXR_RSP 4475 if(m_debug) 4476 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 4477 << r_ixr_rsp_trt_index.read() << std::endl; 4478 #endif 4479 } 4480 break; 4481 } 4482 ////////////////////// 4483 case IXR_RSP_TRT_READ: // write a 64 bits data in the TRT 4484 { 4485 
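// Each XRAM response cell carries one 64-bit word (two 32-bit words):
// the word counter r_ixr_rsp_cpt therefore advances by 2, and the last
// cell of a GET response is expected at word index (m_words - 2).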
if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4486 { 4487 size_t index = r_ixr_rsp_trt_index.read(); 4488 size_t word = r_ixr_rsp_cpt.read(); 4489 bool eop = p_vci_ixr.reop.read(); 4490 wide_data_t data = p_vci_ixr.rdata.read(); 4491 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4492 4493 assert(((eop == (word == (m_words-2))) or error) and 4494 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 4495 4496 m_trt.write_rsp( index, 4497 word, 4498 data ); 4499 4500 r_ixr_rsp_cpt = word + 2; 4501 4502 if(eop) 4503 { 4504 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 4505 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4506 } 4507 4508 #if DEBUG_MEMC_IXR_RSP 4509 if(m_debug) 4510 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing a word in TRT : " 4511 << " index = " << std::dec << index 4512 << " / word = " << word 4513 << " / data = " << std::hex << data << std::endl; 4514 #endif 4515 m_cpt_ixr_fsm_n_trt_lock++; 4516 } 4517 m_cpt_ixr_fsm_trt_lock++; 4518 break; 4519 } 4520 } // end switch r_ixr_rsp_fsm 4521 4522 //////////////////////////////////////////////////////////////////////////// 4523 // XRAM_RSP FSM 4524 //////////////////////////////////////////////////////////////////////////// 4525 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 4526 // The cache line has been written in the TRT by the IXR_RSP FSM. 4527 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 4528 // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 4529 // of entries in the TRT; they are handled with a round-robin priority... 4530 // 4531 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 4532 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 4533 // It selects a cache slot and saves the victim line in another local buffer 4534 // r_xram_rsp_victim_***. 4535 // It writes the line extracted from TRT in the cache. 4536 // If it was a read MISS, the XRAM_RSP FSM sends a request to the TGT_RSP 4537 // FSM to return the cache line to the registered processor. 4538 // If there is no empty slot, a victim line is evicted, and 4539 // invalidate requests are sent to the L1 caches containing copies. 4540 // If this line is dirty, the XRAM_RSP FSM sends a request to the IXR_CMD 4541 // FSM to save the victim line to the XRAM, and registers the write transaction 4542 // in the TRT (using the entry previously used by the read transaction).
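// Typical state sequence for a GET response (sketch):
// XRAM_RSP_IDLE -> XRAM_RSP_DIR_LOCK -> XRAM_RSP_TRT_COPY -> XRAM_RSP_IVT_LOCK
// -> XRAM_RSP_DIR_UPDT, followed as needed by XRAM_RSP_TRT_DIRTY (victim
// write-back), XRAM_RSP_DIR_RSP (response to the processor), and
// XRAM_RSP_INVAL / XRAM_RSP_HEAP_* (invalidation of the victim copies).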
4543 /////////////////////////////////////////////////////////////////////////////// 4544 4545 switch(r_xram_rsp_fsm.read()) 4546 { 4547 /////////////////// 4548 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 4549 { 4550 size_t old = r_xram_rsp_trt_index.read(); 4551 size_t lines = m_trt_lines; 4552 for(size_t i=0 ; i<lines ; i++) 4553 { 4554 size_t index = (i+old+1) %lines; 4555 if(r_ixr_rsp_to_xram_rsp_rok[index]) 4556 { 4557 r_xram_rsp_trt_index = index; 4558 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4559 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4560 4561 #if DEBUG_MEMC_XRAM_RSP 4562 if(m_debug) 4563 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 4564 << " Available cache line in TRT:" 4565 << " index = " << std::dec << index << std::endl; 4566 #endif 4567 break; 4568 } 4569 } 4570 break; 4571 } 4572 /////////////////////// 4573 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 4574 // Copy the TRT entry in a local buffer 4575 { 4576 if((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4577 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 4578 { 4579 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4580 size_t index = r_xram_rsp_trt_index.read(); 4581 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 4582 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4583 4584 #if DEBUG_MEMC_XRAM_RSP 4585 if(m_debug) 4586 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 4587 << " Get access to DIR and TRT" << std::endl; 4588 #endif 4589 m_cpt_xram_rsp_fsm_n_dir_lock++; 4590 m_cpt_xram_rsp_fsm_n_trt_lock++; 4591 } 4592 m_cpt_xram_rsp_fsm_dir_lock++; 4593 m_cpt_xram_rsp_fsm_trt_lock++; 4594 break; 4595 } 4596 /////////////////////// 4597 case XRAM_RSP_TRT_COPY: // Select a victim cache line 4598 // and copy it in a local buffer 4599 { 4600 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4601 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 4602 4603 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4604 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 4605 4606 // selects & extracts a victim line from cache 4607 size_t way = 0; 4608 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 4609 4610 DirectoryEntry victim(m_cache_directory.select(set, way)); 4611 4612 bool inval = (victim.count && victim.valid) or (!victim.cache_coherent and (victim.count == 1)) ; 4613 4614 4615 // copy the victim line in a local buffer 4616 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 4617 4618 r_xram_rsp_victim_copy = victim.owner.srcid; 4619 r_xram_rsp_victim_coherent = victim.cache_coherent; 4620 r_xram_rsp_victim_copy_inst = victim.owner.inst; 4621 r_xram_rsp_victim_count = victim.count; 4622 r_xram_rsp_victim_ptr = victim.ptr; 4623 r_xram_rsp_victim_way = way; 4624 r_xram_rsp_victim_set = set; 4625 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 4626 r_xram_rsp_victim_is_cnt = victim.is_cnt; 4627 r_xram_rsp_victim_inval = inval ; 4628 r_xram_rsp_victim_dirty = victim.dirty or (!victim.cache_coherent && (victim.count == 1)); //a NCC line is by default considered as dirty in the L1: we must take a reservation on a TRT entry 4629 4630 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 4631 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4632 4633 #if DEBUG_MEMC_XRAM_RSP 4634 if(m_debug) 4635 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 4636 << " Select a victim slot: " 4637 << " way = " << std::dec << way 4638 << " / set = " << set 4639 << "/ count = " << 
victim.count 4640 << " / inval_required = " << inval << std::endl; 4641 #endif 4642 break; 4643 } 4644 /////////////////////// 4645 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 4646 // to check a possible pending inval 4647 { 4648 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4649 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 4650 4651 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4652 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 4653 4654 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 4655 { 4656 size_t index = 0; 4657 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 4658 { 4659 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4660 4661 #if DEBUG_MEMC_XRAM_RSP 4662 if(m_debug) 4663 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4664 << " Get acces to IVT, but line invalidation registered" 4665 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4666 << " / index = " << std::dec << index << std::endl; 4667 #endif 4668 4669 } 4670 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 4671 { 4672 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4673 4674 #if DEBUG_MEMC_XRAM_RSP 4675 if(m_debug) 4676 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4677 << " Get acces to IVT, but inval required and IVT full" << std::endl; 4678 #endif 4679 } 4680 else 4681 { 4682 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 4683 4684 #if DEBUG_MEMC_XRAM_RSP 4685 if(m_debug) 4686 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4687 << " Get acces to IVT / no pending inval request" << std::endl; 4688 #endif 4689 } 4690 } 4691 break; 4692 } 4693 ///////////////////////// 4694 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 4695 { 4696 4697 #if DEBUG_MEMC_XRAM_RSP 4698 if(m_debug) 4699 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 4700 << " Release all locks and retry" << std::endl; 4701 #endif 4702 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4703 break; 4704 } 4705 /////////////////////// 4706 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4707 // erases the TRT entry if victim not dirty, 4708 // and set inval request in IVT if required 4709 { 4710 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4711 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4712 4713 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4714 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4715 4716 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4717 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4718 4719 // check if this is an instruction read, this means pktid is either 4720 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4721 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4722 4723 // check if this is a cached read, this means pktid is either 4724 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4725 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4726 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4727 4728 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4729 4730 bool dirty = false; 4731 4732 // update cache data 4733 size_t set = r_xram_rsp_victim_set.read(); 4734 size_t way = r_xram_rsp_victim_way.read(); 4735 for(size_t word=0; word<m_words ; word++) 4736 { 4737 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4738 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 
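// the line is marked dirty as soon as at least one byte-enable bit was
// set by a write already registered in this TRT entry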
4739 4740 } 4741 4742 // update cache directory 4743 DirectoryEntry entry; 4744 entry.valid = true; 4745 entry.cache_coherent = (inst_read or (not(cached_read))) and (r_xram_rsp_trt_buf.proc_read); 4746 entry.is_cnt = false; 4747 entry.lock = false; 4748 entry.dirty = dirty; 4749 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4750 entry.ptr = 0; 4751 if(cached_read) 4752 { 4753 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4754 entry.owner.inst = inst_read; 4755 entry.count = 1; 4756 } 4757 else 4758 { 4759 entry.owner.srcid = 0; 4760 entry.owner.inst = 0; 4761 entry.count = 0; 4762 } 4763 m_cache_directory.write(set, way, entry); 4764 //RWT: keep the coherence information in order to send it to the read_rsp 4765 r_xram_rsp_coherent = inst_read or (not(cached_read)); 4766 // request an invalidattion request in IVT for victim line 4767 if(r_xram_rsp_victim_inval.read()) 4768 { 4769 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4770 size_t index = 0; 4771 size_t count_copies = r_xram_rsp_victim_count.read(); 4772 4773 bool wok = m_ivt.set(false, // it's an inval transaction 4774 broadcast, // set broadcast bit 4775 false, // no response required 4776 false, // no acknowledge required 4777 0, // srcid 4778 0, // trdid 4779 0, // pktid 4780 r_xram_rsp_victim_nline.read(), 4781 count_copies, 4782 index); 4783 4784 r_xram_rsp_ivt_index = index; 4785 assert( wok and 4786 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4787 4788 } 4789 if (!r_xram_rsp_victim_coherent.read()) 4790 { 4791 addr_t min = r_xram_rsp_victim_nline.read()*m_words*4 ; 4792 addr_t max = r_xram_rsp_victim_nline.read()*m_words*4 + (m_words - 1)*4; 4793 m_llsc_table.sw(min, max); 4794 } 4795 #if DEBUG_MEMC_XRAM_RSP 4796 if(m_debug) 4797 { 4798 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 4799 << " Cache update: " 4800 << " way = " << std::dec << way 4801 << " / set = " << set 4802 << " / owner_id = " << std::hex << entry.owner.srcid 4803 << " / owner_ins = " << std::dec << entry.owner.inst 4804 << " / count = " << entry.count 4805 << " / nline = " << r_xram_rsp_trt_buf.nline 4806 << " / is_cnt = " << entry.is_cnt << std::endl; 4807 if(r_xram_rsp_victim_inval.read()) 4808 std::cout << " Invalidation request for victim line " 4809 << std::hex << r_xram_rsp_victim_nline.read() 4810 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4811 } 4812 #endif 4813 4814 // If the victim is not dirty (RWT: if it is not coherent, we can not know wether it is dirty or not), we don't need another XRAM put transaction, 4815 // and we can erase the TRT entry 4816 if(!r_xram_rsp_victim_dirty.read() and (r_xram_rsp_victim_coherent.read() or (r_xram_rsp_victim_count.read() == 0))) m_trt.erase(r_xram_rsp_trt_index.read()); 4817 4818 // Next state 4819 if(r_xram_rsp_victim_dirty.read() or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4820 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4821 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4822 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4823 break; 4824 } 4825 //////////////////////// 4826 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty or not coherent (RWT) 4827 { 4828 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4829 { 4830 std::vector<data_t> data_vector; 4831 data_vector.clear(); 4832 for(size_t i=0; i<m_words; i++) 4833 { 4834 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4835 } 
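// the PUT of the victim line re-uses the TRT entry allocated by the
// GET transaction (r_xram_rsp_trt_index)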
4836 m_trt.set( r_xram_rsp_trt_index.read(), 4837 false, // PUT 4838 r_xram_rsp_victim_nline.read(), // line index 4839 0, // unused 4840 0, // unused 4841 0, // unused 4842 false, // not proc_read 4843 0, // unused 4844 0, // unused 4845 std::vector<be_t>(m_words,0xF), 4846 data_vector); 4847 #if DEBUG_MEMC_XRAM_RSP 4848 if(m_debug) 4849 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4850 << " Set TRT entry for the put transaction" 4851 << " / dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 4852 #endif 4853 4854 // if( not r_xram_rsp_victim_coherent ) 4855 // std::cout << "a victim coherent not sent trt index =" << r_xram_rsp_trt_index.read() << std::endl; 4856 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4857 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4858 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4859 m_cpt_xram_rsp_fsm_n_trt_lock++; 4860 } 4861 4862 m_cpt_xram_rsp_fsm_trt_lock++; 4863 4864 break; 4865 } 4866 ////////////////////// 4867 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4868 { 4869 if( not r_xram_rsp_to_tgt_rsp_req.read()) 4870 { 4871 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4872 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4873 if (r_xram_rsp_coherent.read()) 4874 { 4875 r_xram_rsp_to_tgt_rsp_pktid = 0x0 + r_xram_rsp_trt_buf.pktid;//RWT CC 4876 } 4877 else 4878 { 4879 r_xram_rsp_to_tgt_rsp_pktid = 0x8 + r_xram_rsp_trt_buf.pktid;//RWT NCC 4880 } 4881 for(size_t i=0; i < m_words; i++) 4882 { 4883 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4884 } 4885 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4886 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4887 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4888 r_xram_rsp_to_tgt_rsp_rerror = false; 4889 r_xram_rsp_to_tgt_rsp_req = true; 4890 4891 4892 if(r_xram_rsp_victim_inval) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4893 else if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4894 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4895 4896 #if DEBUG_MEMC_XRAM_RSP 4897 if(m_debug) 4898 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 4899 << " Request the TGT_RSP FSM to return data:" 4900 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 4901 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4902 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4903 #endif 4904 } 4905 break; 4906 } 4907 //////////////////// 4908 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 4909 { 4910 if(!r_xram_rsp_to_cc_send_multi_req.read() and 4911 !r_xram_rsp_to_cc_send_brdcast_req.read()) 4912 { 4913 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 4914 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 4915 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4916 4917 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 4918 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4919 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4920 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4921 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4922 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4923 xram_rsp_to_cc_send_fifo_put = multi_req; 4924 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4925 4926 if(r_xram_rsp_victim_dirty and r_xram_rsp_victim_coherent) r_xram_rsp_fsm = 
XRAM_RSP_WRITE_DIRTY; 4927 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4928 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4929 4930 // std::cout << "cleanup sent for trt index =" << r_xram_rsp_trt_index.read() << std::endl; 4931 #if DEBUG_MEMC_XRAM_RSP 4932 if(m_debug) 4933 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4934 << " Send an inval request to CC_SEND FSM" 4935 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4936 #endif 4937 } 4938 break; 4939 } 4940 ////////////////////////// 4941 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4942 { 4943 if ( not r_xram_rsp_to_ixr_cmd_req.read() ) 4944 { 4945 r_xram_rsp_to_ixr_cmd_req = true; 4946 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4947 4948 m_cpt_write_dirty++; 4949 4950 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4951 r_xram_rsp_victim_inval.read(); 4952 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4953 4954 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4955 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4956 4957 #if DEBUG_MEMC_XRAM_RSP 4958 if(m_debug) 4959 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4960 << " Send the put request to IXR_CMD FSM" 4961 << " / victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 4962 #endif 4963 } 4964 break; 4965 } 4966 ///////////////////////// 4967 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4968 { 4969 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4970 { 4971 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4972 m_cpt_xram_rsp_fsm_n_heap_lock++; 4973 } 4974 4975 #if DEBUG_MEMC_XRAM_RSP 4976 if(m_debug) 4977 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 4978 << " Requesting HEAP lock" << std::endl; 4979 #endif 4980 4981 m_cpt_xram_rsp_fsm_heap_lock++; 4982 4983 break; 4984 } 4985 ///////////////////////// 4986 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 4987 { 4988 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4989 { 4990 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 4991 4992 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4993 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4994 xram_rsp_to_cc_send_fifo_put = true; 4995 if(m_xram_rsp_to_cc_send_inst_fifo.wok()) 4996 { 4997 r_xram_rsp_next_ptr = entry.next; 4998 if(entry.next == r_xram_rsp_next_ptr.read()) // last copy 4999 { 5000 r_xram_rsp_to_cc_send_multi_req = true; 5001 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 5002 } 5003 else 5004 { 5005 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5006 } 5007 } 5008 else 5009 { 5010 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5011 } 5012 5013 #if DEBUG_MEMC_XRAM_RSP 5014 if(m_debug) 5015 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 5016 << " Erase copy:" 5017 << " srcid = " << std::hex << entry.owner.srcid 5018 << " / inst = " << std::dec << entry.owner.inst << std::endl; 5019 #endif 5020 } 5021 break; 5022 } 5023 ///////////////////////// 5024 case XRAM_RSP_HEAP_LAST: // last copy 5025 { 5026 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 5027 { 5028 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 5029 << " bad HEAP allocation" << std::endl; 5030 exit(0); 5031 } 5032 size_t free_pointer = m_heap.next_free_ptr(); 5033 5034 HeapEntry last_entry; 5035 last_entry.owner.srcid = 0; 5036 last_entry.owner.inst = false; 5037 if(m_heap.is_full()) 5038 { 5039 last_entry.next = r_xram_rsp_next_ptr.read(); 5040 m_heap.unset_full(); 5041 } 5042 else 5043 { 5044 
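// heap not full: chain the freed entry to the current head of the
// free list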
last_entry.next = free_pointer; 5045 } 5046 5047 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 5048 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 5049 5050 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5051 5052 #if DEBUG_MEMC_XRAM_RSP 5053 if(m_debug) 5054 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 5055 << " Heap housekeeping" << std::endl; 5056 #endif 5057 break; 5058 } 5059 ///////////////////////// 5060 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 5061 { 5062 m_trt.erase(r_xram_rsp_trt_index.read()); 5063 5064 // Next state 5065 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 5066 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5067 5068 #if DEBUG_MEMC_XRAM_RSP 5069 if(m_debug) 5070 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 5071 << " Error reported by XRAM / erase the TRT entry" << std::endl; 5072 #endif 5073 break; 5074 } 5075 //////////////////////// 5076 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 5077 { 5078 if(!r_xram_rsp_to_tgt_rsp_req.read()) 5079 { 5080 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 5081 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 5082 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 5083 for(size_t i=0; i < m_words; i++) 5084 { 5085 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 5086 } 5087 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 5088 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 5089 r_xram_rsp_to_tgt_rsp_rerror = true; 5090 r_xram_rsp_to_tgt_rsp_req = true; 5091 5092 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5093 5094 #if DEBUG_MEMC_XRAM_RSP 5095 if(m_debug) 5096 std::cout << " <MEMC " << name() 5097 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 5098 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 5099 #endif 5100 } 5101 break; 5102 } 5103 } // end swich r_xram_rsp_fsm 5104 5105 //////////////////////////////////////////////////////////////////////////////////// 5106 // CLEANUP FSM 5107 //////////////////////////////////////////////////////////////////////////////////// 5108 // The CLEANUP FSM handles the cleanup request from L1 caches. 5109 // It accesses the cache directory and the heap to update the list of copies. 
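// A cleanup command is received as a DSPIN packet: the first flit carries
// the source id, the way index, the NCC bit and the nline MSB; the second
// flit carries the nline LSB; if the EOP bit is not set, the following
// flits carry the line data (write-back of a dirty NCC copy, RWT extension).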
5110 //////////////////////////////////////////////////////////////////////////////////// 5111 5112 switch(r_cleanup_fsm.read()) 5113 { 5114 ////////////////// 5115 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 5116 { 5117 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 5118 5119 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5120 5121 uint32_t srcid = 5122 DspinDhccpParam::dspin_get( 5123 flit, 5124 DspinDhccpParam::CLEANUP_SRCID); 5125 5126 uint8_t type = 5127 DspinDhccpParam::dspin_get( 5128 flit, 5129 DspinDhccpParam::P2M_TYPE); 5130 5131 r_cleanup_way_index = 5132 DspinDhccpParam::dspin_get( 5133 flit, 5134 DspinDhccpParam::CLEANUP_WAY_INDEX); 5135 5136 r_cleanup_nline = 5137 DspinDhccpParam::dspin_get( 5138 flit, 5139 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 5140 5141 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 5142 r_cleanup_srcid = srcid; 5143 r_cleanup_ncc = 5144 DspinDhccpParam::dspin_get( 5145 flit, 5146 DspinDhccpParam::CLEANUP_NCC); 5147 r_cleanup_contains_data = false; 5148 5149 assert( (srcid < m_initiators) and 5150 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 5151 5152 // <Activity Counters> 5153 if (is_local_req(srcid)) { 5154 m_cpt_cleanup_local++; 5155 } 5156 else { 5157 m_cpt_cleanup_remote++; 5158 m_cpt_cleanup_cost += req_distance(srcid); 5159 } 5160 // </Activity Counters> 5161 cc_receive_to_cleanup_fifo_get = true; 5162 r_cleanup_fsm = CLEANUP_GET_NLINE; 5163 5164 #if DEBUG_MEMC_CLEANUP 5165 if(m_debug) 5166 { 5167 std::cout 5168 << " <MEMC " << name() 5169 << " CLEANUP_IDLE> Cleanup request:" << std::hex 5170 << " / owner_id = " << srcid 5171 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) 5172 << " / ncc = " << DspinDhccpParam::dspin_get( 5173 flit, 5174 DspinDhccpParam::CLEANUP_NCC) 5175 << std::endl; 5176 } 5177 #endif 5178 break; 5179 } 5180 5181 /////////////////////// 5182 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 5183 { 5184 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 5185 5186 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5187 5188 addr_t nline = r_cleanup_nline.read() | 5189 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 5190 5191 //A MODIFIER POUR DIRTY // 5192 bool eop = 5193 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::P2M_EOP) == 0x1; 5194 if (! 
eop) 5195 { 5196 r_cleanup_fsm = CLEANUP_GET_DATA; 5197 r_cleanup_data_index = 0; 5198 r_cleanup_contains_data = true; 5199 } 5200 else 5201 { 5202 r_cleanup_fsm = CLEANUP_DIR_REQ; 5203 } 5204 cc_receive_to_cleanup_fifo_get = true; 5205 r_cleanup_nline = nline; 5206 5207 #if DEBUG_MEMC_CLEANUP 5208 if(m_debug) 5209 { 5210 std::cout 5211 << " <MEMC " << name() 5212 << " CLEANUP_GET_NLINE> Cleanup request:" 5213 << std::hex 5214 << " / address = " << nline * m_words * 4 5215 << " / contains data = " << (!eop) 5216 << std::endl; 5217 } 5218 #endif 5219 break; 5220 } 5221 ///////////////////// 5222 case CLEANUP_GET_DATA : 5223 { 5224 if (m_cc_receive_to_cleanup_fifo.rok()) 5225 { 5226 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5227 5228 uint32_t data = 5229 DspinDhccpParam::dspin_get (flit, DspinDhccpParam::CLEANUP_DATA_UPDT); 5230 5231 r_cleanup_data[r_cleanup_data_index] = data; 5232 r_cleanup_data_index = r_cleanup_data_index.read() + 1; 5233 assert (r_cleanup_data_index.read() < m_words and "MEM_CACHE in CLEANUP_GET_DATA : too much flits in cleanup data updt"); 5234 cc_receive_to_cleanup_fifo_get = true; 5235 if (r_cleanup_data_index.read() == m_words - 1) 5236 { 5237 r_cleanup_contains_data = true; 5238 m_cpt_cleanup_data ++; 5239 r_cleanup_fsm = CLEANUP_DIR_REQ; 5240 } 5241 #if DEBUG_MEMC_CLEANUP 5242 if(m_debug) 5243 { 5244 std::cout 5245 << " <MEMC " << name() 5246 << " CLEANUP_GET_DATA> " 5247 << " / word = " << std::dec << r_cleanup_data_index.read() 5248 << " / data = " << std::hex << data 5249 << std::endl; 5250 } 5251 #endif 5252 } 5253 break; 5254 } 5255 ///////////////////// 5256 case CLEANUP_DIR_REQ: // Get the lock to the directory 5257 { 5258 m_cpt_cleanup_fsm_dir_lock++; 5259 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 5260 5261 r_cleanup_fsm = CLEANUP_DIR_LOCK; 5262 //std::cout << " MEM_CACHE : CLEANUP_DIR_REQ" << std::endl; 5263 5264 #if DEBUG_MEMC_CLEANUP 5265 if(m_debug) 5266 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 5267 #endif 5268 5269 m_cpt_cleanup_fsm_n_dir_lock++; 5270 5271 break; 5272 } 5273 5274 ////////////////////// 5275 case CLEANUP_DIR_LOCK: 5276 { 5277 // test directory status 5278 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 5279 { 5280 std::cout 5281 << "VCI_MEM_CACHE ERROR " << name() 5282 << " CLEANUP_DIR_LOCK state" 5283 << " bad DIR allocation" << std::endl; 5284 5285 exit(0); 5286 } 5287 //std::cout << " MEM_CACHE : CLEANUP_DIR_LOCK" << std::endl; 5288 5289 // Read the directory 5290 size_t way = 0; 5291 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 5292 5293 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 5294 r_cleanup_is_cnt = entry.is_cnt; 5295 r_cleanup_dirty = entry.dirty; 5296 r_cleanup_tag = entry.tag; 5297 r_cleanup_lock = entry.lock; 5298 r_cleanup_way = way; 5299 r_cleanup_count = entry.count; 5300 r_cleanup_ptr = entry.ptr; 5301 r_cleanup_copy = entry.owner.srcid; 5302 r_cleanup_copy_inst = entry.owner.inst; 5303 5304 //RWT 5305 size_t set = m_y[(addr_t)(cleanup_address)]; 5306 m_cache_data.read_line(way, set, r_cleanup_old_data); 5307 r_cleanup_coherent = entry.cache_coherent; 5308 5309 if(entry.valid) // hit : the copy must be cleared 5310 { 5311 assert( 5312 (entry.count > 0) and 5313 "VCI MEM CACHE ERROR: " 5314 "In CLEANUP_DIR_LOCK, CLEANUP command on a valid entry " 5315 "with no copies"); 5316 5317 // no access to the heap 5318 if((entry.count == 1) or (entry.is_cnt)) 5319 { 5320 r_cleanup_fsm = CLEANUP_DIR_WRITE; 5321 } 
5322 // access to the heap 5323 else 5324 { 5325 r_cleanup_fsm = CLEANUP_HEAP_REQ; 5326 } 5327 } 5328 else // miss : check UPT for a pending invalidation transaction 5329 { 5330 r_cleanup_fsm = CLEANUP_IVT_LOCK; 5331 } 5332 5333 #if DEBUG_MEMC_CLEANUP 5334 if(m_debug) 5335 { 5336 std::cout 5337 << " <MEMC " << name() 5338 << " CLEANUP_DIR_LOCK> Test directory status: " 5339 << std::hex 5340 << " line = " << cleanup_address 5341 << " / hit = " << entry.valid 5342 << " / dir_id = " << entry.owner.srcid 5343 << " / dir_ins = " << entry.owner.inst 5344 << " / search_id = " << r_cleanup_srcid.read() 5345 << " / search_ins = " << r_cleanup_inst.read() 5346 << " / count = " << entry.count 5347 << " / is_cnt = " << entry.is_cnt 5348 << std::endl; 5349 } 5350 #endif 5351 break; 5352 } 5353 5354 /////////////////////// 5355 case CLEANUP_DIR_WRITE: 5356 { 5357 /*ODCCP*///std::cout << "CLEANUP_DIR_WRITE" << std::endl; 5358 // Update the directory entry without heap access 5359 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 5360 { 5361 std::cout 5362 << "VCI_MEM_CACHE ERROR " << name() 5363 << " CLEANUP_DIR_WRITE state" 5364 << " bad DIR allocation" << std::endl; 5365 5366 exit(0); 5367 } 5368 5369 size_t way = r_cleanup_way.read(); 5370 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 5371 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5372 5373 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5374 bool match = match_srcid and match_inst; 5375 5376 if(not r_cleanup_is_cnt.read() and not match) 5377 { 5378 std::cout 5379 << "VCI_MEM_CACHE ERROR : Cleanup request on a valid" 5380 << "entry using linked list mode with no corresponding" 5381 << "directory or heap entry" 5382 << std::endl; 5383 5384 exit(1); 5385 } 5386 5387 /*RWT*/ 5388 bool inval_request = (r_read_to_cleanup_req.read() and (r_cleanup_nline.read() == r_read_to_cleanup_nline.read())) // NCC to CC initiated by a read transaction 5389 or (r_write_to_cleanup_req.read() and (r_cleanup_nline.read() == r_write_to_cleanup_nline.read())); //NCC to CC initiated by a wrtie transaction 5390 5391 5392 if (inval_request) m_cpt_ncc_to_cc ++; 5393 5394 if (r_write_to_cleanup_req.read() and (r_cleanup_nline.read() == r_write_to_cleanup_nline.read())) 5395 { 5396 r_write_to_cleanup_req = false; 5397 m_cpt_ncc_to_cc_write ++; 5398 } 5399 5400 5401 // update the cache directory (for the copies) 5402 DirectoryEntry entry; 5403 entry.valid = true; 5404 entry.cache_coherent = inval_request or r_cleanup_coherent.read(); 5405 entry.is_cnt = r_cleanup_is_cnt.read(); 5406 entry.dirty = r_cleanup_dirty.read() or r_cleanup_contains_data.read(); 5407 entry.tag = r_cleanup_tag.read(); 5408 entry.lock = r_cleanup_lock.read(); 5409 entry.ptr = r_cleanup_ptr.read(); 5410 if (r_read_to_cleanup_req.read() and (r_cleanup_nline.read() == r_read_to_cleanup_nline.read())) //pending READ 5411 { 5412 if (r_read_to_cleanup_cached_read.read()) 5413 { 5414 entry.count = r_cleanup_count.read(); 5415 entry.owner.srcid = r_read_to_cleanup_srcid.read(); 5416 entry.owner.inst = 0; 5417 } 5418 else 5419 { 5420 entry.count = r_cleanup_count.read() - 1; 5421 entry.owner.srcid = r_cleanup_copy.read(); 5422 entry.owner.inst = r_cleanup_copy_inst.read(); 5423 } 5424 if (r_read_to_cleanup_is_ll.read()) 5425 { 5426 r_cleanup_to_tgt_rsp_ll_key = r_read_to_cleanup_ll_key.read(); 5427 } 5428 } 5429 else 5430 { 5431 entry.count = r_cleanup_count.read() - 1; 5432 entry.owner.srcid = 0; 5433 entry.owner.inst = 0; 5434 } 5435 5436 if 
(r_cleanup_contains_data.read()) 5437 { 5438 for (size_t word = 0; word < m_words; word ++) 5439 { 5440 m_cache_data.write(way, set, word, r_cleanup_data[word].read(), 0xF); 5441 } 5442 addr_t min = r_cleanup_nline.read()*m_words*4 ; 5443 addr_t max = r_cleanup_nline.read()*m_words*4 + (m_words - 1)*4; 5444 m_llsc_table.sw(min, max); 5445 } 5446 5447 m_cache_directory.write(set, way, entry); 5448 5449 /*RWT*/ 5450 if (inval_request) 5451 { 5452 r_cleanup_fsm = CLEANUP_IVT_LOCK_DATA; 5453 } 5454 else 5455 { 5456 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5457 } 5458 5459 #if DEBUG_MEMC_CLEANUP 5460 if(m_debug) 5461 { 5462 std::cout 5463 << " <MEMC " << name() 5464 << " CLEANUP_DIR_WRITE> Update directory:" 5465 << std::hex 5466 << " address = " << r_cleanup_nline.read() * m_words * 4 5467 << " / dir_id = " << entry.owner.srcid 5468 << " / dir_ins = " << entry.owner.inst 5469 << " / count = " << entry.count 5470 << " / is_cnt = " << entry.is_cnt 5471 << " / match_inval = " << inval_request 5472 << std::endl; 5473 } 5474 #endif 5475 5476 break; 5477 } 5478 ///////////////////// 5479 case CLEANUP_IVT_LOCK_DATA://RWT 5480 { 5481 //Search for a matching inval in the UPT (there must be one) and check if there is a pending read. 5482 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) 5483 { 5484 size_t index = 0; 5485 bool match_inval; 5486 5487 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 5488 assert (match_inval && "VCI MEM CACHE ERROR: In CLEANUP_IVT_LOCK_DATA, NO CORRESPONDING INVAL"); 5489 r_cleanup_read_srcid = m_ivt.srcid(index); 5490 r_cleanup_read_trdid = m_ivt.trdid(index); 5491 r_cleanup_read_pktid = 0x0 + m_ivt.pktid(index); 5492 r_cleanup_read_need_rsp = !m_ivt.need_rsp(index); 5493 r_cleanup_index = index; 5494 5495 r_cleanup_fsm = CLEANUP_IVT_CLEAR_DATA; 5496 } 5497 #if DEBUG_MC_CLEANUP 5498 if (m_debug) 5499 { 5500 std::cout 5501 << " <MEMC " << name() 5502 << " CLEANUP_IVT_LOCK_DATA> fetch pending inval" 5503 << std::endl; 5504 } 5505 #endif 5506 break; 5507 } 5508 5509 ////////////////////////// 5510 case CLEANUP_IVT_CLEAR_DATA://RWT 5511 { 5512 m_ivt.clear(r_cleanup_index.read()); 5513 assert ((r_cleanup_read_need_rsp.read() == (r_read_to_cleanup_req.read() && (r_cleanup_nline.read() == r_read_to_cleanup_nline.read()))) && "condition pending read"); 5514 if (r_cleanup_read_need_rsp.read()) 5515 { 5516 r_cleanup_fsm = CLEANUP_READ_RSP; 5517 } 5518 else 5519 { 5520 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5521 } 5522 #if DEBUG_MC_CLEANUP 5523 if (m_debug) 5524 { 5525 std::cout 5526 << " <MEMC " << name() 5527 << " CLEANUP_IVT_CLEAR_DATA> clear UPT entry" 5528 << std::endl; 5529 } 5530 #endif 5531 break; 5532 } 5533 5534 //////////////////////// 5535 case CLEANUP_READ_RSP://RWT 5536 { 5537 if(r_cleanup_to_tgt_rsp_req.read()) break; 5538 5539 r_cleanup_to_tgt_rsp_req = true; 5540 r_cleanup_to_tgt_rsp_srcid = r_cleanup_read_srcid.read(); 5541 r_cleanup_to_tgt_rsp_trdid = r_cleanup_read_trdid.read(); 5542 r_cleanup_to_tgt_rsp_pktid = 0x0 + r_cleanup_read_pktid.read();//WT 5543 r_cleanup_to_tgt_rsp_type = 0; //Read instruction 5544 r_cleanup_to_tgt_rsp_length = r_read_to_cleanup_length.read(); 5545 r_cleanup_to_tgt_rsp_first_word = r_read_to_cleanup_first_word.read(); 5546 r_read_to_cleanup_req = false; 5547 m_cpt_ncc_to_cc_read ++; 5548 if (r_cleanup_contains_data.read()) //L1 was dirty 5549 { 5550 for(size_t i = 0; i<m_words; i++) 5551 { 5552 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_data[i].read(); 5553 } 5554 } 5555 else //the L2 data are up to date 5556 { 5557 for(size_t i = 
0; i<m_words; i++) 5558 { 5559 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_old_data[i].read(); 5560 } 5561 } 5562 5563 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5564 5565 #if DEBUG_MC_CLEANUP 5566 if (m_debug) 5567 { 5568 std::cout 5569 << " <MEMC " << name() 5570 << " CLEANUP_READ_RSP> answer READ" 5571 << std::endl; 5572 } 5573 #endif 5574 break; 5575 } 5576 ////////////////////// 5577 case CLEANUP_HEAP_REQ: 5578 { 5579 // get the lock to the HEAP directory 5580 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 5581 5582 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 5583 5584 #if DEBUG_MEMC_CLEANUP 5585 if(m_debug) 5586 { 5587 std::cout 5588 << " <MEMC " << name() 5589 << " CLEANUP_HEAP_REQ> HEAP lock acquired " 5590 << std::endl; 5591 } 5592 #endif 5593 m_cpt_cleanup_fsm_n_heap_lock++; 5594 break; 5595 } 5596 5597 ////////////////////// 5598 case CLEANUP_HEAP_LOCK: 5599 { 5600 // two cases are handled in this state : 5601 // 1. the matching copy is directly in the directory 5602 // 2. the matching copy is the first copy in the heap 5603 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5604 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5605 5606 size_t way = r_cleanup_way.read(); 5607 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 5608 5609 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 5610 bool last = (heap_entry.next == r_cleanup_ptr.read()); 5611 5612 // match_dir computation 5613 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5614 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5615 bool match_dir = match_dir_srcid and match_dir_inst; 5616 5617 // match_heap computation 5618 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5619 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5620 bool match_heap = match_heap_srcid and match_heap_inst; 5621 5622 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 5623 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5624 r_cleanup_prev_inst = heap_entry.owner.inst; 5625 5626 assert( (not last or match_dir or match_heap) and 5627 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 5628 5629 assert( (not match_dir or not match_heap) and 5630 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 5631 5632 DirectoryEntry dir_entry; 5633 dir_entry.valid = true; 5634 dir_entry.cache_coherent = true; 5635 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 5636 dir_entry.dirty = r_cleanup_dirty.read(); 5637 dir_entry.tag = r_cleanup_tag.read(); 5638 dir_entry.lock = r_cleanup_lock.read(); 5639 dir_entry.count = r_cleanup_count.read()-1; 5640 5641 // the matching copy is registered in the directory and 5642 // it must be replaced by the first copy registered in 5643 // the heap. 
The corresponding entry must be freed 5644 if(match_dir) 5645 { 5646 dir_entry.ptr = heap_entry.next; 5647 dir_entry.owner.srcid = heap_entry.owner.srcid; 5648 dir_entry.owner.inst = heap_entry.owner.inst; 5649 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5650 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5651 } 5652 5653 // the matching copy is the first copy in the heap 5654 // It must be freed and the copy registered in directory 5655 // must point to the next copy in heap 5656 else if(match_heap) 5657 { 5658 dir_entry.ptr = heap_entry.next; 5659 dir_entry.owner.srcid = r_cleanup_copy.read(); 5660 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5661 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5662 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5663 } 5664 5665 // The matching copy is in the heap, but is not the first copy 5666 // The directory entry must be modified to decrement count 5667 else 5668 { 5669 dir_entry.ptr = r_cleanup_ptr.read(); 5670 dir_entry.owner.srcid = r_cleanup_copy.read(); 5671 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5672 r_cleanup_next_ptr = heap_entry.next; 5673 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5674 } 5675 5676 m_cache_directory.write(set,way,dir_entry); 5677 5678 #if DEBUG_MEMC_CLEANUP 5679 if(m_debug) 5680 { 5681 std::cout 5682 << " <MEMC " << name() 5683 << " CLEANUP_HEAP_LOCK> Checks matching:" 5684 << " address = " << r_cleanup_nline.read() * m_words * 4 5685 << " / dir_id = " << r_cleanup_copy.read() 5686 << " / dir_ins = " << r_cleanup_copy_inst.read() 5687 << " / heap_id = " << heap_entry.owner.srcid 5688 << " / heap_ins = " << heap_entry.owner.inst 5689 << " / search_id = " << r_cleanup_srcid.read() 5690 << " / search_ins = " << r_cleanup_inst.read() 5691 << std::endl; 5692 } 5693 #endif 5694 break; 5695 } 5696 5697 //////////////////////// 5698 case CLEANUP_HEAP_SEARCH: 5699 { 5700 // This state is handling the case where the copy 5701 // is in the heap, but is not the first in the linked list 5702 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5703 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5704 5705 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 5706 5707 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 5708 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5709 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5710 bool match_heap = match_heap_srcid and match_heap_inst; 5711 5712 assert( (not last or match_heap) and 5713 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 5714 5715 // the matching copy must be removed 5716 if(match_heap) 5717 { 5718 // re-use ressources 5719 r_cleanup_ptr = heap_entry.next; 5720 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 5721 } 5722 // test the next in the linked list 5723 else 5724 { 5725 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 5726 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5727 r_cleanup_prev_inst = heap_entry.owner.inst; 5728 r_cleanup_next_ptr = heap_entry.next; 5729 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5730 } 5731 5732 #if DEBUG_MEMC_CLEANUP 5733 if(m_debug) 5734 { 5735 if(not match_heap) 5736 { 5737 std::cout 5738 << " <MEMC " << name() 5739 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 5740 << std::endl; 5741 } 5742 else 5743 { 5744 std::cout 5745 << " <MEMC " << name() 5746 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 5747 << std::endl; 5748 } 5749 5750 std::cout 5751 << " address = " << r_cleanup_nline.read() * m_words * 4 5752 << " / heap_id = " << heap_entry.owner.srcid 
5753 << " / heap_ins = " << heap_entry.owner.inst 5754 << " / search_id = " << r_cleanup_srcid.read() 5755 << " / search_ins = " << r_cleanup_inst.read() 5756 << " / last = " << last 5757 << std::endl; 5758 } 5759 #endif 5760 break; 5761 } 5762 //////////////////////// 5763 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 5764 { 5765 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5766 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5767 5768 HeapEntry heap_entry; 5769 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 5770 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 5771 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 5772 5773 // this is the last entry of the list of copies 5774 if(last) 5775 { 5776 heap_entry.next = r_cleanup_prev_ptr.read(); 5777 } 5778 // this is not the last entry 5779 else 5780 { 5781 heap_entry.next = r_cleanup_ptr.read(); 5782 } 5783 5784 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 5785 5786 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5787 5788 #if DEBUG_MEMC_CLEANUP 5789 if(m_debug) 5790 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 5791 << " Remove the copy in the linked list" << std::endl; 5792 #endif 5793 break; 5794 } 5795 /////////////////////// 5796 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 5797 // and becomes the head of the list of free entries 5798 { 5799 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5800 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5801 HeapEntry heap_entry; 5802 heap_entry.owner.srcid = 0; 5803 heap_entry.owner.inst = false; 5804 5805 if(m_heap.is_full()) 5806 { 5807 heap_entry.next = r_cleanup_next_ptr.read(); 5808 } 5809 else 5810 { 5811 heap_entry.next = m_heap.next_free_ptr(); 5812 } 5813 5814 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 5815 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 5816 m_heap.unset_full(); 5817 5818 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5819 5820 #if DEBUG_MEMC_CLEANUP 5821 if(m_debug) 5822 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 5823 << " Update the list of free entries" << std::endl; 5824 #endif 5825 break; 5826 } 5827 ////////////////////// 5828 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 5829 // invalidate transaction matching the cleanup 5830 { 5831 m_cpt_cleanup_fsm_ivt_lock++; 5832 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 5833 5834 size_t index = 0; 5835 bool match_inval; 5836 5837 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 5838 if ( not match_inval ) // no pending inval 5839 { 5840 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5841 5842 #if DEBUG_MEMC_CLEANUP 5843 if(m_debug) 5844 std::cout << " <MEMC " << name() 5845 << " CLEANUP_IVT_LOCK> Unexpected cleanup" 5846 << " with no corresponding IVT entry:" 5847 << " address = " << std::hex 5848 << (r_cleanup_nline.read() *4*m_words) 5849 << std::endl; 5850 #endif 5851 m_cpt_cleanup_fsm_n_upt_lock++; 5852 } 5853 else 5854 { 5855 // pending inval 5856 r_cleanup_write_srcid = m_ivt.srcid(index); 5857 r_cleanup_write_trdid = m_ivt.trdid(index); 5858 r_cleanup_write_pktid = m_ivt.pktid(index); 5859 r_cleanup_need_rsp = m_ivt.need_rsp(index); 5860 r_cleanup_need_ack = m_ivt.need_ack(index); 5861 r_cleanup_index = index; 5862 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 5863 #if DEBUG_MEMC_CLEANUP 5864 if(m_debug) 5865 std::cout << " <MEMC " << name() 5866 << " CLEANUP_IVT_LOCK> Cleanup matching pending" 5867 << " invalidate 
transaction on IVT:" 5868 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 5869 << " / ivt_entry = " << index << std::endl; 5870 #endif 5871 } 5872 break; 5873 } 5874 /////////////////////////// 5875 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 5876 { 5877 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5878 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 5879 5880 size_t count = 0; 5881 m_ivt.decrement(r_cleanup_index.read(), count); 5882 5883 if(count == 0) // multi inval transaction completed 5884 { 5885 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 5886 } 5887 else // multi inval transaction not completed 5888 { 5889 if (r_cleanup_ncc.read()) //need to put data to the XRAM 5890 { 5891 r_cleanup_fsm = CLEANUP_IXR_REQ; 5892 } 5893 else 5894 { 5895 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5896 } 5897 } 5898 5899 #if DEBUG_MEMC_CLEANUP 5900 if(m_debug) 5901 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 5902 << " Decrement response counter in IVT:" 5903 << " IVT_index = " << r_cleanup_index.read() 5904 << " / rsp_count = " << count << std::endl; 5905 #endif 5906 break; 5907 } 5908 /////////////////////// 5909 case CLEANUP_IVT_CLEAR: // Clear IVT entry 5910 { 5911 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5912 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 5913 5914 m_ivt.clear(r_cleanup_index.read()); 5915 5916 if ( r_cleanup_need_ack.read() ) 5917 { 5918 assert( (r_config_rsp_lines.read() > 0) and 5919 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 5920 5921 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 5922 } 5923 5924 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 5925 else if ( r_cleanup_ncc.read() ) r_cleanup_fsm = CLEANUP_IXR_REQ; 5926 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 5927 5928 #if DEBUG_MEMC_CLEANUP 5929 if(m_debug) 5930 std::cout << " <MEMC " << name() 5931 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 5932 << " IVT_index = " << r_cleanup_index.read() << std::endl; 5933 #endif 5934 break; 5935 } 5936 /////////////////////// 5937 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 5938 // wait if pending request to the TGT_RSP FSM 5939 { 5940 if(r_cleanup_to_tgt_rsp_req.read()) break; 5941 5942 // no pending request 5943 r_cleanup_to_tgt_rsp_req = true; 5944 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 5945 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 5946 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 5947 r_cleanup_to_tgt_rsp_type = true; 5948 5949 if (r_cleanup_ncc.read()) 5950 { 5951 r_cleanup_fsm = CLEANUP_IXR_REQ;//need to put data to the XRAM 5952 } 5953 else 5954 { 5955 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5956 } 5957 5958 #if DEBUG_MEMC_CLEANUP 5959 if(m_debug) 5960 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 5961 << " Send a response to a previous write request: " 5962 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 5963 << " / rtrdid = " << r_cleanup_write_trdid.read() 5964 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 5965 #endif 5966 break; 5967 } 5968 ///////////////////////// 5969 case CLEANUP_IXR_REQ: 5970 { 5971 //Send a request to the ixr to write the data in the XRAM using the prereserved TRT entry 5972 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CLEANUP) 5973 { 5974 if( not r_cleanup_to_ixr_cmd_req.read()) 5975 { 5976 size_t index = 0; 5977 bool hit = m_trt.hit_write(r_cleanup_nline.read(), &index); 5978 5979 assert 
(hit and "CLEANUP_IXR_REQ found no matching entry in TRT"); 5980 5981 r_cleanup_to_ixr_cmd_req = true; 5982 5983 if (r_cleanup_contains_data.read()) 5984 { 5985 std::vector<data_t> data_vector; 5986 data_vector.clear(); 5987 5988 for(size_t i=0; i<m_words; i++) 5989 { 5990 data_vector.push_back(r_cleanup_data[i]); 5991 } 5992 5993 m_trt.set(index, 5994 false, // write to XRAM 5995 r_cleanup_nline.read(), // line index 5996 0, 5997 0, 5998 0, 5999 false, 6000 0, 6001 0, 6002 std::vector<be_t> (m_words,0), 6003 data_vector); 6004 } 6005 //std::cout << "cleanup with a non coherent ligne in trt index = " << index << std::endl; 6006 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 6007 r_cleanup_to_ixr_cmd_index = index; 6008 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 6009 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 6010 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = r_cleanup_contains_data.read(); 6011 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6012 #if DEBUG_MEMC_CLEANUP 6013 if(m_debug) 6014 { 6015 std::cout 6016 << " <MEMC " << name() 6017 << " CLEANUP_IXR_REQ>" 6018 << " request send to IXR_CMD" 6019 << std::endl; 6020 } 6021 #endif 6022 } 6023 else 6024 { 6025 r_cleanup_fsm = CLEANUP_WAIT; 6026 #if DEBUG_MEMC_CLEANUP 6027 if(m_debug) 6028 { 6029 std::cout 6030 << " <MEMC " << name() 6031 << " CLEANUP_IXR_REQ>" 6032 << " waiting completion of previous request" 6033 << std::endl; 6034 } 6035 #endif 6036 } 6037 } 6038 break; 6039 } 6040 6041 ///////////////////// 6042 case CLEANUP_WAIT : 6043 { 6044 r_cleanup_fsm = CLEANUP_IXR_REQ; 6045 break; 6046 } 6047 6048 //////////////////////// 6049 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 6050 // on the coherence CLACK network. 6051 { 6052 if(not p_dspin_clack.read) break; 6053 6054 r_cleanup_fsm = CLEANUP_IDLE; 6055 6056 #if DEBUG_MEMC_CLEANUP 6057 if(m_debug) 6058 std::cout << " <MEMC " << name() 6059 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 6060 << " nline = " << std::hex << r_cleanup_nline.read() 6061 << " / way = " << std::dec << r_cleanup_way.read() 6062 << " / srcid = " << std::dec << r_cleanup_srcid.read() 6063 << std::endl; 6064 #endif 6065 break; 6066 } 6067 } // end switch cleanup fsm 6068 6069 //////////////////////////////////////////////////////////////////////////////////// 6070 // CAS FSM 6071 //////////////////////////////////////////////////////////////////////////////////// 6072 // The CAS FSM handles the CAS (Store Conditionnal) atomic commands, 6073 // that are handled as "compare-and-swap instructions. 6074 // 6075 // This command contains two or four flits: 6076 // - In case of 32 bits atomic access, the first flit contains the value read 6077 // by a previous LL instruction, the second flit contains the value to be writen. 6078 // - In case of 64 bits atomic access, the 2 first flits contains the value read 6079 // by a previous LL instruction, the 2 next flits contains the value to be writen. 6080 // 6081 // The target address is cachable. If it is replicated in other L1 caches 6082 // than the writer, a coherence operation is done. 6083 // 6084 // It access the directory to check hit / miss. 6085 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 6086 // If a read transaction to the XRAM for this line already exists, 6087 // or if the transaction table is full, it goes to the WAIT state 6088 // to release the locks and try again. When the GET transaction has been 6089 // launched, it goes to the WAIT state and try again. 
6090 // The CAS request is not consumed in the FIFO until a HIT is obtained. 6091 // - In case of hit... 6092 /////////////////////////////////////////////////////////////////////////////////// 6093 6094 switch(r_cas_fsm.read()) 6095 { 6096 ///////////// 6097 case CAS_IDLE: // fill the local rdata buffers 6098 { 6099 if(m_cmd_cas_addr_fifo.rok()) 6100 { 6101 6102 #if DEBUG_MEMC_CAS 6103 if(m_debug) 6104 { 6105 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 6106 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 6107 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 6108 << " wdata = " << m_cmd_cas_wdata_fifo.read() 6109 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 6110 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 6111 } 6112 #endif 6113 if(m_cmd_cas_eop_fifo.read()) 6114 { 6115 r_cas_fsm = CAS_DIR_REQ; 6116 } 6117 else // we keep the last word in the FIFO 6118 { 6119 cmd_cas_fifo_get = true; 6120 } 6121 // We fill the two buffers 6122 if(r_cas_cpt.read() < 2) // 32 bits access 6123 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 6124 6125 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 6126 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6127 6128 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits... 6129 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 6130 6131 if(r_cas_cpt.read() ==2) 6132 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6133 6134 r_cas_cpt = r_cas_cpt.read() +1; 6135 } 6136 break; 6137 } 6138 6139 ///////////////// 6140 case CAS_DIR_REQ: 6141 { 6142 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 6143 { 6144 r_cas_fsm = CAS_DIR_LOCK; 6145 m_cpt_cas_fsm_n_dir_lock++; 6146 } 6147 6148 #if DEBUG_MEMC_CAS 6149 if(m_debug) 6150 { 6151 std::cout 6152 << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " 6153 << std::endl; 6154 } 6155 #endif 6156 6157 m_cpt_cas_fsm_dir_lock++; 6158 6159 break; 6160 } 6161 6162 ///////////////// 6163 case CAS_DIR_LOCK: // Read the directory 6164 { 6165 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6166 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 6167 6168 size_t way = 0; 6169 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 6170 6171 r_cas_is_cnt = entry.is_cnt; 6172 r_cas_coherent = entry.cache_coherent; 6173 r_cas_dirty = entry.dirty; 6174 r_cas_tag = entry.tag; 6175 r_cas_way = way; 6176 r_cas_copy = entry.owner.srcid; 6177 r_cas_copy_inst = entry.owner.inst; 6178 r_cas_ptr = entry.ptr; 6179 r_cas_count = entry.count; 6180 6181 if(entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 6182 else r_cas_fsm = CAS_MISS_TRT_LOCK; 6183 6184 #if DEBUG_MEMC_CAS 6185 if(m_debug) 6186 { 6187 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 6188 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6189 << " / hit = " << std::dec << entry.valid 6190 << " / count = " << entry.count 6191 << " / is_cnt = " << entry.is_cnt << std::endl; 6192 } 6193 #endif 6194 break; 6195 } 6196 ///////////////////// 6197 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 6198 // and check data change in cache 6199 { 6200 size_t way = r_cas_way.read(); 6201 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6202 6203 // update directory (lock & dirty bits) 6204 DirectoryEntry entry; 6205 entry.valid = true; 6206 entry.cache_coherent = r_cas_coherent.read(); 6207 entry.is_cnt = r_cas_is_cnt.read(); 6208 entry.dirty = true; 6209 entry.lock = true; 6210 entry.tag = r_cas_tag.read(); 6211 
entry.owner.srcid = r_cas_copy.read(); 6212 entry.owner.inst = r_cas_copy_inst.read(); 6213 entry.count = r_cas_count.read(); 6214 entry.ptr = r_cas_ptr.read(); 6215 6216 m_cache_directory.write(set, way, entry); 6217 6218 // Stored data from cache in buffer to do the comparison in next state 6219 m_cache_data.read_line(way, set, r_cas_data); 6220 6221 r_cas_fsm = CAS_DIR_HIT_COMPARE; 6222 6223 #if DEBUG_MEMC_CAS 6224 if(m_debug) 6225 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 6226 << " cache and store it in buffer" << std::endl; 6227 #endif 6228 break; 6229 } 6230 6231 case CAS_DIR_HIT_COMPARE: 6232 { 6233 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6234 6235 // Read data in buffer & check data change 6236 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 6237 6238 if(r_cas_cpt.read() == 4) // 64 bits CAS 6239 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 6240 6241 // to avoid livelock, force the atomic access to fail pseudo-randomly 6242 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 6243 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 6244 6245 // cas success 6246 if(ok and not forced_fail) 6247 { 6248 r_cas_fsm = CAS_DIR_HIT_WRITE; 6249 } 6250 // cas failure 6251 else 6252 { 6253 r_cas_fsm = CAS_RSP_FAIL; 6254 } 6255 6256 #if DEBUG_MEMC_CAS 6257 if(m_debug) 6258 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare the old" 6259 << " and the new data" 6260 << " / expected value = " << r_cas_rdata[0].read() 6261 << " / actual value = " << r_cas_data[word].read() 6262 << " / forced_fail = " << forced_fail << std::endl; 6263 #endif 6264 break; 6265 } 6266 ////////////////////// 6267 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 6268 // write data in cache if no CC request 6269 { 6270 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6271 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 6272 6273 // The CAS is a success => sw access to the llsc_global_table 6274 m_llsc_table.sw(m_cmd_cas_addr_fifo.read(), m_cmd_cas_addr_fifo.read()); 6275 // test coherence request 6276 if(r_cas_count.read()) // replicated line 6277 { 6278 if(r_cas_is_cnt.read()) 6279 { 6280 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 6281 #if DEBUG_MEMC_CAS 6282 if(m_debug) 6283 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6284 << " Broacast Inval required" 6285 << " / copies = " << r_cas_count.read() << std::endl; 6286 #endif 6287 6288 } 6289 else if(!r_cas_to_cc_send_multi_req.read() and 6290 !r_cas_to_cc_send_brdcast_req.read()) 6291 { 6292 r_cas_fsm = CAS_UPT_LOCK; // multi update required 6293 #if DEBUG_MEMC_CAS 6294 if(m_debug) 6295 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6296 << " Multi Inval required" 6297 << " / copies = " << r_cas_count.read() << std::endl; 6298 #endif 6299 } 6300 else 6301 { 6302 r_cas_fsm = CAS_WAIT; 6303 #if DEBUG_MEMC_CAS 6304 if(m_debug) 6305 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6306 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 6307 #endif 6308 } 6309 } 6310 else // no copies 6311 { 6312 size_t way = r_cas_way.read(); 6313 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6314 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6315 6316 // cache update 6317 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6318 if(r_cas_cpt.read() == 4) 6319 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6320 6321 r_cas_fsm = CAS_RSP_SUCCESS; 6322 
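[Editorial aside, not part of the changeset] The livelock-avoidance logic of CAS_DIR_HIT_COMPARE shown just above can be read in isolation: a 32-bit Galois LFSR is stepped on every comparison, and roughly one attempt out of 64 is forced to fail so that two processors hammering the same line cannot starve each other forever. The standalone C++ sketch below only mirrors that LFSR step and failure condition; the RANDOMIZE_CAS constant and the CasLfsr wrapper are illustrative names, not identifiers from the memory cache source.

#include <cstdint>
#include <iostream>

static const bool RANDOMIZE_CAS = true;  // plays the role of the compile-time switch in the source

struct CasLfsr
{
    uint32_t state = 0xFFFFFFFF;         // any non-zero seed

    // Returns true when the current CAS attempt must be forced to fail.
    bool forced_fail()
    {
        bool fail = ((state % 64) == 0) and RANDOMIZE_CAS;
        // Galois LFSR step, written exactly as in CAS_DIR_HIT_COMPARE
        state = (state >> 1) ^ ((- (state & 1)) & 0xd0000001);
        return fail;
    }
};

int main()
{
    CasLfsr lfsr;
    int forced = 0;
    for (int i = 0; i < 6400; i++) if (lfsr.forced_fail()) forced++;
    std::cout << "forced failures: " << forced << " / 6400" << std::endl;  // roughly 1 in 64
}
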
6323 #if DEBUG_MEMC_CAS 6324 if(m_debug) 6325 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 6326 << " way = " << std::dec << way 6327 << " / set = " << set 6328 << " / word = " << word 6329 << " / value = " << r_cas_wdata.read() 6330 << " / count = " << r_cas_count.read() 6331 << " / global_llsc_table access" << std::endl; 6332 #endif 6333 } 6334 break; 6335 } 6336 ///////////////// 6337 case CAS_UPT_LOCK: // try to register the transaction in UPT 6338 // and write data in cache if successful registration 6339 // releases locks to retry later if UPT full 6340 { 6341 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 6342 { 6343 bool wok = false; 6344 size_t index = 0; 6345 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6346 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6347 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6348 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6349 size_t nb_copies = r_cas_count.read(); 6350 6351 wok = m_upt.set(true, // it's an update transaction 6352 false, // it's not a broadcast 6353 true, // response required 6354 false, // no acknowledge required 6355 srcid, 6356 trdid, 6357 pktid, 6358 nline, 6359 nb_copies, 6360 index); 6361 if(wok) // coherence transaction registered in UPT 6362 { 6363 // cache update 6364 size_t way = r_cas_way.read(); 6365 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6366 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6367 6368 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6369 if(r_cas_cpt.read() ==4) 6370 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6371 6372 r_cas_upt_index = index; 6373 r_cas_fsm = CAS_UPT_HEAP_LOCK; 6374 6375 } 6376 else // releases the locks protecting UPT and DIR UPT full 6377 { 6378 r_cas_fsm = CAS_WAIT; 6379 } 6380 6381 #if DEBUG_MEMC_CAS 6382 if(m_debug) 6383 std::cout << " <MEMC " << name() 6384 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 6385 << " / wok = " << wok 6386 << " / nline = " << std::hex << nline 6387 << " / count = " << nb_copies << std::endl; 6388 #endif 6389 m_cpt_cas_fsm_n_upt_lock++; 6390 } 6391 6392 m_cpt_cas_fsm_upt_lock++; 6393 6394 break; 6395 } 6396 ///////////// 6397 case CAS_WAIT: // release all locks and retry from beginning 6398 { 6399 6400 #if DEBUG_MEMC_CAS 6401 if(m_debug) 6402 { 6403 std::cout << " <MEMC " << name() 6404 << " CAS_WAIT> Release all locks" << std::endl; 6405 } 6406 #endif 6407 r_cas_fsm = CAS_DIR_REQ; 6408 break; 6409 } 6410 ////////////////// 6411 case CAS_UPT_HEAP_LOCK: // lock the heap 6412 { 6413 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 6414 { 6415 6416 #if DEBUG_MEMC_CAS 6417 if(m_debug) 6418 { 6419 std::cout << " <MEMC " << name() 6420 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 6421 } 6422 #endif 6423 r_cas_fsm = CAS_UPT_REQ; 6424 m_cpt_cas_fsm_n_heap_lock++; 6425 } 6426 6427 m_cpt_cas_fsm_heap_lock++; 6428 6429 break; 6430 } 6431 //////////////// 6432 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 6433 { 6434 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 6435 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 6436 6437 if(!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 6438 { 6439 r_cas_to_cc_send_brdcast_req = false; 6440 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6441 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6442 r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6443 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 6444 6445 
if(r_cas_cpt.read() == 4) 6446 { 6447 r_cas_to_cc_send_is_long = true; 6448 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 6449 } 6450 else 6451 { 6452 r_cas_to_cc_send_is_long = false; 6453 r_cas_to_cc_send_wdata_high = 0; 6454 } 6455 6456 // We put the first copy in the fifo 6457 cas_to_cc_send_fifo_put = true; 6458 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 6459 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 6460 if(r_cas_count.read() == 1) // one single copy 6461 { 6462 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 6463 // update responses 6464 cmd_cas_fifo_get = true; 6465 r_cas_to_cc_send_multi_req = true; 6466 r_cas_cpt = 0; 6467 } 6468 else // several copies 6469 { 6470 r_cas_fsm = CAS_UPT_NEXT; 6471 } 6472 6473 #if DEBUG_MEMC_CAS 6474 if(m_debug) 6475 { 6476 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 6477 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6478 << " / wdata = " << std::hex << r_cas_wdata.read() 6479 << " / srcid = " << std::dec << r_cas_copy.read() 6480 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 6481 } 6482 #endif 6483 } 6484 break; 6485 } 6486 ///////////////// 6487 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 6488 { 6489 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 6490 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 6491 6492 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 6493 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 6494 cas_to_cc_send_fifo_inst = entry.owner.inst; 6495 cas_to_cc_send_fifo_put = true; 6496 6497 if(m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 6498 { 6499 r_cas_ptr = entry.next; 6500 if(entry.next == r_cas_ptr.read()) // last copy 6501 { 6502 r_cas_to_cc_send_multi_req = true; 6503 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 6504 // all update responses 6505 cmd_cas_fifo_get = true; 6506 r_cas_cpt = 0; 6507 } 6508 } 6509 6510 #if DEBUG_MEMC_CAS 6511 if(m_debug) 6512 { 6513 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 6514 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6515 << " / wdata = " << std::hex << r_cas_wdata.read() 6516 << " / srcid = " << std::dec << entry.owner.srcid 6517 << " / inst = " << std::dec << entry.owner.inst << std::endl; 6518 } 6519 #endif 6520 break; 6521 } 6522 ///////////////////// 6523 case CAS_BC_TRT_LOCK: // check the TRT to register a PUT transaction 6524 { 6525 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6526 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 6527 6528 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 6529 { 6530 size_t wok_index = 0; 6531 bool wok = !m_trt.full(wok_index); 6532 if( wok ) 6533 { 6534 r_cas_trt_index = wok_index; 6535 r_cas_fsm = CAS_BC_IVT_LOCK; 6536 } 6537 else 6538 { 6539 r_cas_fsm = CAS_WAIT; 6540 m_cpt_cas_fsm_n_trt_lock++; 6541 } 6542 6543 #if DEBUG_MEMC_CAS 6544 if(m_debug) 6545 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 6546 << " : wok = " << wok << " / index = " << wok_index << std::endl; 6547 #endif 6548 } 6549 m_cpt_cas_fsm_trt_lock++; 6550 6551 break; 6552 } 6553 ///////////////////// 6554 case CAS_BC_IVT_LOCK: // register a broadcast inval transaction in IVT 6555 // write data in cache in case of successful registration 6556 { 6557 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) 6558 { 6559 bool wok = false; 6560 size_t index = 0; 6561 size_t srcid = 
m_cmd_cas_srcid_fifo.read(); 6562 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6563 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6564 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6565 size_t nb_copies = r_cas_count.read(); 6566 6567 // register a broadcast inval transaction in IVT 6568 wok = m_ivt.set(false, // it's an inval transaction 6569 true, // it's a broadcast 6570 true, // response required 6571 false, // no acknowledge required 6572 srcid, 6573 trdid, 6574 pktid, 6575 nline, 6576 nb_copies, 6577 index); 6578 6579 if(wok) // IVT not full 6580 { 6581 // cache update 6582 size_t way = r_cas_way.read(); 6583 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6584 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6585 6586 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6587 if(r_cas_cpt.read() ==4) 6588 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6589 6590 r_cas_upt_index = index; 6591 r_cas_fsm = CAS_BC_DIR_INVAL; 6592 6593 #if DEBUG_MEMC_CAS 6594 if(m_debug) 6595 std::cout << " <MEMC " << name() 6596 << " CAS_BC_IVT_LOCK> Register a broadcast inval transaction in IVT" 6597 << " / nline = " << std::hex << nline 6598 << " / count = " << std::dec << nb_copies 6599 << " / ivt_index = " << index << std::endl; 6600 #endif 6601 } 6602 else // releases the lock protecting IVT 6603 { 6604 r_cas_fsm = CAS_WAIT; 6605 } 6606 m_cpt_cas_fsm_n_upt_lock++; 6607 } 6608 6609 m_cpt_cas_fsm_upt_lock++; 6610 6611 break; 6612 } 6613 ////////////////////// 6614 case CAS_BC_DIR_INVAL: // Register the PUT transaction in TRT, and inval the DIR entry 6615 { 6616 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6617 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 6618 6619 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6620 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 6621 6622 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 6623 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 6624 6625 std::vector<data_t> data_vector; 6626 data_vector.clear(); 6627 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6628 for(size_t i=0; i<m_words; i++) 6629 { 6630 if(i == word) // first modified word 6631 data_vector.push_back( r_cas_wdata.read() ); 6632 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 6633 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 6634 else // unmodified words 6635 data_vector.push_back( r_cas_data[i].read() ); 6636 } 6637 m_trt.set( r_cas_trt_index.read(), 6638 false, // PUT request 6639 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 6640 0, 6641 0, 6642 0, 6643 false, // not a processor read 6644 0, 6645 0, 6646 std::vector<be_t> (m_words,0), 6647 data_vector ); 6648 6649 // invalidate directory entry 6650 DirectoryEntry entry; 6651 entry.valid = false; 6652 entry.dirty = false; 6653 entry.tag = 0; 6654 entry.is_cnt = false; 6655 entry.lock = false; 6656 entry.count = 0; 6657 entry.owner.srcid = 0; 6658 entry.owner.inst = false; 6659 entry.ptr = 0; 6660 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6661 size_t way = r_cas_way.read(); 6662 m_cache_directory.write(set, way, entry); 6663 6664 r_cas_fsm = CAS_BC_CC_SEND; 6665 6666 #if DEBUG_MEMC_CAS 6667 if(m_debug) 6668 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 6669 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 6670 #endif 6671 break; 6672 } 6673 /////////////////// 6674 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND 
FSM 6675 { 6676 if( not r_cas_to_cc_send_multi_req.read() and 6677 not r_cas_to_cc_send_brdcast_req.read()) 6678 { 6679 r_cas_to_cc_send_multi_req = false; 6680 r_cas_to_cc_send_brdcast_req = true; 6681 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6682 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6683 r_cas_to_cc_send_index = 0; 6684 r_cas_to_cc_send_wdata = 0; 6685 6686 r_cas_fsm = CAS_BC_XRAM_REQ; 6687 } 6688 break; 6689 } 6690 //////////////////// 6691 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a put transaction 6692 { 6693 if( not r_cas_to_ixr_cmd_req.read() ) 6694 { 6695 r_cas_to_ixr_cmd_req = true; 6696 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6697 r_cas_fsm = CAS_IDLE; 6698 cmd_cas_fifo_get = true; 6699 r_cas_cpt = 0; 6700 6701 #if DEBUG_MEMC_CAS 6702 if(m_debug) 6703 std::cout << " <MEMC " << name() 6704 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 6705 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 6706 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 6707 #endif 6708 } 6709 6710 break; 6711 } 6712 ///////////////// 6713 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 6714 { 6715 if( not r_cas_to_tgt_rsp_req.read() ) 6716 { 6717 cmd_cas_fifo_get = true; 6718 r_cas_cpt = 0; 6719 r_cas_to_tgt_rsp_req = true; 6720 r_cas_to_tgt_rsp_data = 1; 6721 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6722 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6723 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6724 r_cas_fsm = CAS_IDLE; 6725 6726 #if DEBUG_MEMC_CAS 6727 if(m_debug) 6728 std::cout << " <MEMC " << name() 6729 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 6730 #endif 6731 } 6732 break; 6733 } 6734 //////////////////// 6735 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 6736 { 6737 if( not r_cas_to_tgt_rsp_req.read() ) 6738 { 6739 cmd_cas_fifo_get = true; 6740 r_cas_cpt = 0; 6741 r_cas_to_tgt_rsp_req = true; 6742 r_cas_to_tgt_rsp_data = 0; 6743 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6744 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6745 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6746 r_cas_fsm = CAS_IDLE; 6747 6748 #if DEBUG_MEMC_CAS 6749 if(m_debug) 6750 std::cout << " <MEMC " << name() 6751 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 6752 #endif 6753 } 6754 break; 6755 } 6756 ///////////////////// 6757 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 6758 { 6759 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 6760 { 6761 size_t index = 0; 6762 bool hit_read = m_trt.hit_read( 6763 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 6764 bool hit_write = m_trt.hit_write( 6765 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 6766 bool wok = not m_trt.full(index); 6767 6768 #if DEBUG_MEMC_CAS 6769 if(m_debug) 6770 { 6771 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 6772 << " / hit_read = " << hit_read 6773 << " / hit_write = " << hit_write 6774 << " / wok = " << wok 6775 << " / index = " << index << std::endl; 6776 } 6777 #endif 6778 6779 if(hit_read or !wok or hit_write) // missing line already requested or no space in TRT 6780 { 6781 r_cas_fsm = CAS_WAIT; 6782 } 6783 else 6784 { 6785 r_cas_trt_index = index; 6786 r_cas_fsm = CAS_MISS_TRT_SET; 6787 } 6788 m_cpt_cas_fsm_n_trt_lock++; 6789 } 6790 6791 m_cpt_cas_fsm_trt_lock++; 6792 6793 break; 6794 } 6795 
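[Editorial aside, not part of the changeset] Before the TRT registration states that follow, it may help to restate what the CAS FSM header comment above promises to software: the access succeeds only if the addressed word (or double word) still holds the value returned by the earlier LL, in which case the new value is written and the response data is 0 (CAS_RSP_SUCCESS); otherwise nothing is written and the response data is 1 (CAS_RSP_FAIL). The fragment below is only a functional model of that behaviour under those assumptions, not code taken from the memory controller.

#include <cstdint>
#include <cstddef>
#include <iostream>

// 32-bit CAS: returns 0 on success (the value placed in r_cas_to_tgt_rsp_data
// by CAS_RSP_SUCCESS) and 1 on failure (as in CAS_RSP_FAIL).
inline uint32_t cas32(uint32_t &mem_word, uint32_t old_val, uint32_t new_val)
{
    if (mem_word != old_val) return 1;   // comparison fails: memory unchanged
    mem_word = new_val;                  // comparison succeeds: word updated
    return 0;
}

// 64-bit CAS operates on two consecutive words of the cache line,
// mirroring the word / word+1 accesses of CAS_DIR_HIT_COMPARE.
inline uint32_t cas64(uint32_t *line, size_t word,
                      uint32_t old_lo, uint32_t old_hi,
                      uint32_t new_lo, uint32_t new_hi)
{
    if (line[word] != old_lo or line[word + 1] != old_hi) return 1;
    line[word]     = new_lo;
    line[word + 1] = new_hi;
    return 0;
}

int main()
{
    uint32_t word = 42;
    std::cout << "first attempt:  " << cas32(word, 42, 43) << std::endl; // 0: success
    std::cout << "second attempt: " << cas32(word, 42, 44) << std::endl; // 1: word is now 43
}
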
//////////////////// 6796 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 6797 { 6798 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6799 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 6800 6801 std::vector<be_t> be_vector; 6802 std::vector<data_t> data_vector; 6803 be_vector.clear(); 6804 data_vector.clear(); 6805 for(size_t i=0; i<m_words; i++) 6806 { 6807 be_vector.push_back(0); 6808 data_vector.push_back(0); 6809 } 6810 6811 m_trt.set(r_cas_trt_index.read(), 6812 true, // read request 6813 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 6814 m_cmd_cas_srcid_fifo.read(), 6815 m_cmd_cas_trdid_fifo.read(), 6816 m_cmd_cas_pktid_fifo.read(), 6817 false, // write request from processor 6818 0, 6819 0, 6820 be_vector, 6821 data_vector); 6822 r_cas_fsm = CAS_MISS_XRAM_REQ; 6823 6824 #if DEBUG_MEMC_CAS 6825 if(m_debug) 6826 { 6827 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register a GET transaction in TRT" << std::hex 6828 << " / nline = " << m_nline[(addr_t) m_cmd_cas_addr_fifo.read()] 6829 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 6830 } 6831 #endif 6832 break; 6833 } 6834 ////////////////////// 6835 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line 6836 { 6837 if( not r_cas_to_ixr_cmd_req.read() ) 6838 { 6839 r_cas_to_ixr_cmd_req = true; 6840 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6841 r_cas_fsm = CAS_WAIT; 6842 6843 #if DEBUG_MEMC_CAS 6844 if(m_debug) 6845 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 6846 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 6847 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 6848 #endif 6849 } 6850 break; 6851 } 6852 } // end switch r_cas_fsm 6853 6854 6855 ////////////////////////////////////////////////////////////////////////////// 6856 // CC_SEND FSM 6857 ////////////////////////////////////////////////////////////////////////////// 6858 // The CC_SEND fsm controls the DSPIN initiator port on the coherence 6859 // network, used to update or invalidate cache lines in L1 caches. 6860 // 6861 // It implements a round-robin priority between the four possible client FSMs 6862 // XRAM_RSP > CAS > WRITE > CONFIG 6863 // 6864 // Each FSM can request the next services: 6865 // - r_xram_rsp_to_cc_send_multi_req : multi-inval 6866 // r_xram_rsp_to_cc_send_brdcast_req : broadcast-inval 6867 // - r_write_to_cc_send_multi_req : multi-update 6868 // r_write_to_cc_send_brdcast_req : broadcast-inval 6869 // - r_cas_to_cc_send_multi_req : multi-update 6870 // r_cas_to_cc_send_brdcast_req : broadcast-inval 6871 // - r_config_to_cc_send_multi_req : multi-inval 6872 // r_config_to_cc_send_brdcast_req : broadcast-inval 6873 // 6874 // An inval request is a double DSPIN flit command containing: 6875 // 1. the index of the line to be invalidated. 6876 // 6877 // An update request is a multi-flit DSPIN command containing: 6878 // 1. the index of the cache line to be updated. 6879 // 2. the index of the first modified word in the line. 6880 // 3. 
the data to update 6881 /////////////////////////////////////////////////////////////////////////////// 6882 6883 switch(r_cc_send_fsm.read()) 6884 { 6885 ///////////////////////// 6886 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 6887 { 6888 // XRAM_RSP 6889 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6890 r_xram_rsp_to_cc_send_multi_req.read()) 6891 { 6892 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6893 break; 6894 } 6895 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6896 { 6897 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6898 break; 6899 } 6900 // CAS 6901 if(m_cas_to_cc_send_inst_fifo.rok() or 6902 r_cas_to_cc_send_multi_req.read()) 6903 { 6904 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6905 break; 6906 } 6907 if(r_cas_to_cc_send_brdcast_req.read()) 6908 { 6909 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6910 break; 6911 } 6912 6913 if(r_read_to_cc_send_req.read()) 6914 { 6915 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 6916 break; 6917 } 6918 6919 if(r_write_to_cc_send_req.read()) 6920 { 6921 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 6922 break; 6923 } 6924 6925 6926 // WRITE 6927 if(r_read_to_cc_send_req.read()) 6928 { 6929 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 6930 break; 6931 } 6932 6933 if(r_write_to_cc_send_req.read()) 6934 { 6935 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 6936 break; 6937 } 6938 if(m_write_to_cc_send_inst_fifo.rok() or 6939 r_write_to_cc_send_multi_req.read()) 6940 { 6941 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6942 break; 6943 } 6944 if(r_write_to_cc_send_brdcast_req.read()) 6945 { 6946 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6947 break; 6948 } 6949 // CONFIG 6950 if(r_config_to_cc_send_multi_req.read()) 6951 { 6952 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6953 break; 6954 } 6955 if(r_config_to_cc_send_brdcast_req.read()) 6956 { 6957 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6958 break; 6959 } 6960 break; 6961 } 6962 //////////////////////// 6963 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 6964 { 6965 // CONFIG 6966 if(r_config_to_cc_send_multi_req.read()) 6967 { 6968 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6969 break; 6970 } 6971 if(r_config_to_cc_send_brdcast_req.read()) 6972 { 6973 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6974 break; 6975 } 6976 // XRAM_RSP 6977 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6978 r_xram_rsp_to_cc_send_multi_req.read()) 6979 { 6980 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6981 break; 6982 } 6983 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6984 { 6985 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6986 break; 6987 } 6988 // CAS 6989 if(m_cas_to_cc_send_inst_fifo.rok() or 6990 r_cas_to_cc_send_multi_req.read()) 6991 { 6992 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6993 break; 6994 } 6995 if(r_cas_to_cc_send_brdcast_req.read()) 6996 { 6997 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6998 break; 6999 } 7000 // WRITE 7001 if(r_read_to_cc_send_req.read()) 7002 { 7003 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7004 break; 7005 } 7006 7007 if(r_write_to_cc_send_req.read()) 7008 { 7009 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7010 break; 7011 } 7012 if(m_write_to_cc_send_inst_fifo.rok() or 7013 r_write_to_cc_send_multi_req.read()) 7014 { 7015 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7016 break; 7017 } 7018 if(r_write_to_cc_send_brdcast_req.read()) 7019 { 7020 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7021 break; 7022 } 7023 break; 7024 } 7025 /////////////////////////// 7026 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has 
highest priority 7027 { 7028 // CAS 7029 if(m_cas_to_cc_send_inst_fifo.rok() or 7030 r_cas_to_cc_send_multi_req.read()) 7031 { 7032 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7033 break; 7034 } 7035 if(r_cas_to_cc_send_brdcast_req.read()) 7036 { 7037 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7038 break; 7039 } 7040 7041 if(r_read_to_cc_send_req.read()) 7042 { 7043 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7044 break; 7045 } 7046 7047 if(r_write_to_cc_send_req.read()) 7048 { 7049 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7050 break; 7051 } 7052 7053 7054 // WRITE 7055 if(r_read_to_cc_send_req.read()) 7056 { 7057 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7058 break; 7059 } 7060 7061 if(r_write_to_cc_send_req.read()) 7062 { 7063 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7064 break; 7065 } 7066 if(m_write_to_cc_send_inst_fifo.rok() or 7067 r_write_to_cc_send_multi_req.read()) 7068 { 7069 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7070 break; 7071 } 7072 7073 if(r_write_to_cc_send_brdcast_req.read()) 7074 { 7075 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7076 break; 7077 } 7078 // CONFIG 7079 if(r_config_to_cc_send_multi_req.read()) 7080 { 7081 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7082 break; 7083 } 7084 if(r_config_to_cc_send_brdcast_req.read()) 7085 { 7086 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7087 break; 7088 } 7089 // XRAM_RSP 7090 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7091 r_xram_rsp_to_cc_send_multi_req.read()) 7092 { 7093 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7094 break; 7095 } 7096 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7097 { 7098 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7099 break; 7100 } 7101 break; 7102 } 7103 ////////////////////// 7104 case CC_SEND_CAS_IDLE: // CLEANUP FSM has highest priority 7105 { 7106 7107 if(r_read_to_cc_send_req.read()) 7108 { 7109 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7110 break; 7111 } 7112 7113 if(r_write_to_cc_send_req.read()) 7114 { 7115 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7116 break; 7117 } 7118 7119 7120 if(m_write_to_cc_send_inst_fifo.rok() or 7121 r_write_to_cc_send_multi_req.read()) 7122 { 7123 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7124 break; 7125 } 7126 if(r_write_to_cc_send_brdcast_req.read()) 7127 { 7128 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7129 break; 7130 } 7131 // CONFIG 7132 if(r_config_to_cc_send_multi_req.read()) 7133 { 7134 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7135 break; 7136 } 7137 if(r_config_to_cc_send_brdcast_req.read()) 7138 { 7139 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7140 break; 7141 } 7142 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7143 r_xram_rsp_to_cc_send_multi_req.read()) 7144 { 7145 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7146 break; 7147 } 7148 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7149 { 7150 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7151 break; 7152 } 7153 if(m_cas_to_cc_send_inst_fifo.rok() or 7154 r_cas_to_cc_send_multi_req.read()) 7155 { 7156 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7157 break; 7158 } 7159 if(r_cas_to_cc_send_brdcast_req.read()) 7160 { 7161 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7162 break; 7163 } 7164 break; 7165 } 7166 ///////////////////////////////// 7167 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 7168 { 7169 if(m_config_to_cc_send_inst_fifo.rok()) 7170 { 7171 if(not p_dspin_m2p.read) break; 7172 // <Activity Counters> 7173 if (is_local_req(m_config_to_cc_send_srcid_fifo.read())) 7174 { 7175 
m_cpt_m_inval_local++; 7176 } 7177 else 7178 { 7179 m_cpt_m_inval_remote++; 7180 m_cpt_m_inval_cost += req_distance(m_config_to_cc_send_srcid_fifo.read()); 7181 } 7182 // </Activity Counters> 7183 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 7184 break; 7185 } 7186 if(r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 7187 // <Activity Counters> 7188 m_cpt_m_inval++; 7189 // </Activity Counters> 7190 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7191 break; 7192 } 7193 //////////////////////////////// 7194 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 7195 { 7196 if(not p_dspin_m2p.read) break; 7197 config_to_cc_send_fifo_get = true; 7198 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7199 7200 #if DEBUG_MEMC_CC_SEND 7201 if(m_debug) 7202 std::cout << " <MEMC " << name() 7203 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 7204 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7205 #endif 7206 break; 7207 } 7208 /////////////////////////////////// 7209 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 7210 { 7211 if(not p_dspin_m2p.read) break; 7212 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 7213 break; 7214 } 7215 ////////////////////////////////// 7216 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 7217 { 7218 if(not p_dspin_m2p.read) break; 7219 // <Activity Counters> 7220 m_cpt_br_inval++; 7221 // </Activity Counters> 7222 r_config_to_cc_send_brdcast_req = false; 7223 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7224 7225 #if DEBUG_MEMC_CC_SEND 7226 if(m_debug) 7227 std::cout << " <MEMC " << name() 7228 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 7229 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7230 #endif 7231 break; 7232 } 7233 /////////////////////////////////// 7234 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 7235 { 7236 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 7237 { 7238 if(not p_dspin_m2p.read) break; 7239 // <Activity Counters> 7240 if (is_local_req(m_xram_rsp_to_cc_send_srcid_fifo.read())) 7241 { 7242 m_cpt_m_inval_local++; 7243 } 7244 else 7245 { 7246 m_cpt_m_inval_remote++; 7247 m_cpt_m_inval_cost += req_distance(m_xram_rsp_to_cc_send_srcid_fifo.read()); 7248 } 7249 // </Activity Counters> 7250 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 7251 break; 7252 } 7253 if(r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 7254 // <Activity Counters> 7255 m_cpt_m_inval++; 7256 // </Activity Counters> 7257 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7258 break; 7259 } 7260 ////////////////////////////////// 7261 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 7262 { 7263 if(not p_dspin_m2p.read) break; 7264 xram_rsp_to_cc_send_fifo_get = true; 7265 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7266 7267 #if DEBUG_MEMC_CC_SEND 7268 if(m_debug) 7269 std::cout << " <MEMC " << name() 7270 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 7271 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7272 #endif 7273 break; 7274 } 7275 ///////////////////////////////////// 7276 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 7277 { 7278 if(not p_dspin_m2p.read) break; 7279 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 7280 break; 7281 } 7282 //////////////////////////////////// 7283 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval 
(from XRAM_RSP FSM) 7284 { 7285 if(not p_dspin_m2p.read) break; 7286 // <Activity Counters> 7287 m_cpt_br_inval++; 7288 // </Activity Counters> 7289 r_xram_rsp_to_cc_send_brdcast_req = false; 7290 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7291 7292 #if DEBUG_MEMC_CC_SEND 7293 if(m_debug) 7294 std::cout << " <MEMC " << name() 7295 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 7296 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7297 #endif 7298 break; 7299 } 7300 7301 case CC_SEND_READ_NCC_INVAL_HEADER: 7302 { 7303 if(not p_dspin_m2p.read) break; 7304 7305 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_NLINE; 7306 break; 7307 } 7308 7309 case CC_SEND_READ_NCC_INVAL_NLINE: 7310 { 7311 if(not p_dspin_m2p.read) break; 7312 7313 r_read_to_cc_send_req = false; 7314 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7315 7316 #if DEBUG_MEMC_CC_SEND 7317 if(m_debug) 7318 { 7319 std::cout 7320 << " <MEMC " << name() 7321 << " CC_SEND_READ_NCC_INVAL_HEADER> Inval for line " 7322 << r_read_to_cc_send_nline.read() 7323 << std::endl; 7324 } 7325 #endif 7326 break; 7327 } 7328 7329 7330 case CC_SEND_WRITE_NCC_INVAL_HEADER: 7331 { 7332 if(not p_dspin_m2p.read) break; 7333 7334 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_NLINE; 7335 break; 7336 } 7337 7338 case CC_SEND_WRITE_NCC_INVAL_NLINE: 7339 { 7340 if(not p_dspin_m2p.read) break; 7341 7342 r_write_to_cc_send_req = false; 7343 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7344 7345 #if DEBUG_MEMC_CC_SEND 7346 if(m_debug) 7347 { 7348 std::cout 7349 << " <MEMC " << name() 7350 << " CC_SEND_WRITE_NCC_INVAL_HEADER> Inval for line " 7351 << r_write_to_cc_send_nline.read() 7352 << std::endl; 7353 } 7354 #endif 7355 break; 7356 } 7357 7358 7359 ////////////////////////////////// 7360 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 7361 { 7362 if(not p_dspin_m2p.read) break; 7363 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 7364 break; 7365 } 7366 ///////////////////////////////// 7367 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 7368 { 7369 if(not p_dspin_m2p.read) break; 7370 7371 // <Activity Counters> 7372 m_cpt_br_inval++; 7373 // </Activity Counters> 7374 7375 r_write_to_cc_send_brdcast_req = false; 7376 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7377 7378 #if DEBUG_MEMC_CC_SEND 7379 if(m_debug) 7380 std::cout << " <MEMC " << name() 7381 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 7382 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 7383 #endif 7384 break; 7385 } 7386 /////////////////////////////// 7387 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 7388 { 7389 if(m_write_to_cc_send_inst_fifo.rok()) 7390 { 7391 if(not p_dspin_m2p.read) break; 7392 // <Activity Counters> 7393 if (is_local_req(m_write_to_cc_send_srcid_fifo.read())) 7394 { 7395 m_cpt_update_local++; 7396 } 7397 else 7398 { 7399 m_cpt_update_remote++; 7400 m_cpt_update_cost += req_distance(m_write_to_cc_send_srcid_fifo.read()); 7401 } 7402 // </Activity Counters> 7403 7404 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 7405 break; 7406 } 7407 7408 if(r_write_to_cc_send_multi_req.read()) 7409 { 7410 r_write_to_cc_send_multi_req = false; 7411 } 7412 7413 // <Activity Counters> 7414 m_cpt_update++; 7415 // </Activity Counters> 7416 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7417 break; 7418 } 7419 ////////////////////////////// 7420 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 7421 { 7422 if(not p_dspin_m2p.read) break; 7423 7424 
r_cc_send_cpt = 0; 7425 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 7426 7427 #if DEBUG_MEMC_CC_SEND 7428 if(m_debug) 7429 std::cout << " <MEMC " << name() 7430 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for line " 7431 << r_write_to_cc_send_nline.read() << std::endl; 7432 #endif 7433 break; 7434 } 7435 ///////////////////////////// 7436 case CC_SEND_WRITE_UPDT_DATA: // send N data flits for a multi-update (from WRITE FSM) 7437 { 7438 if(not p_dspin_m2p.read) break; 7439 if(r_cc_send_cpt.read() == r_write_to_cc_send_count.read()) 7440 { 7441 write_to_cc_send_fifo_get = true; 7442 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7443 break; 7444 } 7445 7446 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 7447 break; 7448 } 7449 //////////////////////////////// 7450 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 7451 { 7452 if(not p_dspin_m2p.read) break; 7453 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 7454 break; 7455 } 7456 /////////////////////////////// 7457 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 7458 { 7459 if(not p_dspin_m2p.read) break; 7460 // <Activity Counters> 7461 m_cpt_br_inval++; 7462 // </Activity Counters> 7463 7464 r_cas_to_cc_send_brdcast_req = false; 7465 r_cc_send_fsm = CC_SEND_CAS_IDLE; 7466 7467 #if DEBUG_MEMC_CC_SEND 7468 if(m_debug) 7469 std::cout << " <MEMC " << name() 7470 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for line " 7471 << r_cas_to_cc_send_nline.read() << std::endl; 7472 #endif 7473 break; 7474 } 7475 ///////////////////////////// 7476 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 7477 { 7478 if(m_cas_to_cc_send_inst_fifo.rok()) 7479 { 7480 if(not p_dspin_m2p.read) break; 7481 // <Activity Counters> 7482 if (is_local_req(m_cas_to_cc_send_srcid_fifo.read())) 7483 { 7484 m_cpt_update_local++; 7485 } 7486 else 7487 { 7488 m_cpt_update_remote++; 7489 m_cpt_update_cost += req_distance(m_cas_to_cc_send_srcid_fifo.read()); 7490 } 7491 // </Activity Counters> 7492 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 7493 break; 7494 } 7495 7496 // no more packets to send for the multi-update 7497 if(r_cas_to_cc_send_multi_req.read()) 7498 { 7499 r_cas_to_cc_send_multi_req = false; 7500 } 7501 7502 // <Activity Counters> 7503 m_cpt_update++; 7504 // </Activity Counters> 7505 r_cc_send_fsm = CC_SEND_CAS_IDLE; 7506 break; 7507 } 7508 //////////////////////////// 7509 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 7510 { 7511 if(not p_dspin_m2p.read) break; 7512 r_cc_send_cpt = 0; 7513 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 7514 7515 #if DEBUG_MEMC_CC_SEND 7516 if(m_debug) 7517 std::cout << " <MEMC " << name() 7518 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for line " 7519 << r_cas_to_cc_send_nline.read() << std::endl; 7520 #endif 7521 break; 7522 } 7523 /////////////////////////// 7524 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 7525 { 7526 if(not p_dspin_m2p.read) break; 7527 7528 if(r_cas_to_cc_send_is_long.read()) 7529 { 7530 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 7531 break; 7532 } 7533 7534 cas_to_cc_send_fifo_get = true; 7535 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7536 break; 7537 } 7538 //////////////////////////////// 7539 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for a multi-update (from CAS FSM) 7540 { 7541 if(not p_dspin_m2p.read) break; 7542 cas_to_cc_send_fifo_get = true; 7543 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7544 break; 7545 } 7546 } 7547 // end switch 
r_cc_send_fsm 7548 7549 ////////////////////////////////////////////////////////////////////////////// 7550 // CC_RECEIVE FSM 7551 ////////////////////////////////////////////////////////////////////////////// 7552 // The CC_RECEIVE fsm controls the DSPIN target port on the coherence 7553 // network. 7554 ////////////////////////////////////////////////////////////////////////////// 7555 7556 switch(r_cc_receive_fsm.read()) 7557 { 7558 ///////////////////// 7559 case CC_RECEIVE_IDLE: 7560 { 7561 if(not p_dspin_p2m.write) break; 7562 7563 uint8_t type = 7564 DspinDhccpParam::dspin_get( 7565 p_dspin_p2m.data.read(), 7566 DspinDhccpParam::P2M_TYPE); 7567 7568 if((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or 7569 (type == DspinDhccpParam::TYPE_CLEANUP_INST)) 7570 { 7571 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 7572 break; 7573 } 7574 7575 if(type == DspinDhccpParam::TYPE_MULTI_ACK) 7576 { 7577 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 7578 break; 7579 } 7580 7581 assert(false and 7582 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 7583 "Illegal type in coherence request"); 7584 7585 break; 7586 } 7587 //////////////////////// 7588 case CC_RECEIVE_CLEANUP: 7589 { 7590 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 7591 7592 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 7593 break; 7594 7595 cc_receive_to_cleanup_fifo_put = true; 7596 if(p_dspin_p2m.eop.read()) 7597 r_cc_receive_fsm = CC_RECEIVE_IDLE; 7598 7599 break; 7600 } 7601 //////////////////////////// 7602 case CC_RECEIVE_CLEANUP_EOP: 7603 { 7604 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo 7605 7606 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 7607 break; 7608 7609 assert(p_dspin_p2m.eop.read() and 7610 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 7611 "CLEANUP command must have two flits"); 7612 7613 cc_receive_to_cleanup_fifo_put = true; 7614 if(p_dspin_p2m.eop.read()) 7615 r_cc_receive_fsm = CC_RECEIVE_IDLE; 7616 break; 7617 } 7618 7619 ////////////////////////// 7620 case CC_RECEIVE_MULTI_ACK: 7621 { 7622 // write MULTI_ACK flit in CC_RECEIVE to MULTI_ACK fifo 7623 7624 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 7625 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 7626 break; 7627 7628 assert(p_dspin_p2m.eop.read() and 7629 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 7630 "MULTI_ACK command must have one flit"); 7631 7632 cc_receive_to_multi_ack_fifo_put = true; 7633 r_cc_receive_fsm = CC_RECEIVE_IDLE; 7634 break; 7635 } 7636 } 7637 ////////////////////////////////////////////////////////////////////////// 7638 // TGT_RSP FSM 7639 ////////////////////////////////////////////////////////////////////////// 7640 // The TGT_RSP fsm sends the responses on the VCI target port 7641 // with a round robin priority between eigth requests : 7642 // - r_config_to_tgt_rsp_req 7643 // - r_tgt_cmd_to_tgt_rsp_req 7644 // - r_read_to_tgt_rsp_req 7645 // - r_write_to_tgt_rsp_req 7646 // - r_cas_to_tgt_rsp_req 7647 // - r_cleanup_to_tgt_rsp_req 7648 // - r_xram_rsp_to_tgt_rsp_req 7649 // - r_multi_ack_to_tgt_rsp_req 7650 // 7651 // The ordering is : 7652 // config >tgt_cmd > read > write > cas > xram > multi_ack > cleanup 7653 ////////////////////////////////////////////////////////////////////////// 7654 7655 switch(r_tgt_rsp_fsm.read()) 7656 { 7657 ///////////////////////// 7658 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 7659 { 7660 if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7661 else if(r_read_to_tgt_rsp_req) 7662 { 7663 
r_tgt_rsp_fsm = TGT_RSP_READ; 7664 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7665 } 7666 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7667 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7668 else if(r_xram_rsp_to_tgt_rsp_req) 7669 { 7670 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7671 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7672 } 7673 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7674 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7675 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7676 break; 7677 } 7678 ////////////////////////// 7679 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 7680 { 7681 if(r_read_to_tgt_rsp_req) 7682 { 7683 r_tgt_rsp_fsm = TGT_RSP_READ; 7684 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7685 } 7686 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7687 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7688 else if(r_xram_rsp_to_tgt_rsp_req) 7689 { 7690 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7691 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7692 } 7693 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7694 else if(r_cleanup_to_tgt_rsp_req) 7695 { 7696 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7697 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7698 } 7699 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7700 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7701 break; 7702 } 7703 /////////////////////// 7704 case TGT_RSP_READ_IDLE: // write requests have the highest priority 7705 { 7706 if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7707 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7708 else if(r_xram_rsp_to_tgt_rsp_req) 7709 { 7710 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7711 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7712 } 7713 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7714 else if(r_cleanup_to_tgt_rsp_req) 7715 { 7716 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7717 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7718 } 7719 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7720 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7721 else if(r_read_to_tgt_rsp_req) 7722 { 7723 r_tgt_rsp_fsm = TGT_RSP_READ; 7724 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7725 } 7726 break; 7727 } 7728 //////////////////////// 7729 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 7730 { 7731 if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7732 else if(r_xram_rsp_to_tgt_rsp_req) 7733 { 7734 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7735 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7736 } 7737 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7738 else if(r_cleanup_to_tgt_rsp_req) 7739 { 7740 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7741 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7742 } 7743 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7744 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7745 else if(r_read_to_tgt_rsp_req) 7746 { 7747 r_tgt_rsp_fsm = TGT_RSP_READ; 7748 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7749 } 7750 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7751 break; 7752 } 7753 /////////////////////// 7754 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 7755 { 7756 if(r_xram_rsp_to_tgt_rsp_req) 7757 { 7758 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7759 r_tgt_rsp_cpt = 
r_xram_rsp_to_tgt_rsp_word.read(); 7760 } 7761 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7762 else if(r_cleanup_to_tgt_rsp_req) 7763 { 7764 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7765 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7766 } 7767 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7768 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7769 else if(r_read_to_tgt_rsp_req) 7770 { 7771 r_tgt_rsp_fsm = TGT_RSP_READ; 7772 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7773 } 7774 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7775 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7776 break; 7777 } 7778 /////////////////////// 7779 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 7780 { 7781 7782 if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7783 else if(r_cleanup_to_tgt_rsp_req) 7784 { 7785 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7786 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7787 } 7788 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7789 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7790 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7791 else if(r_read_to_tgt_rsp_req) 7792 { 7793 r_tgt_rsp_fsm = TGT_RSP_READ; 7794 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7795 } 7796 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7797 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7798 else if(r_xram_rsp_to_tgt_rsp_req) 7799 { 7800 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7801 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7802 } 7803 break; 7804 } 7805 //////////////////////////// 7806 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 7807 { 7808 if(r_cleanup_to_tgt_rsp_req) 7809 { 7810 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7811 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7812 } 7813 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7814 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7815 else if(r_read_to_tgt_rsp_req) 7816 { 7817 r_tgt_rsp_fsm = TGT_RSP_READ; 7818 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7819 } 7820 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7821 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7822 else if(r_xram_rsp_to_tgt_rsp_req) 7823 { 7824 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7825 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7826 } 7827 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7828 break; 7829 } 7830 ////////////////////////// 7831 case TGT_RSP_CLEANUP_IDLE: // tgt cmd requests have the highest priority 7832 { 7833 if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7834 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7835 else if(r_read_to_tgt_rsp_req) 7836 { 7837 r_tgt_rsp_fsm = TGT_RSP_READ; 7838 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7839 } 7840 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7841 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7842 else if(r_xram_rsp_to_tgt_rsp_req) 7843 { 7844 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7845 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7846 } 7847 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7848 else if(r_cleanup_to_tgt_rsp_req) 7849 { 7850 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7851 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7852 } 7853 break; 7854 } 7855 //////////////////// 
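//////////////////////////////////////////////////////////////////////////
// Illustration: the TGT_RSP_*_IDLE states above hard-code a rotating
// round-robin scan, where the *_IDLE state the FSM sits in encodes which
// source was served last. A minimal stand-alone sketch of that policy,
// with simplified hypothetical names (not the component's registers):
//////////////////////////////////////////////////////////////////////////

#include <array>

enum RspSrc { CONFIG, TGT_CMD, READ, WRITE, CAS, XRAM, MULTI_ACK, CLEANUP, NB_SRC };

// req[i] mirrors the r_*_to_tgt_rsp_req flip-flops; 'last' is the source served
// most recently. The scan starts just after 'last', so the source just served
// gets the lowest priority, in the fixed cyclic order listed in the FSM header.
inline int tgt_rsp_select(const std::array<bool, NB_SRC>& req, int last)
{
    for (int k = 1; k <= NB_SRC; ++k)
    {
        int cand = (last + k) % NB_SRC;
        if (req[cand]) return cand;     // first pending request in cyclic order wins
    }
    return -1;                          // no response pending: stay in the IDLE state
}
//////////////////////////////////////////////////////////////////////////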
7856 case TGT_RSP_CONFIG: // send the response for a config transaction 7857 { 7858 if ( p_vci_tgt.rspack ) 7859 { 7860 r_config_to_tgt_rsp_req = false; 7861 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 7862 7863 #if DEBUG_MEMC_TGT_RSP 7864 if( m_debug ) 7865 { 7866 std::cout 7867 << " <MEMC " << name() 7868 << " TGT_RSP_CONFIG> Config transaction completed response" 7869 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 7870 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 7871 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 7872 << std::endl; 7873 } 7874 #endif 7875 } 7876 break; 7877 } 7878 ///////////////////// 7879 case TGT_RSP_TGT_CMD: // send the response for a configuration access 7880 { 7881 if ( p_vci_tgt.rspack ) 7882 { 7883 r_tgt_cmd_to_tgt_rsp_req = false; 7884 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 7885 7886 #if DEBUG_MEMC_TGT_RSP 7887 if( m_debug ) 7888 { 7889 std::cout 7890 << " <MEMC " << name() 7891 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 7892 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 7893 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 7894 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 7895 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 7896 << std::endl; 7897 } 7898 #endif 7899 } 7900 break; 7901 } 7902 ////////////////// 7903 case TGT_RSP_READ: // send the response to a read 7904 { 7905 if ( p_vci_tgt.rspack ) 7906 { 7907 7908 #if DEBUG_MEMC_TGT_RSP 7909 if( m_debug ) 7910 { 7911 std::cout 7912 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 7913 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 7914 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 7915 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 7916 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 7917 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 7918 } 7919 #endif 7920 7921 7922 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 7923 r_read_to_tgt_rsp_length.read() - 1; 7924 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7925 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7926 7927 if ((is_last_word and not is_ll) or 7928 (r_tgt_rsp_key_sent.read() and is_ll)) 7929 { 7930 // Last word in case of READ or second flit in case if LL 7931 r_tgt_rsp_key_sent = false; 7932 r_read_to_tgt_rsp_req = false; 7933 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 7934 if (r_read_to_tgt_rsp_pktid.read() == 0x0) 7935 { 7936 m_cpt_read_data_unc ++; 7937 } 7938 else if (r_read_to_tgt_rsp_pktid.read() == 0x1) 7939 { 7940 m_cpt_read_data_miss_CC ++; 7941 } 7942 else if (r_read_to_tgt_rsp_pktid.read() == 0x2) 7943 { 7944 m_cpt_read_ins_unc ++; 7945 } 7946 else if (r_read_to_tgt_rsp_pktid.read() == 0x3) 7947 { 7948 m_cpt_read_ins_miss ++; 7949 } 7950 else if (r_read_to_tgt_rsp_pktid.read() == 0x6) 7951 { 7952 m_cpt_read_ll_CC ++; 7953 } 7954 else if (r_read_to_tgt_rsp_pktid.read() == 0x9) 7955 { 7956 m_cpt_read_data_miss_NCC ++; 7957 } 7958 else if (r_read_to_tgt_rsp_pktid.read() == 0x14) 7959 { 7960 m_cpt_read_ll_NCC ++; 7961 } 7962 else 7963 { 7964 m_cpt_read_WTF ++; 7965 } 7966 } 7967 else 7968 { 7969 if (is_ll) 7970 { 7971 r_tgt_rsp_key_sent = true; // Send second flit of ll 7972 } 7973 else 7974 { 7975 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 7976 } 7977 } 7978 } 7979 break; 7980 } 7981 ////////////////// 7982 case TGT_RSP_WRITE: // send the write acknowledge 7983 { 7984 if(p_vci_tgt.rspack) 7985 { 7986 7987 #if 
DEBUG_MEMC_TGT_RSP 7988 if(m_debug) 7989 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 7990 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 7991 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 7992 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl; 7993 #endif 7994 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 7995 r_write_to_tgt_rsp_req = false; 7996 } 7997 break; 7998 } 7999 ///////////////////// 8000 case TGT_RSP_CLEANUP: // not clear to me (AG) 8001 { 8002 if(p_vci_tgt.rspack) 8003 { 8004 8005 #if DEBUG_MEMC_TGT_RSP 8006 if(m_debug) 8007 { 8008 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 8009 << " / rsrcid = " << std::dec << r_cleanup_to_tgt_rsp_srcid.read() 8010 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 8011 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl 8012 << " / data = " << std::hex << r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() << std::dec << std::endl; 8013 } 8014 #endif 8015 8016 uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length.read() - 1; 8017 bool is_ll = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8018 8019 if (r_cleanup_to_tgt_rsp_type.read() or ((r_tgt_rsp_cpt.read() == last_word_idx) and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll) ) 8020 { 8021 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 8022 r_cleanup_to_tgt_rsp_req = false; 8023 r_tgt_rsp_key_sent = false; 8024 8025 8026 if (r_cleanup_to_tgt_rsp_pktid.read() == 0x0) 8027 { 8028 m_cpt_read_data_unc ++; 8029 } 8030 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x1) 8031 { 8032 m_cpt_read_data_miss_CC ++; 8033 } 8034 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x2) 8035 { 8036 m_cpt_read_ins_unc ++; 8037 } 8038 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x3) 8039 { 8040 m_cpt_read_ins_miss ++; 8041 } 8042 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x6) 8043 { 8044 m_cpt_read_ll_CC ++; 8045 } 8046 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x9) 8047 { 8048 m_cpt_read_data_miss_NCC ++; 8049 } 8050 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x14) 8051 { 8052 m_cpt_read_ll_NCC ++; 8053 } 8054 else if (!r_cleanup_to_tgt_rsp_type.read()) 8055 { 8056 m_cpt_read_WTF ++; 8057 } 8058 8059 } 8060 else 8061 { 8062 if (is_ll) 8063 { 8064 r_tgt_rsp_key_sent = true; 8065 } 8066 else 8067 { 8068 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; 8069 } 8070 } 8071 } 8072 break; 8073 } 8074 ///////////////// 8075 case TGT_RSP_CAS: // send one atomic word response 8076 { 8077 if(p_vci_tgt.rspack) 8078 { 8079 8080 #if DEBUG_MEMC_TGT_RSP 8081 if(m_debug) 8082 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 8083 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 8084 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read() 8085 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl; 8086 #endif 8087 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 8088 r_cas_to_tgt_rsp_req = false; 8089 } 8090 break; 8091 } 8092 ////////////////// 8093 case TGT_RSP_XRAM: // send the response after XRAM access 8094 { 8095 if ( p_vci_tgt.rspack ) 8096 { 8097 8098 #if DEBUG_MEMC_TGT_RSP 8099 if( m_debug ) 8100 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 8101 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 8102 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 8103 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 8104 << " / rdata = " <<
r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 8105 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 8106 #endif 8107 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 8108 r_xram_rsp_to_tgt_rsp_length.read() - 1; 8109 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8110 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8111 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 8112 8113 if (((is_last_word or is_error) and not is_ll) or 8114 (r_tgt_rsp_key_sent.read() and is_ll)) 8115 { 8116 // Last word sent in case of READ or second flit sent in case of LL 8117 r_tgt_rsp_key_sent = false; 8118 r_xram_rsp_to_tgt_rsp_req = false; 8119 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 8120 8121 8122 if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x0) 8123 { 8124 m_cpt_read_data_unc ++; 8125 } 8126 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x1) 8127 { 8128 m_cpt_read_data_miss_CC ++; 8129 } 8130 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x2) 8131 { 8132 m_cpt_read_ins_unc ++; 8133 } 8134 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x3) 8135 { 8136 m_cpt_read_ins_miss ++; 8137 } 8138 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x6) 8139 { 8140 m_cpt_read_ll_CC ++; 8141 } 8142 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x9) 8143 { 8144 m_cpt_read_data_miss_NCC ++; 8145 } 8146 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x14) 8147 { 8148 m_cpt_read_ll_NCC ++; 8149 } 8150 else 8151 { 8152 m_cpt_read_WTF ++; 8153 } 8154 8155 } 8156 else 8157 { 8158 if (is_ll) 8159 { 8160 r_tgt_rsp_key_sent = true; // Send second flit of ll 8161 } 8162 else 8163 { 8164 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 8165 } 8166 } 8167 } 8168 break; 8169 } 8170 /////////////////////// 8171 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction 8172 { 8173 if(p_vci_tgt.rspack) 8174 { 8175 8176 #if DEBUG_MEMC_TGT_RSP 8177 if(m_debug) 8178 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 8179 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 8180 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 8181 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 8182 #endif 8183 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 8184 r_multi_ack_to_tgt_rsp_req = false; 8185 } 8186 break; 8187 } 8188 } // end switch tgt_rsp_fsm 8189 8190 //////////////////////////////////////////////////////////////////////////////////// 8191 // ALLOC_UPT FSM 8192 //////////////////////////////////////////////////////////////////////////////////// 8193 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT), 8194 // with a round robin priority between three FSMs, with the following order: 8195 // WRITE -> CAS -> MULTI_ACK 8196 // - The WRITE FSM initiates an update transaction and sets a new entry in the UPT. 8197 // - The CAS FSM does the same thing as the WRITE FSM. 8198 // - The MULTI_ACK FSM completes those transactions and erases the UPT entry. 8199 // The resource is always allocated.
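////////////////////////////////////////////////////////////////////////////////////
// Illustration: every ALLOC_* arbiter below (UPT, IVT, DIR, TRT, HEAP) follows the
// same shape: the current owner keeps the resource as long as its FSM is still in a
// state that uses it, otherwise the other candidate FSMs are polled in a fixed cyclic
// order starting just after the owner. A stand-alone sketch of that shape, with
// hypothetical helper names (the real arbiters test the r_*_fsm registers directly):
////////////////////////////////////////////////////////////////////////////////////

#include <array>
#include <cstddef>
#include <functional>

template <std::size_t N>
int alloc_round_robin(int owner,
                      const std::array<std::function<bool()>, N>& still_using,  // owner still in a *_LOCK state
                      const std::array<std::function<bool()>, N>& requesting)   // candidate asks for the resource
{
    if (still_using[owner]()) return owner;                 // resource kept by its current owner
    for (std::size_t k = 1; k < N; ++k)
    {
        int cand = static_cast<int>((owner + k) % N);
        if (requesting[cand]()) return cand;                // first requester after the owner wins
    }
    return owner;                                           // nobody asks: allocation unchanged
}
////////////////////////////////////////////////////////////////////////////////////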
8200 ///////////////////////////////////////////////////////////////////////////////////// 8201 switch(r_alloc_upt_fsm.read()) 8202 { 8203 ///////////////////////// 8204 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 8205 if (r_write_fsm.read() != WRITE_UPT_LOCK) 8206 { 8207 if (r_cas_fsm.read() == CAS_UPT_LOCK) 8208 r_alloc_upt_fsm = ALLOC_UPT_CAS; 8209 8210 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 8211 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 8212 else 8213 m_cpt_upt_unused++; 8214 } 8215 break; 8216 8217 ///////////////////////// 8218 case ALLOC_UPT_CAS: // allocated to CAS FSM 8219 if (r_cas_fsm.read() != CAS_UPT_LOCK) 8220 { 8221 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 8222 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 8223 8224 else if (r_write_fsm.read() == WRITE_UPT_LOCK) 8225 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 8226 8227 else 8228 m_cpt_upt_unused++; 8229 } 8230 break; 8231 8232 ///////////////////////// 8233 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 8234 if ((r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK ) and 8235 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR)) 8236 { 8237 if (r_write_fsm.read() == WRITE_UPT_LOCK) 8238 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 8239 8240 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 8241 r_alloc_upt_fsm = ALLOC_UPT_CAS; 8242 else 8243 m_cpt_upt_unused++; 8244 } 8245 break; 8246 } // end switch r_alloc_upt_fsm 8247 8248 //////////////////////////////////////////////////////////////////////////////////// 8249 // ALLOC_IVT FSM 8250 //////////////////////////////////////////////////////////////////////////////////// 8251 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT), 8252 // with a round robin priority between six FSMs, with the following order: 8253 // WRITE -> READ -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG 8254 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry 8255 // in IVT. 8256 // - The CAS FSM does the same thing as the WRITE FSM. 8257 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transactions and sets 8258 // a new entry in the IVT. 8259 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM 8260 // - The CLEANUP FSM completes those transactions and erases the IVT entry. 8261 // The resource is always allocated.
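////////////////////////////////////////////////////////////////////////////////////
// Illustration: both the UPT and the IVT essentially associate a pending coherence
// transaction with a count of expected acknowledgements: the allocating FSMs (WRITE,
// CAS, XRAM_RSP, CONFIG) set an entry with the number of copies to update or
// invalidate, the acknowledging FSMs (MULTI_ACK, CLEANUP) decrement it, and the
// entry is erased when the count reaches zero so the final response can be sent.
// A simplified, hypothetical sketch of that lifecycle (field and function names are
// assumptions, not the actual UpdateTab interface):
////////////////////////////////////////////////////////////////////////////////////

#include <cassert>
#include <cstdint>
#include <vector>

struct PendingEntry
{
    bool     valid;    // entry in use
    uint64_t nline;    // cache line concerned by the update / invalidate
    uint32_t count;    // acknowledgements still expected
    uint32_t srcid;    // initiator waiting for the final response
};

// allocation side (e.g. WRITE FSM in WRITE_UPT_LOCK): returns the entry index, or -1 if full
inline int table_set(std::vector<PendingEntry>& tab, uint64_t nline, uint32_t count, uint32_t srcid)
{
    for (std::size_t i = 0; i < tab.size(); ++i)
        if (not tab[i].valid)
        {
            tab[i].valid = true;
            tab[i].nline = nline;
            tab[i].count = count;
            tab[i].srcid = srcid;
            return static_cast<int>(i);     // index carried by the UPDATE / INVAL packets
        }
    return -1;                              // table full: the requesting FSM must wait
}

// acknowledgement side (e.g. MULTI_ACK or CLEANUP FSM): returns true when the entry is erased
inline bool table_ack(std::vector<PendingEntry>& tab, int index)
{
    assert(tab[index].valid and tab[index].count > 0);
    if (--tab[index].count == 0)
    {
        tab[index].valid = false;           // entry erased: final response may be sent
        return true;
    }
    return false;
}
////////////////////////////////////////////////////////////////////////////////////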
8262 ///////////////////////////////////////////////////////////////////////////////////// 8263 switch(r_alloc_ivt_fsm.read()) 8264 { 8265 ////////////////////////// 8266 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 8267 if ((r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 8268 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB) and 8269 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK)) 8270 { 8271 if(r_read_fsm.read() == READ_IVT_LOCK) 8272 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8273 8274 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8275 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8276 8277 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8278 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8279 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8280 8281 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8282 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8283 8284 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8285 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8286 8287 else 8288 m_cpt_ivt_unused++; 8289 } 8290 break; 8291 8292 ////////////////////////// 8293 case ALLOC_IVT_READ: // allocated to READ FSM 8294 if (r_read_fsm.read() != READ_IVT_LOCK) 8295 { 8296 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8297 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8298 8299 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8300 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8301 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8302 8303 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8304 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8305 8306 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8307 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8308 8309 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8310 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8311 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8312 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8313 else 8314 m_cpt_ivt_unused++; 8315 } 8316 break; 8317 8318 ////////////////////////// 8319 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 8320 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 8321 { 8322 if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8323 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8324 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8325 8326 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8327 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8328 8329 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8330 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8331 8332 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8333 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8334 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8335 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8336 8337 else if(r_read_fsm.read() == READ_IVT_LOCK) 8338 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8339 8340 else 8341 m_cpt_ivt_unused++; 8342 } 8343 break; 8344 8345 ////////////////////////// 8346 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 8347 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 8348 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT) and 8349 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) 8350 { 8351 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8352 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8353 8354 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8355 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8356 8357 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8358 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8359 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8360 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8361 8362 else if(r_read_fsm.read() == READ_IVT_LOCK) 8363 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8364 8365 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8366 r_alloc_ivt_fsm = 
ALLOC_IVT_XRAM_RSP; 8367 8368 else 8369 m_cpt_ivt_unused++; 8370 } 8371 break; 8372 8373 ////////////////////////// 8374 case ALLOC_IVT_CAS: // allocated to CAS FSM 8375 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 8376 { 8377 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8378 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8379 8380 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8381 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8382 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8383 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8384 8385 else if(r_read_fsm.read() == READ_IVT_LOCK) 8386 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8387 8388 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8389 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8390 8391 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8392 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8393 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8394 8395 else 8396 m_cpt_ivt_unused++; 8397 } 8398 break; 8399 8400 ////////////////////////// 8401 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 8402 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 8403 { 8404 if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8405 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8406 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8407 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8408 8409 else if(r_read_fsm.read() == READ_IVT_LOCK) 8410 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8411 8412 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8413 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8414 8415 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8416 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8417 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8418 8419 else if(r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8420 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8421 else 8422 m_cpt_ivt_unused++; 8423 } 8424 break; 8425 } // end switch r_alloc_ivt_fsm 8426 8427 //////////////////////////////////////////////////////////////////////////////////// 8428 // ALLOC_DIR FSM 8429 //////////////////////////////////////////////////////////////////////////////////// 8430 // The ALLOC_DIR FSM allocates the access to the directory and 8431 // the data cache with a round robin priority between 6 user FSMs : 8432 // The cyclic ordering is CONFIG > READ > WRITE > CAS > CLEANUP > XRAM_RSP 8433 // The ressource is always allocated. 8434 ///////////////////////////////////////////////////////////////////////////////////// 8435 8436 switch(r_alloc_dir_fsm.read()) 8437 { 8438 ///////////////////// 8439 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 
8440 // All the WAYS of a SET initialized in parallel 8441 8442 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 8443 8444 if(r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 8445 { 8446 m_cache_directory.init(); 8447 r_alloc_dir_fsm = ALLOC_DIR_READ; 8448 } 8449 break; 8450 8451 ////////////////////// 8452 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 8453 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 8454 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 8455 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 8456 (r_config_fsm.read() != CONFIG_TRT_SET) and 8457 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 8458 { 8459 if(r_read_fsm.read() == READ_DIR_REQ) 8460 r_alloc_dir_fsm = ALLOC_DIR_READ; 8461 8462 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8463 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8464 8465 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8466 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8467 8468 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8469 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8470 8471 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8472 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8473 } 8474 break; 8475 8476 //////////////////// 8477 case ALLOC_DIR_READ: // allocated to READ FSM 8478 if( ((r_read_fsm.read() != READ_DIR_REQ) and 8479 (r_read_fsm.read() != READ_DIR_LOCK) and 8480 (r_read_fsm.read() != READ_TRT_LOCK) and 8481 (r_read_fsm.read() != READ_HEAP_REQ) and 8482 (r_read_fsm.read() != READ_IVT_LOCK)) 8483 or 8484 ((r_read_fsm.read() == READ_TRT_LOCK) and 8485 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ)) ) 8486 { 8487 if(r_write_fsm.read() == WRITE_DIR_REQ) 8488 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8489 8490 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8491 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8492 8493 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8494 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8495 8496 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8497 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8498 8499 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8500 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8501 8502 else 8503 m_cpt_dir_unused++; 8504 } 8505 else 8506 m_cpt_read_fsm_dir_used++; 8507 break; 8508 8509 ///////////////////// 8510 case ALLOC_DIR_WRITE: 8511 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 8512 (r_write_fsm.read() != WRITE_DIR_LOCK) and 8513 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 8514 (r_write_fsm.read() != WRITE_DIR_HIT) and 8515 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 8516 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 8517 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK) and 8518 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 8519 (r_write_fsm.read() != WRITE_UPT_LOCK) and 8520 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 8521 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB)) 8522 or 8523 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 8524 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 8525 or 8526 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 8527 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 8528 { 8529 if(r_cas_fsm.read() == CAS_DIR_REQ) 8530 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8531 8532 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8533 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8534 8535 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8536 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8537 8538 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8539 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8540 8541 else if(r_read_fsm.read() == READ_DIR_REQ) 8542 r_alloc_dir_fsm = ALLOC_DIR_READ; 8543 8544 else 8545 m_cpt_dir_unused++; 8546 } 8547 else 8548 m_cpt_write_fsm_dir_used++; 8549 break; 
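////////////////////////////////////////////////////////////////////////////////////
// Illustration: the ALLOC_DIR_RESET state above initializes the directory one SET
// per cycle, using r_alloc_dir_reset_cpt as the set index, before handing the
// directory to the READ FSM. A simplified stand-alone sketch of that multi-cycle
// initialization (hypothetical types; the component itself relies on
// m_cache_directory.init(), as called above):
////////////////////////////////////////////////////////////////////////////////////

#include <cstddef>
#include <vector>

struct DirEntry { bool valid; };

class DirResetSequencer
{
public:
    DirResetSequencer(std::size_t sets, std::size_t ways)
        : m_dir(sets * ways), m_sets(sets), m_ways(ways), m_cpt(0) {}

    // called once per cycle while the allocator is in its RESET state;
    // returns true when the last set has been cleared (then leave RESET)
    bool cycle()
    {
        for (std::size_t way = 0; way < m_ways; ++way)     // all WAYS of the SET in parallel
            m_dir[m_cpt * m_ways + way].valid = false;
        if (m_cpt == m_sets - 1) return true;
        m_cpt = m_cpt + 1;
        return false;
    }

private:
    std::vector<DirEntry> m_dir;
    std::size_t m_sets, m_ways, m_cpt;
};
////////////////////////////////////////////////////////////////////////////////////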
8550 8551 /////////////////// 8552 case ALLOC_DIR_CAS: // allocated to CAS FSM 8553 if(((r_cas_fsm.read() != CAS_DIR_REQ) and 8554 (r_cas_fsm.read() != CAS_DIR_LOCK) and 8555 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 8556 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 8557 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 8558 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 8559 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 8560 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 8561 (r_cas_fsm.read() != CAS_UPT_LOCK) and 8562 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 8563 or 8564 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 8565 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 8566 or 8567 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 8568 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 8569 { 8570 if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8571 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8572 8573 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8574 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8575 8576 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8577 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8578 8579 else if(r_read_fsm.read() == READ_DIR_REQ) 8580 r_alloc_dir_fsm = ALLOC_DIR_READ; 8581 8582 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8583 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8584 8585 else 8586 m_cpt_dir_unused++; 8587 } 8588 else 8589 m_cpt_cas_fsm_dir_used++; 8590 break; 8591 8592 /////////////////////// 8593 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 8594 if((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 8595 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 8596 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 8597 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 8598 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) 8599 { 8600 if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8601 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8602 8603 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8604 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8605 8606 else if(r_read_fsm.read() == READ_DIR_REQ) 8607 r_alloc_dir_fsm = ALLOC_DIR_READ; 8608 8609 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8610 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8611 8612 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8613 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8614 8615 else 8616 m_cpt_dir_unused++; 8617 } 8618 else 8619 m_cpt_cleanup_fsm_dir_used++; 8620 break; 8621 8622 //////////////////////// 8623 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 8624 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 8625 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 8626 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 8627 { 8628 if(r_config_fsm.read() == CONFIG_DIR_REQ) 8629 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8630 8631 else if(r_read_fsm.read() == READ_DIR_REQ) 8632 r_alloc_dir_fsm = ALLOC_DIR_READ; 8633 8634 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8635 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8636 8637 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8638 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8639 8640 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8641 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8642 8643 else 8644 m_cpt_dir_unused++; 8645 } 8646 else 8647 m_cpt_xram_rsp_fsm_dir_used++; 8648 break; 8649 8650 } // end switch alloc_dir_fsm 8651 8652 //////////////////////////////////////////////////////////////////////////////////// 8653 // ALLOC_TRT FSM 8654 //////////////////////////////////////////////////////////////////////////////////// 8655 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 8656 // with a round robin priority between 4 user FSMs : 8657 // The cyclic priority is READ > 
WRITE > CAS > XRAM_RSP 8658 // The ressource is always allocated. 8659 /////////////////////////////////////////////////////////////////////////////////// 8660 8661 switch(r_alloc_trt_fsm.read()) 8662 { 8663 //////////////////// 8664 case ALLOC_TRT_READ: 8665 if(r_read_fsm.read() != READ_TRT_LOCK) 8666 { 8667 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8668 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8669 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8670 8671 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8672 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8673 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8674 8675 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8676 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8677 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8678 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8679 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8680 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8681 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8682 8683 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8684 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8685 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8686 8687 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8688 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8689 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8690 8691 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8692 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8693 8694 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8695 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8696 8697 else 8698 m_cpt_trt_unused++; 8699 } 8700 else 8701 m_cpt_read_fsm_trt_used++; 8702 break; 8703 8704 ///////////////////// 8705 case ALLOC_TRT_WRITE: 8706 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 8707 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 8708 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 8709 { 8710 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8711 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8712 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8713 8714 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8715 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8716 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8717 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8718 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8719 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8720 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8721 8722 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8723 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8724 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8725 8726 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8727 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8728 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8729 8730 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8731 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8732 8733 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8734 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8735 8736 else if(r_read_fsm.read() == READ_TRT_LOCK) 8737 r_alloc_trt_fsm = ALLOC_TRT_READ; 8738 8739 else 8740 m_cpt_trt_unused++; 8741 } 8742 else 8743 m_cpt_write_fsm_trt_used++; 8744 break; 8745 8746 //////////////////// 8747 case ALLOC_TRT_CAS: 8748 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 8749 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 8750 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 8751 { 8752 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8753 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8754 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8755 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8756 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8757 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8758 
r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8759 8760 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8761 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8762 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8763 8764 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8765 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8766 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8767 8768 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8769 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8770 8771 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8772 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8773 8774 else if(r_read_fsm.read() == READ_TRT_LOCK) 8775 r_alloc_trt_fsm = ALLOC_TRT_READ; 8776 8777 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8778 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8779 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8780 8781 else 8782 m_cpt_trt_unused++; 8783 } 8784 else 8785 m_cpt_cas_fsm_trt_used++; 8786 break; 8787 /////////////////////// 8788 case ALLOC_TRT_IXR_CMD: 8789 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 8790 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 8791 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 8792 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 8793 (r_ixr_cmd_fsm.read() != IXR_CMD_CLEANUP_TRT) and 8794 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 8795 { 8796 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8797 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8798 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8799 8800 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8801 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8802 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8803 8804 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8805 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8806 8807 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8808 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8809 8810 else if(r_read_fsm.read() == READ_TRT_LOCK) 8811 r_alloc_trt_fsm = ALLOC_TRT_READ; 8812 8813 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8814 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8815 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8816 8817 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8818 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8819 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8820 } 8821 break; 8822 8823 //////////////////////// 8824 case ALLOC_TRT_XRAM_RSP: 8825 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 8826 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 8827 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 8828 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 8829 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 8830 { 8831 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8832 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8833 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8834 8835 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8836 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8837 8838 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8839 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8840 8841 else if(r_read_fsm.read() == READ_TRT_LOCK) 8842 r_alloc_trt_fsm = ALLOC_TRT_READ; 8843 8844 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8845 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8846 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8847 8848 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8849 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8850 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8851 8852 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8853 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8854 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8855 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8856 (r_ixr_cmd_fsm.read() == 
IXR_CMD_CLEANUP_TRT) or 8857 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8858 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8859 8860 else 8861 m_cpt_trt_unused++; 8862 } 8863 else 8864 m_cpt_xram_rsp_fsm_trt_used++; 8865 break; 8866 8867 //////////////////////// 8868 case ALLOC_TRT_IXR_RSP: 8869 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 8870 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 8871 { 8872 if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8873 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8874 8875 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8876 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8877 8878 else if(r_read_fsm.read() == READ_TRT_LOCK) 8879 r_alloc_trt_fsm = ALLOC_TRT_READ; 8880 8881 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) || 8882 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8883 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8884 8885 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) || 8886 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8887 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8888 8889 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8890 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8891 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8892 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8893 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8894 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8895 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8896 8897 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) && 8898 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8899 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8900 8901 else 8902 m_cpt_trt_unused++; 8903 } 8904 else 8905 m_cpt_ixr_fsm_trt_used++; 8906 break; 8907 8908 ////////////////////// 8909 case ALLOC_TRT_CONFIG: 8910 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 8911 (r_config_fsm.read() != CONFIG_TRT_SET)) 8912 { 8913 if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8914 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8915 8916 else if(r_read_fsm.read() == READ_TRT_LOCK) 8917 r_alloc_trt_fsm = ALLOC_TRT_READ; 8918 8919 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8920 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8921 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8922 8923 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8924 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8925 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8926 8927 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8928 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8929 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8930 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8931 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8932 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8933 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8934 8935 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8936 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8937 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8938 8939 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8940 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8941 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8942 } 8943 break; 8944 8945 //////////////////////// 8946 case ALLOC_TRT_CLEANUP: 8947 /*ODCCP*///std::cout << "TRT ALLOCATED TO CLEANUP" << std::endl; 8948 if(r_cleanup_fsm.read() != CLEANUP_IXR_REQ) 8949 { 8950 if(r_read_fsm.read() == READ_TRT_LOCK) 8951 r_alloc_trt_fsm = ALLOC_TRT_READ; 8952 8953 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8954 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8955 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8956 8957 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8958 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8959 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8960 8961 else 
if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8962 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8963 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8964 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8965 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8966 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8967 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8968 8969 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8970 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8971 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8972 8973 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) || 8974 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8975 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8976 8977 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8978 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8979 } 8980 break; 8981 8982 8983 } // end switch alloc_trt_fsm 8984 8985 //////////////////////////////////////////////////////////////////////////////////// 8986 // ALLOC_HEAP FSM 8987 //////////////////////////////////////////////////////////////////////////////////// 8988 // The ALLOC_HEAP FSM allocates the access to the heap 8989 // with a round robin priority between 6 user FSMs : 8990 // The cyclic ordering is READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 8991 // The ressource is always allocated. 8992 ///////////////////////////////////////////////////////////////////////////////////// 8993 8994 switch(r_alloc_heap_fsm.read()) 8995 { 8996 //////////////////// 8997 case ALLOC_HEAP_RESET: 8998 // Initializes the heap one ENTRY each cycle. 8999 9000 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 9001 9002 if(r_alloc_heap_reset_cpt.read() == (m_heap_size-1)) 9003 { 9004 m_heap.init(); 9005 9006 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9007 } 9008 break; 9009 9010 //////////////////// 9011 case ALLOC_HEAP_READ: 9012 if((r_read_fsm.read() != READ_HEAP_REQ) and 9013 (r_read_fsm.read() != READ_HEAP_LOCK) and 9014 (r_read_fsm.read() != READ_HEAP_ERASE)) 9015 { 9016 if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9017 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9018 9019 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9020 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9021 9022 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9023 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9024 9025 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9026 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9027 9028 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9029 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9030 9031 else 9032 m_cpt_heap_unused++; 9033 } 9034 else 9035 m_cpt_read_fsm_heap_used++; 9036 break; 9037 9038 ///////////////////// 9039 case ALLOC_HEAP_WRITE: 9040 if((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 9041 (r_write_fsm.read() != WRITE_UPT_REQ) and 9042 (r_write_fsm.read() != WRITE_UPT_NEXT)) 9043 { 9044 if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9045 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9046 9047 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9048 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9049 9050 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9051 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9052 9053 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9054 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9055 9056 else if(r_read_fsm.read() == READ_HEAP_REQ) 9057 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9058 9059 else 9060 m_cpt_heap_unused++; 9061 } 9062 else 9063 m_cpt_write_fsm_heap_used++; 9064 break; 9065 9066 //////////////////// 9067 case ALLOC_HEAP_CAS: 9068 if((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 9069 (r_cas_fsm.read() != CAS_UPT_REQ) and 9070 
(r_cas_fsm.read() != CAS_UPT_NEXT)) 9071 { 9072 if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9073 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9074 9075 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9076 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9077 9078 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9079 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9080 9081 else if(r_read_fsm.read() == READ_HEAP_REQ) 9082 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9083 9084 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9085 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9086 9087 else 9088 m_cpt_heap_unused++; 9089 } 9090 else 9091 m_cpt_cas_fsm_heap_used++; 9092 break; 9093 9094 /////////////////////// 9095 case ALLOC_HEAP_CLEANUP: 9096 if((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 9097 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 9098 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 9099 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 9100 { 9101 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9102 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9103 9104 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9105 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9106 9107 else if(r_read_fsm.read() == READ_HEAP_REQ) 9108 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9109 9110 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9111 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9112 9113 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9114 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9115 9116 else 9117 m_cpt_heap_unused++; 9118 } 9119 else 9120 m_cpt_cleanup_fsm_heap_used++; 9121 break; 9122 9123 //////////////////////// 9124 case ALLOC_HEAP_XRAM_RSP: 9125 if((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 9126 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 9127 { 9128 if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9129 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9130 9131 else if(r_read_fsm.read() == READ_HEAP_REQ) 9132 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9133 9134 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9135 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9136 9137 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9138 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9139 9140 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9141 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9142 9143 } 9144 break; 9145 9146 /////////////////////// 9147 case ALLOC_HEAP_CONFIG: 9148 if((r_config_fsm.read() != CONFIG_HEAP_REQ) and 9149 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 9150 { 9151 if(r_read_fsm.read() == READ_HEAP_REQ) 9152 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9153 9154 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9155 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9156 9157 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9158 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9159 9160 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9161 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9162 9163 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9164 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9165 9166 else 9167 m_cpt_heap_unused++; 9168 } 9169 else 9170 m_cpt_xram_rsp_fsm_heap_used++; 9171 break; 9172 9173 } // end switch alloc_heap_fsm 9174 9175 ///////////////////////////////////////////////////////////////////// 9176 // TGT_CMD to READ FIFO 9177 ///////////////////////////////////////////////////////////////////// 9178 9179 m_cmd_read_addr_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9180 p_vci_tgt.address.read() ); 9181 m_cmd_read_length_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9182 p_vci_tgt.plen.read()>>2 ); 9183 m_cmd_read_srcid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9184 p_vci_tgt.srcid.read() ); 9185 
m_cmd_read_trdid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9186 p_vci_tgt.trdid.read() ); 9187 m_cmd_read_pktid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9188 p_vci_tgt.pktid.read() ); 9189 9190 ///////////////////////////////////////////////////////////////////// 9191 // TGT_CMD to WRITE FIFO 9192 ///////////////////////////////////////////////////////////////////// 9193 9194 m_cmd_write_addr_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9195 (addr_t)p_vci_tgt.address.read() ); 9196 m_cmd_write_eop_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9197 p_vci_tgt.eop.read() ); 9198 m_cmd_write_srcid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9199 p_vci_tgt.srcid.read() ); 9200 m_cmd_write_trdid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9201 p_vci_tgt.trdid.read() ); 9202 m_cmd_write_pktid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9203 p_vci_tgt.pktid.read() ); 9204 m_cmd_write_data_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9205 p_vci_tgt.wdata.read() ); 9206 m_cmd_write_be_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9207 p_vci_tgt.be.read() ); 9208 9209 //////////////////////////////////////////////////////////////////////////////////// 9210 // TGT_CMD to CAS FIFO 9211 //////////////////////////////////////////////////////////////////////////////////// 9212 9213 m_cmd_cas_addr_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9214 (addr_t)p_vci_tgt.address.read() ); 9215 m_cmd_cas_eop_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9216 p_vci_tgt.eop.read() ); 9217 m_cmd_cas_srcid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9218 p_vci_tgt.srcid.read() ); 9219 m_cmd_cas_trdid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9220 p_vci_tgt.trdid.read() ); 9221 m_cmd_cas_pktid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9222 p_vci_tgt.pktid.read() ); 9223 m_cmd_cas_wdata_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9224 p_vci_tgt.wdata.read() ); 9225 9226 //////////////////////////////////////////////////////////////////////////////////// 9227 // CC_RECEIVE to CLEANUP FIFO 9228 //////////////////////////////////////////////////////////////////////////////////// 9229 9230 if(cc_receive_to_cleanup_fifo_put) 9231 { 9232 if(cc_receive_to_cleanup_fifo_get) 1170 9233 { 1171 found = true; 1172 if ( m_seg[seg_id]->special() ) config = true; 1173 } 1174 } 1175 1176 if ( not found ) /////////// out of segment error 1177 { 1178 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1179 } 1180 else if ( config ) /////////// configuration command 1181 { 1182 if ( not p_vci_tgt.eop.read() ) r_tgt_cmd_fsm = TGT_CMD_ERROR; 1183 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 1184 } 1185 else //////////// memory access 1186 { 1187 if ( p_vci_tgt.cmd.read() == vci_param_int::CMD_READ ) 1188 { 1189 // check that the pktid is either : 1190 // TYPE_READ_DATA_UNC 1191 // TYPE_READ_DATA_MISS 1192 // TYPE_READ_INS_UNC 1193 // TYPE_READ_INS_MISS 1194 // ==> bit2 must be zero with the TSAR encoding 1195 // ==> mask = 0b0100 = 0x4 1196 assert( ((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 1197 "The type specified in the pktid field is incompatible with the READ CMD"); 1198 r_tgt_cmd_fsm = TGT_CMD_READ; 1199 } 1200 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1201 { 1202 // check that the pktid is TYPE_WRITE 1203 // ==> TYPE_WRITE = X100 with the TSAR encoding 1204 // ==> mask = 0b0111 = 0x7 1205 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x4) and 1206 "The type specified in the pktid field is incompatible with the WRITE CMD"); 1207 r_tgt_cmd_fsm = 
TGT_CMD_WRITE; 1208 } 1209 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1210 { 1211 // check that the pktid is TYPE_LL 1212 // ==> TYPE_LL = X110 with the TSAR encoding 1213 // ==> mask = 0b0111 = 0x7 1214 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x6) and 1215 "The type specified in the pktid field is incompatible with the LL CMD"); 1216 r_tgt_cmd_fsm = TGT_CMD_READ; 1217 } 1218 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1219 { 1220 // check that the pktid is either : 1221 // TYPE_CAS 1222 // TYPE_SC 1223 // ==> TYPE_CAS = X101 with the TSAR encoding 1224 // ==> TYPE_SC = X111 with the TSAR encoding 1225 // ==> mask = 0b0101 = 0x5 1226 assert(((p_vci_tgt.pktid.read() & 0x5) == 0x5) and 1227 "The type specified in the pktid field is incompatible with the NOP CMD"); 1228 1229 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 1230 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 9234 m_cc_receive_to_cleanup_fifo.put_and_get( ((uint64_t)(p_dspin_p2m.eop.read()&0x1) << 32) | p_dspin_p2m.data.read() ); 1231 9235 } 1232 9236 else 1233 9237 { 1234 r_tgt_cmd_fsm = TGT_CMD_ERROR;9238 m_cc_receive_to_cleanup_fifo.simple_put( ((uint64_t)(p_dspin_p2m.eop.read()&0x1) << 32) | p_dspin_p2m.data.read() ); 1235 9239 } 1236 }1237 }1238 break;1239 1240 ///////////////////1241 case TGT_CMD_ERROR: // response error must be sent1242 1243 // wait if pending request1244 if(r_tgt_cmd_to_tgt_rsp_req.read()) break;1245 1246 // consume all the command packet flits before sending response error1247 if ( p_vci_tgt.cmdval and p_vci_tgt.eop )1248 {1249 r_tgt_cmd_to_tgt_rsp_req = true;1250 r_tgt_cmd_to_tgt_rsp_error = 1;1251 r_tgt_cmd_fsm = TGT_CMD_IDLE;1252 1253 #if DEBUG_MEMC_TGT_CMD1254 if(m_debug)1255 std::cout << " <MEMC " << name()1256 << " TGT_CMD_ERROR> Segmentation violation:"1257 << " address = " << std::hex << p_vci_tgt.address.read()1258 << " / srcid = " << p_vci_tgt.srcid.read()1259 << " / trdid = " << p_vci_tgt.trdid.read()1260 << " / pktid = " << p_vci_tgt.pktid.read()1261 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl;1262 #endif1263 1264 }1265 break;1266 1267 ////////////////////1268 case TGT_CMD_CONFIG: // execute config request and return response1269 {1270 addr_t seg_base = m_seg[m_seg_config]->baseAddress();1271 addr_t address = p_vci_tgt.address.read();1272 size_t cell = (address - seg_base)/vci_param_int::B;1273 1274 bool need_rsp;1275 size_t error;1276 uint32_t rdata = 0; // default value1277 uint32_t wdata = p_vci_tgt.wdata.read();1278 1279 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock1280 and (cell == MEMC_LOCK) )1281 {1282 rdata = (uint32_t)r_config_lock.read();1283 need_rsp = true;1284 error = 0;1285 r_config_lock = true;1286 if ( rdata == 0 )1287 {1288 r_tgt_cmd_srcid = p_vci_tgt.srcid.read();1289 r_tgt_cmd_trdid = p_vci_tgt.trdid.read();1290 r_tgt_cmd_pktid = p_vci_tgt.pktid.read();1291 }1292 1293 }1294 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock1295 and (cell == MEMC_LOCK)1296 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) )1297 {1298 need_rsp = true;1299 error = 0;1300 r_config_lock = false;1301 }1302 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo1303 and (cell == MEMC_ADDR_LO)1304 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) )1305 {1306 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and1307 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line");1308 1309 need_rsp = true;1310 error = 0;1311 
r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) |1312 (addr_t)p_vci_tgt.wdata.read();1313 }1314 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi1315 and (cell == MEMC_ADDR_HI)1316 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) )1317 {1318 need_rsp = true;1319 error = 0;1320 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) |1321 ((uint64_t)p_vci_tgt.wdata.read())<<32;1322 }1323 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines1324 and (cell == MEMC_BUF_LENGTH)1325 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) )1326 {1327 need_rsp = true;1328 error = 0;1329 size_t lines = (size_t)(p_vci_tgt.wdata.read()/(m_words<<2));1330 if ( r_config_address.read()%(m_words*4) ) lines++;1331 r_config_cmd_lines = lines;1332 r_config_rsp_lines = lines;1333 }1334 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type1335 and (cell == MEMC_CMD_TYPE)1336 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) )1337 {1338 need_rsp = false;1339 error = 0;1340 r_config_cmd = p_vci_tgt.wdata.read();1341 r_config_srcid = p_vci_tgt.srcid.read();1342 r_config_trdid = p_vci_tgt.trdid.read();1343 r_config_pktid = p_vci_tgt.pktid.read();1344 9240 } 1345 9241 else 1346 9242 { 1347 need_rsp = true; 1348 error = 1; 9243 if(cc_receive_to_cleanup_fifo_get) 9244 { 9245 m_cc_receive_to_cleanup_fifo.simple_get(); 9246 } 1349 9247 } 1350 1351 if ( need_rsp ) 9248 //m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 9249 // cc_receive_to_cleanup_fifo_put, 9250 // p_dspin_p2m.data.read() ); 9251 9252 //////////////////////////////////////////////////////////////////////////////////// 9253 // CC_RECEIVE to MULTI_ACK FIFO 9254 //////////////////////////////////////////////////////////////////////////////////// 9255 9256 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 9257 cc_receive_to_multi_ack_fifo_put, 9258 p_dspin_p2m.data.read() ); 9259 9260 //////////////////////////////////////////////////////////////////////////////////// 9261 // WRITE to CC_SEND FIFO 9262 //////////////////////////////////////////////////////////////////////////////////// 9263 9264 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 9265 write_to_cc_send_fifo_inst ); 9266 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 9267 write_to_cc_send_fifo_srcid ); 9268 9269 //////////////////////////////////////////////////////////////////////////////////// 9270 // CONFIG to CC_SEND FIFO 9271 //////////////////////////////////////////////////////////////////////////////////// 9272 9273 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 9274 config_to_cc_send_fifo_inst ); 9275 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 9276 config_to_cc_send_fifo_srcid ); 9277 9278 //////////////////////////////////////////////////////////////////////////////////// 9279 // XRAM_RSP to CC_SEND FIFO 9280 //////////////////////////////////////////////////////////////////////////////////// 9281 9282 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 9283 xram_rsp_to_cc_send_fifo_inst ); 9284 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 9285 xram_rsp_to_cc_send_fifo_srcid ); 9286 9287 
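The explicit put_and_get / simple_put / simple_get sequence written out for m_cc_receive_to_cleanup_fifo above is what a single update(get, put, data) call does for the other FIFOs. A toy behavioural model of that protocol, assuming an unbounded queue (the real soclib GenericFifo is bounded and exposes rok()/wok()); it is only meant to show why the two code shapes are equivalent:

```cpp
#include <deque>

// One FIFO update cycle: 'get' consumes the head read by a consumer FSM this
// cycle, 'put' appends 'data' produced by a producer FSM.
template <typename T>
struct FifoModel
{
    std::deque<T> q;

    void update(bool get, bool put, const T &data)
    {
        if (put and get) { q.pop_front(); q.push_back(data); }  // put_and_get()
        else if (put)    { q.push_back(data); }                 // simple_put()
        else if (get)    { q.pop_front(); }                     // simple_get()
    }
};
```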
//////////////////////////////////////////////////////////////////////////////////// 9288 // CAS to CC_SEND FIFO 9289 //////////////////////////////////////////////////////////////////////////////////// 9290 9291 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 9292 cas_to_cc_send_fifo_inst ); 9293 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 9294 cas_to_cc_send_fifo_srcid ); 9295 m_cpt_cycles++; 9296 9297 } // end transition() 9298 9299 ///////////////////////////// 9300 tmpl(void)::genMoore() 9301 ///////////////////////////// 9302 { 9303 #if MONITOR_MEMCACHE_FSM == 1 9304 p_read_fsm.write (r_read_fsm.read() ); 9305 p_write_fsm.write (r_write_fsm.read() ); 9306 p_xram_rsp_fsm.write (r_xram_rsp_fsm.read() ); 9307 p_cas_fsm.write (r_cas_fsm.read() ); 9308 p_cleanup_fsm.write (r_cleanup_fsm.read() ); 9309 p_config_fsm.write (r_config_fsm.read() ); 9310 p_alloc_heap_fsm.write (r_alloc_heap_fsm.read() ); 9311 p_alloc_dir_fsm.write (r_alloc_dir_fsm.read() ); 9312 p_alloc_trt_fsm.write (r_alloc_trt_fsm.read() ); 9313 p_alloc_upt_fsm.write (r_alloc_upt_fsm.read() ); 9314 p_alloc_ivt_fsm.write (r_alloc_ivt_fsm.read() ); 9315 p_tgt_cmd_fsm.write (r_tgt_cmd_fsm.read() ); 9316 p_tgt_rsp_fsm.write (r_tgt_rsp_fsm.read() ); 9317 p_ixr_cmd_fsm.write (r_ixr_cmd_fsm.read() ); 9318 p_ixr_rsp_fsm.write (r_ixr_rsp_fsm.read() ); 9319 p_cc_send_fsm.write (r_cc_send_fsm.read() ); 9320 p_cc_receive_fsm.write (r_cc_receive_fsm.read() ); 9321 p_multi_ack_fsm.write (r_multi_ack_fsm.read() ); 9322 #endif 9323 9324 //////////////////////////////////////////////////////////// 9325 // Command signals on the p_vci_ixr port 9326 //////////////////////////////////////////////////////////// 9327 // DATA width is 8 bytes 9328 // The following values are not transmitted to XRAM 9329 // p_vci_ixr.be 9330 // p_vci_ixr.pktid 9331 // p_vci_ixr.cons 9332 // p_vci_ixr.wrap 9333 // p_vci_ixr.contig 9334 // p_vci_ixr.clen 9335 // p_vci_ixr.cfixed 9336 9337 p_vci_ixr.plen = 64; 9338 p_vci_ixr.srcid = m_srcid_x; 9339 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 9340 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 9341 p_vci_ixr.be = 0xFF; 9342 p_vci_ixr.pktid = 0; 9343 p_vci_ixr.cons = false; 9344 p_vci_ixr.wrap = false; 9345 p_vci_ixr.contig = true; 9346 p_vci_ixr.clen = 0; 9347 p_vci_ixr.cfixed = false; 9348 9349 if ( (r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 9350 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 9351 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 9352 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 9353 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND) ) 1352 9354 { 1353 // blocked if previous pending request to TGT_RSP FSM 1354 if ( r_tgt_cmd_to_tgt_rsp_req.read() ) break; 1355 1356 r_tgt_cmd_to_tgt_rsp_req = true; 1357 r_tgt_cmd_to_tgt_rsp_error = error; 1358 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1359 r_tgt_cmd_fsm = TGT_CMD_IDLE; 9355 p_vci_ixr.cmdval = true; 9356 9357 if ( r_ixr_cmd_get.read() ) // GET 9358 { 9359 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 9360 p_vci_ixr.wdata = 0; 9361 p_vci_ixr.eop = true; 9362 } 9363 else // PUT 9364 { 9365 size_t word = r_ixr_cmd_word.read(); 9366 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 9367 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 9368 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 9369 p_vci_ixr.eop = (word == (m_words-2)); 9370 } 9371 } 9372 else if (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_DATA_SEND) 9373 { 9374 p_vci_ixr.cmd = 
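On the external p_vci_ixr port the DATA width is 8 bytes, so a PUT streams a cache line as pairs of consecutive 32-bit words, with eop raised on the flit carrying words m_words-2 and m_words-1, exactly as coded just above. A free-standing sketch of that packing; the XramPutFlit struct and function name are illustrative, not part of the component:

```cpp
#include <cstdint>
#include <vector>

struct XramPutFlit { uint64_t wdata; bool eop; };

// Pack one cache line (line.size() == m_words 32-bit words) into 64-bit PUT flits.
std::vector<XramPutFlit> pack_put_line(const std::vector<uint32_t> &line)
{
    std::vector<XramPutFlit> flits;
    for (size_t word = 0; word + 1 < line.size(); word += 2)
    {
        uint64_t wdata = (uint64_t)line[word] | ((uint64_t)line[word + 1] << 32);
        flits.push_back({ wdata, word == line.size() - 2 });  // eop on the last pair
    }
    return flits;
}
```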
vci_param_ext::CMD_WRITE; 9375 p_vci_ixr.cmdval = true; 9376 /*p_vci_ixr.address = (addr_t)((r_cleanup_to_ixr_cmd_nline.read() * m_words + 9377 r_ixr_cmd_word.read()) * 4);*/ 9378 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 9379 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read()].read()) | 9380 ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read() + 1].read()) << 32)); 9381 p_vci_ixr.trdid = r_cleanup_to_ixr_cmd_index.read(); 9382 p_vci_ixr.eop = (r_ixr_cmd_word == (m_words - 2)); 9383 } 9384 9385 else 9386 { 9387 p_vci_ixr.cmdval = false; 9388 } 9389 9390 //////////////////////////////////////////////////// 9391 // Response signals on the p_vci_ixr port 9392 //////////////////////////////////////////////////// 9393 9394 if( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 9395 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ) 9396 { 9397 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 9398 } 9399 else if (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) 9400 { 9401 p_vci_ixr.rspack = true; 9402 } 9403 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 9404 { 9405 p_vci_ixr.rspack = false; 9406 } 9407 9408 //////////////////////////////////////////////////// 9409 // Command signals on the p_vci_tgt port 9410 //////////////////////////////////////////////////// 9411 9412 switch((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 9413 { 9414 case TGT_CMD_IDLE: 9415 p_vci_tgt.cmdack = false; 9416 break; 9417 9418 case TGT_CMD_CONFIG: 9419 case TGT_CMD_ERROR: 9420 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 9421 break; 9422 9423 case TGT_CMD_READ: 9424 p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); 9425 break; 9426 9427 case TGT_CMD_WRITE: 9428 p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); 9429 break; 9430 9431 case TGT_CMD_CAS: 9432 p_vci_tgt.cmdack = m_cmd_cas_addr_fifo.wok(); 9433 break; 9434 } 9435 9436 //////////////////////////////////////////////////// 9437 // Response signals on the p_vci_tgt port 9438 //////////////////////////////////////////////////// 9439 9440 switch(r_tgt_rsp_fsm.read()) 9441 { 9442 case TGT_RSP_CONFIG_IDLE: 9443 case TGT_RSP_TGT_CMD_IDLE: 9444 case TGT_RSP_READ_IDLE: 9445 case TGT_RSP_WRITE_IDLE: 9446 case TGT_RSP_CAS_IDLE: 9447 case TGT_RSP_XRAM_IDLE: 9448 case TGT_RSP_MULTI_ACK_IDLE: 9449 case TGT_RSP_CLEANUP_IDLE: 9450 { 9451 p_vci_tgt.rspval = false; 9452 p_vci_tgt.rsrcid = 0; 9453 p_vci_tgt.rdata = 0; 9454 p_vci_tgt.rpktid = 0; 9455 p_vci_tgt.rtrdid = 0; 9456 p_vci_tgt.rerror = 0; 9457 p_vci_tgt.reop = false; 9458 break; 9459 } 9460 case TGT_RSP_CONFIG: 9461 { 9462 p_vci_tgt.rspval = true; 9463 p_vci_tgt.rdata = 0; 9464 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 9465 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 9466 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 9467 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 9468 p_vci_tgt.reop = true; 9469 9470 break; 9471 } 9472 9473 case TGT_RSP_TGT_CMD: 9474 { 9475 p_vci_tgt.rspval = true; 9476 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 9477 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 9478 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 9479 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 9480 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 9481 p_vci_tgt.reop = true; 9482 9483 break; 9484 } 9485 9486 case TGT_RSP_READ: 9487 { 9488 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 9489 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 9490 bool is_ll = 
((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 9491 9492 p_vci_tgt.rspval = true; 9493 9494 if ( is_ll and not r_tgt_rsp_key_sent.read() ) 9495 { 9496 // LL response first flit 9497 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 9498 } 9499 else 9500 { 9501 // LL response second flit or READ response 9502 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 9503 } 9504 9505 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 9506 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); 9507 p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); 9508 p_vci_tgt.rerror = 0; 9509 p_vci_tgt.reop = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 9510 break; 9511 } 9512 9513 case TGT_RSP_WRITE: 9514 p_vci_tgt.rspval = true; 9515 if(((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read()) 9516 p_vci_tgt.rdata = 1; 9517 else 9518 p_vci_tgt.rdata = 0; 9519 p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 9520 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 9521 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 9522 p_vci_tgt.rerror = 0; 9523 p_vci_tgt.reop = true; 9524 break; 9525 9526 case TGT_RSP_CLEANUP: 9527 { 9528 uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length - 1; 9529 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 9530 bool is_ll = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 9531 9532 p_vci_tgt.rspval = true; 9533 if (is_ll and not r_tgt_rsp_key_sent.read()) 9534 { 9535 p_vci_tgt.rdata = r_cleanup_to_tgt_rsp_ll_key.read(); 9536 } 9537 else if (!r_cleanup_to_tgt_rsp_type.read()) 9538 { 9539 p_vci_tgt.rdata = r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 9540 } 9541 else //if the CLEANUP fsm sends a SC_RSP, then it is a success (and it caused an inval) 9542 { 9543 p_vci_tgt.rdata = 0; 9544 } 9545 p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); 9546 p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); 9547 p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); 9548 p_vci_tgt.rerror = 0; // Can be a CAS rsp 9549 p_vci_tgt.reop = r_cleanup_to_tgt_rsp_type.read() or (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 9550 break; 9551 } 9552 9553 case TGT_RSP_CAS: 9554 p_vci_tgt.rspval = true; 9555 p_vci_tgt.rdata = r_cas_to_tgt_rsp_data.read(); 9556 p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read(); 9557 p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read(); 9558 p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read(); 9559 p_vci_tgt.rerror = 0; 9560 p_vci_tgt.reop = true; 9561 break; 9562 9563 case TGT_RSP_XRAM: 9564 { 9565 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + r_xram_rsp_to_tgt_rsp_length.read() - 1; 9566 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 9567 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 9568 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 9569 9570 p_vci_tgt.rspval = true; 9571 9572 if( is_ll and not r_tgt_rsp_key_sent.read() ) 9573 { 9574 // LL response first flit 9575 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read(); 9576 } 9577 else 9578 { 9579 // LL response second flit or READ response 9580 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 9581 } 9582 9583 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 9584 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); 9585 p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); 9586 p_vci_tgt.rerror = is_error; 9587 p_vci_tgt.reop = (((is_last_word or 
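TGT_RSP_READ above and TGT_RSP_XRAM below use the same framing: an LL answer is two flits (reservation key first, then the data word), while an ordinary read streams the requested words with reop on the last one. A sketch of that framing, with an invented RspFlit type and assuming a single-word LL payload as enforced by the plen == 8 check in TGT_CMD_READ:

```cpp
#include <cstdint>
#include <vector>

struct RspFlit { uint32_t rdata; bool reop; };

std::vector<RspFlit> build_read_rsp(bool is_ll, uint32_t ll_key,
                                    const std::vector<uint32_t> &words)
{
    std::vector<RspFlit> flits;
    if (is_ll)
    {
        flits.push_back({ ll_key,      false });                   // first flit: LL key
        flits.push_back({ words.at(0), true  });                   // second flit: data word
    }
    else
    {
        for (size_t i = 0; i < words.size(); ++i)
            flits.push_back({ words[i], i + 1 == words.size() });  // reop on last word
    }
    return flits;
}
```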
is_error) and not is_ll) or 9588 (r_tgt_rsp_key_sent.read() and is_ll)); 9589 9590 break; 9591 } 9592 9593 case TGT_RSP_MULTI_ACK: 9594 p_vci_tgt.rspval = true; 9595 p_vci_tgt.rdata = 0; // Can be a CAS or SC rsp 9596 p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read(); 9597 p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read(); 9598 p_vci_tgt.rpktid = r_multi_ack_to_tgt_rsp_pktid.read(); 9599 p_vci_tgt.rerror = 0; 9600 p_vci_tgt.reop = true; 9601 break; 9602 } // end switch r_tgt_rsp_fsm 9603 9604 //////////////////////////////////////////////////////////////////// 9605 // p_dspin_m2p port (CC_SEND FSM) 9606 //////////////////////////////////////////////////////////////////// 9607 9608 p_dspin_m2p.write = false; 9609 p_dspin_m2p.eop = false; 9610 p_dspin_m2p.data = 0; 9611 9612 switch(r_cc_send_fsm.read()) 9613 { 9614 /////////////////////////// 9615 case CC_SEND_CONFIG_IDLE: 9616 case CC_SEND_XRAM_RSP_IDLE: 9617 case CC_SEND_WRITE_IDLE: 9618 case CC_SEND_CAS_IDLE: 9619 { 9620 break; 9621 } 9622 //////////////////////////////// 9623 case CC_SEND_CONFIG_INVAL_HEADER: 9624 { 9625 uint8_t multi_inval_type; 9626 if(m_config_to_cc_send_inst_fifo.read()) 9627 { 9628 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 9629 } 9630 else 9631 { 9632 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 9633 } 9634 9635 uint64_t flit = 0; 9636 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 9637 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 9638 9639 DspinDhccpParam::dspin_set( flit, 9640 dest, 9641 DspinDhccpParam::MULTI_INVAL_DEST); 9642 9643 DspinDhccpParam::dspin_set( flit, 9644 m_cc_global_id, 9645 DspinDhccpParam::MULTI_INVAL_SRCID); 9646 9647 DspinDhccpParam::dspin_set( flit, 9648 r_config_to_cc_send_trdid.read(), 9649 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 9650 9651 DspinDhccpParam::dspin_set( flit, 9652 multi_inval_type, 9653 DspinDhccpParam::M2P_TYPE); 9654 p_dspin_m2p.write = true; 9655 p_dspin_m2p.data = flit; 9656 break; 9657 } 9658 //////////////////////////////// 9659 case CC_SEND_CONFIG_INVAL_NLINE: 9660 { 9661 uint64_t flit = 0; 9662 DspinDhccpParam::dspin_set( flit, 9663 r_config_to_cc_send_nline.read(), 9664 DspinDhccpParam::MULTI_INVAL_NLINE); 9665 p_dspin_m2p.eop = true; 9666 p_dspin_m2p.write = true; 9667 p_dspin_m2p.data = flit; 9668 break; 9669 } 9670 /////////////////////////////////// 9671 case CC_SEND_XRAM_RSP_INVAL_HEADER: 9672 { 9673 if(not m_xram_rsp_to_cc_send_inst_fifo.rok()) break; 9674 9675 uint8_t multi_inval_type; 9676 if(m_xram_rsp_to_cc_send_inst_fifo.read()) 9677 { 9678 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 9679 } 9680 else 9681 { 9682 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 9683 } 9684 9685 uint64_t flit = 0; 9686 uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() << 9687 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 9688 9689 DspinDhccpParam::dspin_set( flit, 9690 dest, 9691 DspinDhccpParam::MULTI_INVAL_DEST); 9692 9693 DspinDhccpParam::dspin_set( flit, 9694 m_cc_global_id, 9695 DspinDhccpParam::MULTI_INVAL_SRCID); 9696 9697 DspinDhccpParam::dspin_set( flit, 9698 r_xram_rsp_to_cc_send_trdid.read(), 9699 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 9700 9701 DspinDhccpParam::dspin_set( flit, 9702 multi_inval_type, 9703 DspinDhccpParam::M2P_TYPE); 9704 p_dspin_m2p.write = true; 9705 p_dspin_m2p.data = flit; 9706 break; 9707 } 9708 9709 ////////////////////////////////// 9710 case CC_SEND_XRAM_RSP_INVAL_NLINE: 9711 { 9712 uint64_t flit = 0; 9713 9714 DspinDhccpParam::dspin_set( flit, 
9715 r_xram_rsp_to_cc_send_nline.read(), 9716 DspinDhccpParam::MULTI_INVAL_NLINE); 9717 p_dspin_m2p.eop = true; 9718 p_dspin_m2p.write = true; 9719 p_dspin_m2p.data = flit; 9720 break; 9721 } 9722 9723 ///////////////////////////////////// 9724 case CC_SEND_CONFIG_BRDCAST_HEADER: 9725 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: 9726 case CC_SEND_WRITE_BRDCAST_HEADER: 9727 case CC_SEND_CAS_BRDCAST_HEADER: 9728 { 9729 uint64_t flit = 0; 9730 9731 DspinDhccpParam::dspin_set( flit, 9732 m_broadcast_boundaries, 9733 DspinDhccpParam::BROADCAST_BOX); 9734 9735 DspinDhccpParam::dspin_set( flit, 9736 m_cc_global_id, 9737 DspinDhccpParam::BROADCAST_SRCID); 9738 9739 DspinDhccpParam::dspin_set( flit, 9740 1ULL, 9741 DspinDhccpParam::M2P_BC); 9742 p_dspin_m2p.write = true; 9743 p_dspin_m2p.data = flit; 9744 break; 9745 } 9746 //////////////////////////////////// 9747 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: 9748 { 9749 uint64_t flit = 0; 9750 DspinDhccpParam::dspin_set( flit, 9751 r_xram_rsp_to_cc_send_nline.read(), 9752 DspinDhccpParam::BROADCAST_NLINE); 9753 p_dspin_m2p.write = true; 9754 p_dspin_m2p.eop = true; 9755 p_dspin_m2p.data = flit; 9756 break; 9757 } 9758 ////////////////////////////////// 9759 case CC_SEND_CONFIG_BRDCAST_NLINE: 9760 { 9761 uint64_t flit = 0; 9762 DspinDhccpParam::dspin_set( flit, 9763 r_config_to_cc_send_nline.read(), 9764 DspinDhccpParam::BROADCAST_NLINE); 9765 p_dspin_m2p.write = true; 9766 p_dspin_m2p.eop = true; 9767 p_dspin_m2p.data = flit; 9768 break; 9769 } 9770 ///////////////////////////////// 9771 9772 case CC_SEND_READ_NCC_INVAL_HEADER: 9773 { 9774 uint64_t flit = 0; 9775 9776 uint8_t multi_inval_type; 9777 if (r_read_to_cc_send_inst.read()) 9778 { 9779 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 9780 } 9781 else 9782 { 9783 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 9784 } 9785 9786 DspinDhccpParam::dspin_set( 9787 flit, 9788 r_read_to_cc_send_dest.read(), 9789 DspinDhccpParam::MULTI_INVAL_DEST); 9790 9791 DspinDhccpParam::dspin_set( 9792 flit, 9793 m_cc_global_id, 9794 DspinDhccpParam::MULTI_INVAL_SRCID); 9795 9796 DspinDhccpParam::dspin_set( 9797 flit, 9798 DspinDhccpParam::TYPE_MULTI_INVAL_DATA, 9799 DspinDhccpParam::M2P_TYPE); 9800 9801 p_dspin_m2p.write = true; 9802 p_dspin_m2p.data = flit; 9803 9804 break; 9805 9806 } 9807 9808 9809 case CC_SEND_READ_NCC_INVAL_NLINE: 9810 { 9811 uint64_t flit = 0; 9812 9813 DspinDhccpParam::dspin_set( 9814 flit, 9815 r_read_to_cc_send_nline.read(), 9816 DspinDhccpParam::MULTI_INVAL_NLINE); 9817 9818 9819 p_dspin_m2p.write = true; 9820 p_dspin_m2p.data = flit; 9821 p_dspin_m2p.eop = true; 9822 9823 break; 9824 9825 } 9826 9827 case CC_SEND_WRITE_NCC_INVAL_HEADER: 9828 { 9829 uint64_t flit = 0; 9830 9831 DspinDhccpParam::dspin_set( 9832 flit, 9833 r_write_to_cc_send_dest.read(), 9834 DspinDhccpParam::MULTI_INVAL_DEST); 9835 9836 DspinDhccpParam::dspin_set( 9837 flit, 9838 m_cc_global_id, 9839 DspinDhccpParam::MULTI_INVAL_SRCID); 9840 9841 DspinDhccpParam::dspin_set( 9842 flit, 9843 DspinDhccpParam::TYPE_MULTI_INVAL_DATA, 9844 DspinDhccpParam::M2P_TYPE); 9845 9846 p_dspin_m2p.write = true; 9847 p_dspin_m2p.data = flit; 9848 9849 break; 9850 9851 } 9852 9853 case CC_SEND_WRITE_NCC_INVAL_NLINE: 9854 { 9855 uint64_t flit = 0; 9856 9857 DspinDhccpParam::dspin_set( 9858 flit, 9859 r_write_to_cc_send_nline.read(), 9860 DspinDhccpParam::MULTI_INVAL_NLINE); 9861 9862 9863 p_dspin_m2p.write = true; 9864 p_dspin_m2p.data = flit; 9865 p_dspin_m2p.eop = true; 9866 9867 break; 9868 9869 } 9870 9871 9872 case 
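Every CC_SEND state in this switch assembles its DSPIN M2P flit the same way: a 64-bit value filled field by field with DspinDhccpParam::dspin_set (DEST, SRCID, update index, type, nline). The helpers below show the generic shift-and-mask pattern such a setter implements; the offsets are deliberately left as parameters because the real field layout lives in dspin_dhccp_param.h and is not reproduced here:

```cpp
#include <cstdint>

// Write 'value' into the [shift, shift+width) slice of a 64-bit flit.
inline void flit_set(uint64_t &flit, uint64_t value, unsigned shift, unsigned width)
{
    const uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
    flit = (flit & ~(mask << shift)) | ((value & mask) << shift);
}

// Read the same slice back (the dspin_get counterpart used on the receive path).
inline uint64_t flit_get(uint64_t flit, unsigned shift, unsigned width)
{
    const uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
    return (flit >> shift) & mask;
}
```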
CC_SEND_WRITE_BRDCAST_NLINE: 9873 { 9874 uint64_t flit = 0; 9875 DspinDhccpParam::dspin_set( flit, 9876 r_write_to_cc_send_nline.read(), 9877 DspinDhccpParam::BROADCAST_NLINE); 9878 p_dspin_m2p.write = true; 9879 p_dspin_m2p.eop = true; 9880 p_dspin_m2p.data = flit; 9881 break; 9882 } 9883 /////////////////////////////// 9884 case CC_SEND_CAS_BRDCAST_NLINE: 9885 { 9886 uint64_t flit = 0; 9887 DspinDhccpParam::dspin_set( flit, 9888 r_cas_to_cc_send_nline.read(), 9889 DspinDhccpParam::BROADCAST_NLINE); 9890 p_dspin_m2p.write = true; 9891 p_dspin_m2p.eop = true; 9892 p_dspin_m2p.data = flit; 9893 break; 9894 } 9895 /////////////////////////////// 9896 case CC_SEND_WRITE_UPDT_HEADER: 9897 { 9898 if(not m_write_to_cc_send_inst_fifo.rok()) break; 9899 9900 uint8_t multi_updt_type; 9901 if(m_write_to_cc_send_inst_fifo.read()) 9902 { 9903 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 9904 } 9905 else 9906 { 9907 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 9908 } 9909 9910 uint64_t flit = 0; 9911 uint64_t dest = 9912 m_write_to_cc_send_srcid_fifo.read() << 9913 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 9914 9915 DspinDhccpParam::dspin_set( 9916 flit, 9917 dest, 9918 DspinDhccpParam::MULTI_UPDT_DEST); 9919 9920 DspinDhccpParam::dspin_set( 9921 flit, 9922 m_cc_global_id, 9923 DspinDhccpParam::MULTI_UPDT_SRCID); 9924 9925 DspinDhccpParam::dspin_set( 9926 flit, 9927 r_write_to_cc_send_trdid.read(), 9928 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 9929 9930 DspinDhccpParam::dspin_set( 9931 flit, 9932 multi_updt_type, 9933 DspinDhccpParam::M2P_TYPE); 9934 9935 p_dspin_m2p.write = true; 9936 p_dspin_m2p.data = flit; 9937 9938 break; 9939 } 9940 ////////////////////////////// 9941 case CC_SEND_WRITE_UPDT_NLINE: 9942 { 9943 uint64_t flit = 0; 9944 9945 DspinDhccpParam::dspin_set( 9946 flit, 9947 r_write_to_cc_send_index.read(), 9948 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 9949 9950 DspinDhccpParam::dspin_set( 9951 flit, 9952 r_write_to_cc_send_nline.read(), 9953 DspinDhccpParam::MULTI_UPDT_NLINE); 9954 9955 p_dspin_m2p.write = true; 9956 p_dspin_m2p.data = flit; 9957 9958 break; 9959 } 9960 ///////////////////////////// 9961 case CC_SEND_WRITE_UPDT_DATA: 9962 { 9963 9964 uint8_t multi_updt_cpt = 9965 r_cc_send_cpt.read() + r_write_to_cc_send_index.read(); 9966 9967 uint8_t multi_updt_be = r_write_to_cc_send_be[multi_updt_cpt].read(); 9968 uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read(); 9969 9970 uint64_t flit = 0; 9971 9972 DspinDhccpParam::dspin_set( 9973 flit, 9974 multi_updt_be, 9975 DspinDhccpParam::MULTI_UPDT_BE); 9976 9977 DspinDhccpParam::dspin_set( 9978 flit, 9979 multi_updt_data, 9980 DspinDhccpParam::MULTI_UPDT_DATA); 9981 9982 p_dspin_m2p.write = true; 9983 p_dspin_m2p.eop = (r_cc_send_cpt.read() == r_write_to_cc_send_count.read()); 9984 p_dspin_m2p.data = flit; 9985 9986 break; 9987 } 9988 //////////////////////////// 9989 case CC_SEND_CAS_UPDT_HEADER: 9990 { 9991 if (not m_cas_to_cc_send_inst_fifo.rok()) break; 9992 9993 uint8_t multi_updt_type; 9994 if(m_cas_to_cc_send_inst_fifo.read()) 9995 { 9996 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 9997 } 9998 else 9999 { 10000 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 10001 } 10002 10003 uint64_t flit = 0; 10004 uint64_t dest = 10005 m_cas_to_cc_send_srcid_fifo.read() << 10006 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 10007 10008 DspinDhccpParam::dspin_set( 10009 flit, 10010 dest, 10011 DspinDhccpParam::MULTI_UPDT_DEST); 10012 10013 
DspinDhccpParam::dspin_set( 10014 flit, 10015 m_cc_global_id, 10016 DspinDhccpParam::MULTI_UPDT_SRCID); 10017 10018 DspinDhccpParam::dspin_set( 10019 flit, 10020 r_cas_to_cc_send_trdid.read(), 10021 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 10022 10023 DspinDhccpParam::dspin_set( 10024 flit, 10025 multi_updt_type, 10026 DspinDhccpParam::M2P_TYPE); 10027 10028 p_dspin_m2p.write = true; 10029 p_dspin_m2p.data = flit; 10030 10031 break; 10032 } 10033 //////////////////////////// 10034 case CC_SEND_CAS_UPDT_NLINE: 10035 { 10036 uint64_t flit = 0; 10037 10038 DspinDhccpParam::dspin_set( 10039 flit, 10040 r_cas_to_cc_send_index.read(), 10041 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 10042 10043 DspinDhccpParam::dspin_set( 10044 flit, 10045 r_cas_to_cc_send_nline.read(), 10046 DspinDhccpParam::MULTI_UPDT_NLINE); 10047 10048 p_dspin_m2p.write = true; 10049 p_dspin_m2p.data = flit; 10050 10051 break; 10052 } 10053 /////////////////////////// 10054 case CC_SEND_CAS_UPDT_DATA: 10055 { 10056 uint64_t flit = 0; 10057 10058 DspinDhccpParam::dspin_set( 10059 flit, 10060 0xF, 10061 DspinDhccpParam::MULTI_UPDT_BE); 10062 10063 DspinDhccpParam::dspin_set( 10064 flit, 10065 r_cas_to_cc_send_wdata.read(), 10066 DspinDhccpParam::MULTI_UPDT_DATA); 10067 10068 p_dspin_m2p.write = true; 10069 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 10070 p_dspin_m2p.data = flit; 10071 10072 break; 10073 } 10074 //////////////////////////////// 10075 case CC_SEND_CAS_UPDT_DATA_HIGH: 10076 { 10077 uint64_t flit = 0; 10078 10079 DspinDhccpParam::dspin_set( 10080 flit, 10081 0xF, 10082 DspinDhccpParam::MULTI_UPDT_BE); 10083 10084 DspinDhccpParam::dspin_set( 10085 flit, 10086 r_cas_to_cc_send_wdata_high.read(), 10087 DspinDhccpParam::MULTI_UPDT_DATA); 10088 10089 p_dspin_m2p.write = true; 10090 p_dspin_m2p.eop = true; 10091 p_dspin_m2p.data = flit; 10092 10093 break; 10094 } 10095 } 10096 10097 //////////////////////////////////////////////////////////////////// 10098 // p_dspin_clack port (CLEANUP FSM) 10099 //////////////////////////////////////////////////////////////////// 10100 10101 if ( r_cleanup_fsm.read() == CLEANUP_SEND_CLACK ) 10102 { 10103 uint8_t cleanup_ack_type; 10104 if(r_cleanup_inst.read()) 10105 { 10106 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST; 10107 } 10108 else 10109 { 10110 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA; 10111 } 10112 10113 uint64_t flit = 0; 10114 uint64_t dest = r_cleanup_srcid.read() << 10115 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 10116 10117 DspinDhccpParam::dspin_set( 10118 flit, 10119 dest, 10120 DspinDhccpParam::CLACK_DEST); 10121 10122 DspinDhccpParam::dspin_set( 10123 flit, 10124 r_cleanup_nline.read() & 0xFFFF, 10125 DspinDhccpParam::CLACK_SET); 10126 10127 DspinDhccpParam::dspin_set( 10128 flit, 10129 r_cleanup_way_index.read(), 10130 DspinDhccpParam::CLACK_WAY); 10131 10132 DspinDhccpParam::dspin_set( 10133 flit, 10134 cleanup_ack_type, 10135 DspinDhccpParam::CLACK_TYPE); 10136 10137 p_dspin_clack.eop = true; 10138 p_dspin_clack.write = true; 10139 p_dspin_clack.data = flit; 1360 10140 } 1361 10141 else 1362 10142 { 1363 r_tgt_cmd_fsm = TGT_CMD_IDLE; 10143 p_dspin_clack.write = false; 10144 p_dspin_clack.eop = false; 10145 p_dspin_clack.data = 0; 1364 10146 } 1365 1366 #if DEBUG_MEMC_TGT_CMD 1367 if(m_debug) 1368 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1369 << " address = " << std::hex << p_vci_tgt.address.read() 1370 << " / wdata = " << p_vci_tgt.wdata.read() 1371 << " / need_rsp = " << need_rsp 1372 << " 
/ error = " << error << std::endl; 1373 #endif 1374 break; 1375 } 1376 ////////////////// 1377 case TGT_CMD_READ: // Push a read request into read fifo 1378 1379 // check that the read does not cross a cache line limit. 1380 if ( ((m_x[(addr_t) p_vci_tgt.address.read()]+ (p_vci_tgt.plen.read() >>2)) > 16) and 1381 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1382 { 1383 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1384 << " illegal address/plen for VCI read command" << std::endl; 1385 exit(0); 1386 } 1387 // check single flit 1388 if(!p_vci_tgt.eop.read()) 1389 { 1390 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1391 << " read command packet must contain one single flit" << std::endl; 1392 exit(0); 1393 } 1394 // check plen for LL 1395 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1396 (p_vci_tgt.plen.read() != 8) ) 1397 { 1398 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1399 << " ll command packets must have a plen of 8" << std::endl; 1400 exit(0); 1401 } 1402 1403 if ( p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok() ) 1404 { 1405 1406 #if DEBUG_MEMC_TGT_CMD 1407 if(m_debug) 1408 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1409 << " address = " << std::hex << p_vci_tgt.address.read() 1410 << " / srcid = " << p_vci_tgt.srcid.read() 1411 << " / trdid = " << p_vci_tgt.trdid.read() 1412 << " / pktid = " << p_vci_tgt.pktid.read() 1413 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1414 #endif 1415 cmd_read_fifo_put = true; 1416 if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) m_cpt_ll++; 1417 else m_cpt_read++; 1418 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1419 } 1420 break; 1421 1422 /////////////////// 1423 case TGT_CMD_WRITE: 1424 if(p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1425 { 1426 1427 #if DEBUG_MEMC_TGT_CMD 1428 if(m_debug) 1429 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1430 << " address = " << std::hex << p_vci_tgt.address.read() 1431 << " / srcid = " << p_vci_tgt.srcid.read() 1432 << " / trdid = " << p_vci_tgt.trdid.read() 1433 << " / pktid = " << p_vci_tgt.pktid.read() 1434 << " / wdata = " << p_vci_tgt.wdata.read() 1435 << " / be = " << p_vci_tgt.be.read() 1436 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1437 #endif 1438 cmd_write_fifo_put = true; 1439 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1440 } 1441 break; 1442 1443 ///////////////// 1444 case TGT_CMD_CAS: 1445 if((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1446 { 1447 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" 1448 << "illegal format for CAS command " << std::endl; 1449 exit(0); 1450 } 1451 1452 if(p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1453 { 1454 1455 #if DEBUG_MEMC_TGT_CMD 1456 if(m_debug) 1457 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1458 << " address = " << std::hex << p_vci_tgt.address.read() 1459 << " srcid = " << p_vci_tgt.srcid.read() 1460 << " trdid = " << p_vci_tgt.trdid.read() 1461 << " pktid = " << p_vci_tgt.pktid.read() 1462 << " wdata = " << p_vci_tgt.wdata.read() 1463 << " be = " << p_vci_tgt.be.read() 1464 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1465 #endif 1466 cmd_cas_fifo_put = true; 1467 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1468 } 1469 break; 1470 } // end switch tgt_cmd_fsm 1471 1472 
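The address/plen check in TGT_CMD_READ above rejects any burst read that would spill past the end of its 16-word cache line (LOCKED_READ excepted, since LL is forced to plen = 8). A stand-alone version of that predicate, assuming the 4-byte words and 16-word lines used by this component; the function name is illustrative:

```cpp
#include <cstdint>

// True when a read of plen_bytes starting at 'address' crosses a cache line boundary.
inline bool read_crosses_line(uint64_t address, uint32_t plen_bytes)
{
    const uint32_t words_per_line = 16;                   // m_words in this component
    uint32_t x = (address >> 2) & (words_per_line - 1);   // word offset within the line
    return (x + (plen_bytes >> 2)) > words_per_line;
}
```

For example, a 16-byte read (plen = 16) starting at word offset 14 would touch words 14 to 17 and is rejected, since 14 + 4 > 16.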
///////////////////////////////////////////////////////////////////////// 1473 // MULTI_ACK FSM 1474 ///////////////////////////////////////////////////////////////////////// 1475 // This FSM controls the response to the multicast update requests sent 1476 // by the memory cache to the L1 caches and update the UPT. 1477 // 1478 // - The FSM decrements the proper entry in UPT, 1479 // and clear the UPT entry when all responses have been received. 1480 // - If required, it sends a request to the TGT_RSP FSM to complete 1481 // a pending write transaction. 1482 // 1483 // All those multi-ack packets are one flit packet. 1484 // The index in the UPT is defined in the TRDID field. 1485 //////////////////////////////////////////////////////////////////////// 1486 1487 switch(r_multi_ack_fsm.read()) 1488 { 1489 //////////////////// 1490 case MULTI_ACK_IDLE: 1491 { 1492 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1493 1494 // No CC_RECEIVE FSM request and no WRITE FSM request 1495 if( not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1496 break; 1497 1498 uint8_t updt_index; 1499 1500 // handling WRITE FSM request to decrement update table response 1501 // counter if no CC_RECEIVE FSM request 1502 if(not multi_ack_fifo_rok) 10147 /////////////////////////////////////////////////////////////////// 10148 // p_dspin_p2m port (CC_RECEIVE FSM) 10149 /////////////////////////////////////////////////////////////////// 10150 // 10151 switch(r_cc_receive_fsm.read()) 1503 10152 { 1504 updt_index = r_write_to_multi_ack_upt_index.read(); 1505 r_write_to_multi_ack_req = false; 10153 case CC_RECEIVE_IDLE: 10154 { 10155 p_dspin_p2m.read = false; 10156 break; 10157 } 10158 case CC_RECEIVE_CLEANUP: 10159 case CC_RECEIVE_CLEANUP_EOP: 10160 { 10161 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok(); 10162 break; 10163 } 10164 case CC_RECEIVE_MULTI_ACK: 10165 { 10166 p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok(); 10167 break; 10168 } 1506 10169 } 1507 // Handling CC_RECEIVE FSM request 1508 else 1509 { 1510 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1511 updt_index = DspinDhccpParam::dspin_get(flit, 1512 DspinDhccpParam::MULTI_ACK_UPDT_INDEX); 1513 1514 cc_receive_to_multi_ack_fifo_get = true; 1515 } 1516 1517 assert((updt_index < m_upt.size()) and 1518 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1519 "index too large for UPT"); 1520 1521 r_multi_ack_upt_index = updt_index; 1522 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1523 1524 #if DEBUG_MEMC_MULTI_ACK 1525 if(m_debug) 1526 { 1527 if (multi_ack_fifo_rok) 1528 { 1529 std::cout << " <MEMC " << name() 1530 << " MULTI_ACK_IDLE> Response for UPT entry " 1531 << (size_t)updt_index << std::endl; 1532 } 1533 else 1534 { 1535 std::cout << " <MEMC " << name() 1536 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1537 << updt_index << std::endl; 1538 } 1539 } 1540 #endif 1541 break; 1542 } 1543 1544 //////////////////////// 1545 case MULTI_ACK_UPT_LOCK: 1546 { 1547 m_cpt_multi_ack_fsm_upt_lock++; 1548 // get lock to the UPDATE table 1549 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1550 1551 // decrement the number of expected responses 1552 size_t count = 0; 1553 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 1554 1555 /*ODCCP*/ //m_upt.print(); 1556 1557 if(not valid) 1558 { 1559 std::cout << "VCI_MEM_CACHE ERROR " << name() 1560 << " MULTI_ACK_UPT_LOCK state" << std::endl 1561 << "unsuccessful access to decrement the UPT" << std::endl; 1562 exit(0); 1563 } 1564 1565 
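The UPT handling around this point boils down to per-entry acknowledgement counting: each multi-ack flit (or the WRITE FSM shortcut) decrements the expected-response count, and when it reaches zero the entry is cleared and, if need_rsp is set, a write response is posted to TGT_RSP. A toy model of that bookkeeping, with field and function names invented for the example (the real table is the UpdateTab accessed through m_upt):

```cpp
#include <cstddef>
#include <vector>

struct UptEntryModel
{
    bool     valid    = false;
    unsigned count    = 0;       // acknowledgements still expected
    bool     need_rsp = false;   // a response is owed to the writing initiator
};

// Process one acknowledgement; returns true when the caller must now send the
// deferred write response (entry fully acknowledged and need_rsp was set).
inline bool upt_ack(std::vector<UptEntryModel> &upt, size_t index)
{
    UptEntryModel &e = upt.at(index);
    if (not e.valid or e.count == 0) return false;   // the FSM treats this as fatal
    if (--e.count != 0)              return false;   // still waiting for more acks
    bool rsp = e.need_rsp;
    e = UptEntryModel();                             // clear the entry
    return rsp;
}
```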
if(count == 0) 1566 { 1567 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1568 } 1569 else 1570 { 1571 r_multi_ack_fsm = MULTI_ACK_IDLE; 1572 } 1573 1574 #if DEBUG_MEMC_MULTI_ACK 1575 if(m_debug) 1576 std::cout << " <MEMC " << name() 1577 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1578 << " entry = " << r_multi_ack_upt_index.read() 1579 << " / rsp_count = " << std::dec << count << std::endl; 1580 m_cpt_multi_ack_fsm_n_upt_lock++; 1581 #endif 1582 break; 1583 } 1584 1585 ///////////////////////// 1586 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 1587 { 1588 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1589 { 1590 std::cout << "VCI_MEM_CACHE ERROR " << name() 1591 << " MULTI_ACK_UPT_CLEAR state" 1592 << " bad UPT allocation" << std::endl; 1593 exit(0); 1594 } 1595 1596 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 1597 r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read()); 1598 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 1599 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1600 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1601 1602 // clear the UPT entry 1603 m_upt.clear(r_multi_ack_upt_index.read()); 1604 1605 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1606 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1607 1608 #if DEBUG_MEMC_MULTI_ACK 1609 if(m_debug) 1610 std::cout << " <MEMC " << name() 1611 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 1612 << std::dec << r_multi_ack_upt_index.read() << std::endl; 1613 #endif 1614 break; 1615 } 1616 ///////////////////////// 1617 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 1618 // Wait if pending request 1619 { 1620 if ( r_multi_ack_to_tgt_rsp_req.read() ) break; 1621 1622 r_multi_ack_to_tgt_rsp_req = true; 1623 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 1624 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 1625 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 1626 r_multi_ack_fsm = MULTI_ACK_IDLE; 1627 1628 #if DEBUG_MEMC_MULTI_ACK 1629 if(m_debug) 1630 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 1631 << " Request TGT_RSP FSM to send a response to srcid " 1632 << std::hex << r_multi_ack_srcid.read() << std::endl; 1633 #endif 1634 break; 1635 } 1636 } // end switch r_multi_ack_fsm 1637 1638 //////////////////////////////////////////////////////////////////////////////////// 1639 // CONFIG FSM 1640 //////////////////////////////////////////////////////////////////////////////////// 1641 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1642 // The target buffer can have any size, and there is one single command for 1643 // all cache lines covered by the target buffer. 1644 // 1645 // An INVAL or SYNC configuration operation is defined by the following registers: 1646 // - bool r_config_cmd : INVAL / SYNC / NOP 1647 1648 // - uint64_t r_config_address : buffer base address 1649 // - uint32_t r_config_cmd_lines : number of lines to be handled 1650 // - uint32_t r_config_rsp_lines : number of lines not completed 1651 1652 // 1653 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1654 // 1655 // all cache lines covered by the buffer. The various lines of a given buffer 1656 // can be pipelined: the CONFIG FSM does not wait the response for line (n) to send 1657 // the command for line (n+1). 
It decrements the r_config_cmd_lines counter until 1658 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1659 // 1660 // - INVAL request: 1661 // For each line, it access to the DIR. 1662 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1663 // In case of hit, with no copies in L1 caches, the line is invalidated and 1664 // a response is requested to TGT_RSP FSM. 1665 // If there is copies, a multi-inval, or a broadcast-inval coherence transaction 1666 // 1667 // is launched and registered in UPT. The multi-inval transaction completion 1668 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1669 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1670 // TODO : The target buffer address must be aligned on a cache line boundary. 1671 // This constraint can be released, but it requires to make 2 PUT transactions 1672 // for the first and the last line... 1673 // 1674 // - SYNC request: 1675 // For each line, it access to the DIR. 1676 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1677 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1678 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1679 // when a PUT response is received. 1680 // The CONFIG SYNC response is sent only when the last PUT response is received. 1681 // 1682 // From the software point of view, a configuration request is a sequence 1683 // of 6 atomic accesses in an uncached segment. A dedicated lock is used 1684 // to handle only one configuration command at a given time: 1685 // - Read MEMC_LOCK : Get the lock 1686 // - Write MEMC_ADDR_LO : Set the buffer address LSB 1687 // - Write MEMC_ADDR_HI : Set the buffer address MSB 1688 // - Write MEMC_BUF_LENGTH : set buffer length (bytes) 1689 // - Write MEMC_CMD_TYPE : launch the actual operation 1690 // - WRITE MEMC_LOCK : release the lock 1691 //////////////////////////////////////////////////////////////////////////////////// 1692 1693 switch( r_config_fsm.read() ) 1694 { 1695 ///////////////// 1696 case CONFIG_IDLE: // waiting a config request 1697 { 1698 if ( r_config_cmd.read() != MEMC_CMD_NOP ) 1699 { 1700 r_config_fsm = CONFIG_LOOP; 1701 1702 #if DEBUG_MEMC_CONFIG 1703 if(m_debug) 1704 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1705 << " address = " << std::hex << r_config_address.read() 1706 << " / nlines = " << std::dec << r_config_cmd_lines.read() 1707 << " / type = " << r_config_cmd.read() << std::endl; 1708 #endif 1709 } 1710 break; 1711 } 1712 ///////////////// 1713 case CONFIG_LOOP: // test last line to be handled 1714 { 1715 if ( r_config_cmd_lines.read() == 0 ) 1716 { 1717 r_config_cmd = MEMC_CMD_NOP; 1718 r_config_fsm = CONFIG_WAIT; 1719 } 1720 else 1721 { 1722 r_config_fsm = CONFIG_DIR_REQ; 1723 } 1724 1725 #if DEBUG_MEMC_CONFIG 1726 if(m_debug) 1727 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1728 << " address = " << std::hex << r_config_address.read() 1729 << " / nlines = " << std::dec << r_config_cmd_lines.read() 1730 << " / command = " << r_config_cmd.read() << std::endl; 1731 #endif 1732 break; 1733 } 1734 ///////////////// 1735 case CONFIG_WAIT: // wait completion (last response) 1736 { 1737 if ( r_config_rsp_lines.read() == 0 ) // last response received 1738 { 1739 r_config_fsm = CONFIG_RSP; 1740 } 1741 1742 #if DEBUG_MEMC_CONFIG 1743 if(m_debug) 1744 std::cout << " <MEMC " << name() << " 
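As described in the comment block above, driving one INVAL or SYNC operation from software is a fixed sequence of six uncached accesses guarded by a lock. A minimal sketch of that sequence: the MEMC_* cell names and the MEMC_CMD_INVAL value are the ones used by this FSM (defined in the component's configuration header), while memc_read()/memc_write() stand for whatever uncached accessors the platform provides and are assumptions of the example:

```cpp
#include <cstdint>

// Assumed platform helpers performing single uncached accesses to the
// configuration segment; MEMC_LOCK, MEMC_ADDR_LO, MEMC_ADDR_HI, MEMC_BUF_LENGTH,
// MEMC_CMD_TYPE and MEMC_CMD_INVAL come from the mem_cache headers.
extern uint32_t memc_read (uint32_t cell);
extern void     memc_write(uint32_t cell, uint32_t value);

void memc_inval_buffer(uint64_t buf_paddr, uint32_t buf_bytes)
{
    while (memc_read(MEMC_LOCK) != 0) { /* spin: non-zero means the lock is held */ }

    memc_write(MEMC_ADDR_LO,    (uint32_t)(buf_paddr & 0xFFFFFFFF));
    memc_write(MEMC_ADDR_HI,    (uint32_t)(buf_paddr >> 32));
    memc_write(MEMC_BUF_LENGTH, buf_bytes);
    memc_write(MEMC_CMD_TYPE,   MEMC_CMD_INVAL);   // launches the operation
    memc_write(MEMC_LOCK, 0);                      // release the lock
}
```

Note that the write to MEMC_CMD_TYPE is registered with need_rsp = false: its VCI response is only returned by CONFIG_RSP once the whole buffer has been handled, so the sequence naturally blocks until the operation completes.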
CONFIG_WAIT>" 1745 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1746 #endif 1747 break; 1748 } 1749 //////////////// 1750 case CONFIG_RSP: // request TGT_RSP FSM to return response 1751 { 1752 if ( not r_config_to_tgt_rsp_req.read() ) 1753 { 1754 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1755 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1756 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1757 r_config_to_tgt_rsp_error = false; 1758 r_config_to_tgt_rsp_req = true; 1759 r_config_fsm = CONFIG_IDLE; 1760 1761 #if DEBUG_MEMC_CONFIG 1762 if(m_debug) 1763 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1764 << " error = " << r_config_to_tgt_rsp_error.read() 1765 << " / rsrcid = " << std::hex << r_config_srcid.read() 1766 << " / rtrdid = " << std::hex << r_config_trdid.read() 1767 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1768 #endif 1769 } 1770 break; 1771 1772 } 1773 1774 //////////////////// 1775 case CONFIG_DIR_REQ: // Request directory lock 1776 { 1777 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG ) 1778 { 1779 r_config_fsm = CONFIG_DIR_ACCESS; 1780 } 1781 1782 #if DEBUG_MEMC_CONFIG 1783 if(m_debug) 1784 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 1785 << " Request DIR access" << std::endl; 1786 #endif 1787 break; 1788 } 1789 /////////////////////// 1790 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1791 { 1792 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1793 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1794 1795 size_t way = 0; 1796 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 1797 1798 if ( entry.valid and // hit & inval command 1799 (r_config_cmd.read() == MEMC_CMD_INVAL) ) 1800 { 1801 r_config_dir_way = way; 1802 r_config_dir_copy_inst = entry.owner.inst; 1803 r_config_dir_copy_srcid = entry.owner.srcid; 1804 r_config_dir_is_cnt = entry.is_cnt; 1805 r_config_dir_count = entry.count; 1806 r_config_dir_lock = entry.lock; 1807 r_config_dir_ptr = entry.ptr; 1808 1809 r_config_fsm = CONFIG_IVT_LOCK; 1810 } 1811 else if ( entry.valid and // hit & sync command 1812 entry.dirty and 1813 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1814 { 1815 r_config_fsm = CONFIG_TRT_LOCK; 1816 } 1817 else // return to LOOP 1818 { 1819 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1820 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1821 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1822 r_config_address = r_config_address.read() + (m_words<<2); 1823 r_config_fsm = CONFIG_LOOP; 1824 } 1825 1826 #if DEBUG_MEMC_CONFIG 1827 if(m_debug) 1828 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 1829 << " address = " << std::hex << r_config_address.read() 1830 << " / hit = " << std::dec << entry.valid 1831 << " / dirty = " << entry.dirty 1832 << " / count = " << entry.count 1833 << " / is_cnt = " << entry.is_cnt << std::endl; 1834 #endif 1835 break; 1836 } 1837 ///////////////////// 1838 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1839 // to a dirty cache line 1840 // keep DIR lock, and try to get TRT lock 1841 // return to LOOP state if TRT full 1842 // reset dirty bit in DIR and register a PUT 1843 // trabsaction in TRT if not full. 
1844 { 1845 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1846 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1847 1848 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1849 { 1850 size_t index = 0; 1851 bool wok = not m_trt.full(index); 1852 1853 if ( not wok ) 1854 { 1855 r_config_fsm = CONFIG_LOOP; 1856 } 1857 else 1858 { 1859 size_t way = r_config_dir_way.read(); 1860 size_t set = m_y[r_config_address.read()]; 1861 1862 // reset dirty bit in DIR 1863 DirectoryEntry entry; 1864 entry.valid = true; 1865 entry.dirty = false; 1866 entry.tag = m_z[r_config_address.read()]; 1867 entry.is_cnt = r_config_dir_is_cnt.read(); 1868 entry.lock = r_config_dir_lock.read(); 1869 entry.ptr = r_config_dir_ptr.read(); 1870 entry.count = r_config_dir_count.read(); 1871 entry.owner.inst = r_config_dir_copy_inst.read(); 1872 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1873 m_cache_directory.write( set, 1874 way, 1875 entry ); 1876 1877 r_config_trt_index = index; 1878 r_config_fsm = CONFIG_TRT_SET; 1879 } 1880 1881 #if DEBUG_MEMC_CONFIG 1882 if(m_debug) 1883 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1884 << " wok = " << std::dec << wok 1885 << " index = " << index << std::endl; 1886 #endif 1887 } 1888 break; 1889 } 1890 //////////////////// 1891 case CONFIG_TRT_SET: // read data in cache 1892 // and post a PUT request in TRT 1893 { 1894 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1895 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1896 1897 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1898 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1899 1900 // read data into cache 1901 size_t way = r_config_dir_way.read(); 1902 size_t set = m_y[r_config_address.read()]; 1903 1904 sc_signal<data_t> config_data[16]; 1905 m_cache_data.read_line( way, 1906 set, 1907 config_data ); 1908 1909 // post a PUT request in TRT 1910 std::vector<data_t> data_vector; 1911 data_vector.clear(); 1912 for(size_t i=0; i<m_words; i++) data_vector.push_back(config_data[i].read()); 1913 m_trt.set( r_config_trt_index.read(), 1914 false, // PUT 1915 m_nline[r_config_address.read()], // nline 1916 0, // srcid: unused 1917 0, // trdid: unused 1918 0, // pktid: unused 1919 false, // not proc_read 1920 0, // read_length: unused 1921 0, // word_index: unused 1922 std::vector<be_t>(m_words,0xF), 1923 data_vector); 1924 1925 #if DEBUG_MEMC_CONFIG 1926 if(m_debug) 1927 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1928 << " address = " << std::hex << r_config_address.read() 1929 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1930 #endif 1931 break; 1932 } 1933 //////////////////// 1934 case CONFIG_PUT_REQ: // PUT request to IXR_CMD_FSM 1935 { 1936 if ( not r_config_to_ixr_cmd_req.read() ) 1937 { 1938 r_config_to_ixr_cmd_req = true; 1939 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1940 1941 // prepare next iteration 1942 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1943 r_config_address = r_config_address.read() + (m_words<<2); 1944 r_config_fsm = CONFIG_LOOP; 1945 1946 #if DEBUG_MEMC_CONFIG 1947 if(m_debug) 1948 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> PUT request to IXR_CMD_FSM" 1949 << " / address = " << std::hex << r_config_address.read() << std::endl; 1950 #endif 1951 } 1952 break; 1953 } 1954 ///////////////////// 1955 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1956 // Keep DIR lock and Try to get IVT lock. 
1957 // Return to LOOP state if IVT full. 1958 // Register inval in IVT, and invalidate the 1959 // directory if IVT not full. 1960 { 1961 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1962 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1963 1964 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1965 { 1966 size_t set = m_y[(addr_t)(r_config_address.read())]; 1967 size_t way = r_config_dir_way.read(); 1968 1969 if ( r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 1970 { 1971 m_cache_directory.inval( way, set ); 1972 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1973 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1974 r_config_address = r_config_address.read() + (m_words<<2); 1975 r_config_fsm = CONFIG_LOOP; 1976 1977 #if DEBUG_MEMC_CONFIG 1978 if(m_debug) 1979 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1980 << " No copies in L1 : inval DIR entry" << std::endl; 1981 #endif 1982 } 1983 else // try to register inval in IVT 1984 { 1985 bool wok = false; 1986 size_t index = 0; 1987 bool broadcast = r_config_dir_is_cnt.read(); 1988 size_t srcid = r_config_srcid.read(); 1989 size_t trdid = r_config_trdid.read(); 1990 size_t pktid = r_config_pktid.read(); 1991 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 1992 size_t nb_copies = r_config_dir_count.read(); 1993 1994 wok = m_ivt.set(false, // it's an inval transaction 1995 broadcast, 1996 false, // no response required 1997 true, // acknowledge required 1998 srcid, 1999 trdid, 2000 pktid, 2001 nline, 2002 nb_copies, 2003 index); 2004 2005 if ( wok ) // IVT success => inval DIR slot 2006 { 2007 m_cache_directory.inval( way, set ); 2008 r_config_ivt_index = index; 2009 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 2010 else r_config_fsm = CONFIG_INVAL_SEND; 2011 2012 #if DEBUG_MEMC_CONFIG 2013 if(m_debug) 2014 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2015 << " Inval DIR entry and register inval in IVT" 2016 << " : index = " << std::dec << index 2017 << " / broadcast = " << broadcast << std::endl; 2018 #endif 2019 } 2020 else // IVT full => release both DIR and IVT locks 2021 { 2022 r_config_fsm = CONFIG_LOOP; 2023 2024 #if DEBUG_MEMC_CONFIG 2025 if(m_debug) 2026 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2027 << " IVT full : release DIR & IVT locks and retry" << std::endl; 2028 #endif 2029 } 2030 } 2031 } 2032 break; 2033 } 2034 //////////////////// 2035 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 2036 { 2037 if( not r_config_to_cc_send_multi_req.read() and 2038 not r_config_to_cc_send_brdcast_req.read() ) 2039 { 2040 // post bc inval request 2041 r_config_to_cc_send_multi_req = false; 2042 r_config_to_cc_send_brdcast_req = true; 2043 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2044 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2045 2046 // prepare next iteration 2047 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2048 r_config_address = r_config_address.read() + (m_words<<2); 2049 r_config_fsm = CONFIG_LOOP; 2050 2051 #if DEBUG_MEMC_CONFIG 2052 if(m_debug) 2053 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 2054 << " Post a broadcast inval request to CC_SEND FSM" 2055 << " / address = " << r_config_address.read() <<std::endl; 2056 #endif 2057 } 2058 break; 2059 } 2060 /////////////////////// 2061 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 2062 { 2063 if( not r_config_to_cc_send_multi_req.read() and 2064 not 
r_config_to_cc_send_brdcast_req.read() ) 2065 { 2066 r_config_to_cc_send_multi_req = true; 2067 r_config_to_cc_send_brdcast_req = false; 2068 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2069 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2070 2071 // post data into FIFO 2072 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2073 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2074 config_to_cc_send_fifo_put = true; 2075 2076 if ( r_config_dir_count.read() == 1 ) // one copy 2077 { 2078 // prepare next iteration 2079 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2080 r_config_address = r_config_address.read() + (m_words<<2); 2081 r_config_fsm = CONFIG_LOOP; 2082 } 2083 else // several copies 2084 { 2085 r_config_fsm = CONFIG_HEAP_REQ; 2086 } 2087 2088 #if DEBUG_MEMC_CONFIG 2089 if(m_debug) 2090 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2091 << " Post multi inval request to CC_SEND FSM" 2092 << " / address = " << std::hex << r_config_address.read() 2093 << " / copy = " << r_config_dir_copy_srcid.read() 2094 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2095 #endif 2096 } 2097 break; 2098 } 2099 ///////////////////// 2100 case CONFIG_HEAP_REQ: // Try to get access to Heap 2101 { 2102 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 2103 { 2104 r_config_fsm = CONFIG_HEAP_SCAN; 2105 r_config_heap_next = r_config_dir_ptr.read(); 2106 } 2107 2108 #if DEBUG_MEMC_CONFIG 2109 if(m_debug) 2110 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2111 << " Requesting HEAP lock" << std::endl; 2112 #endif 2113 break; 2114 } 2115 ////////////////////// 2116 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2117 { 2118 HeapEntry entry = m_heap.read( r_config_heap_next.read() ); 2119 bool last_copy = (entry.next == r_config_heap_next.read()); 2120 2121 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2122 config_to_cc_send_fifo_inst = entry.owner.inst; 2123 // config_to_cc_send_fifo_last = last_copy; 2124 config_to_cc_send_fifo_put = true; 2125 2126 if ( m_config_to_cc_send_inst_fifo.wok() ) // inval request accepted 2127 { 2128 r_config_heap_next = entry.next; 2129 if ( last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 2130 } 2131 2132 #if DEBUG_MEMC_CONFIG 2133 if(m_debug) 2134 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2135 << " Post multi inval request to CC_SEND FSM" 2136 << " / address = " << std::hex << r_config_address.read() 2137 << " / copy = " << entry.owner.srcid 2138 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2139 #endif 2140 break; 2141 } 2142 ////////////////////// 2143 case CONFIG_HEAP_LAST: // HEAP housekeeping 2144 { 2145 size_t free_pointer = m_heap.next_free_ptr(); 2146 HeapEntry last_entry; 2147 last_entry.owner.srcid = 0; 2148 last_entry.owner.inst = false; 2149 2150 if ( m_heap.is_full() ) 2151 { 2152 last_entry.next = r_config_dir_ptr.read(); 2153 m_heap.unset_full(); 2154 } 2155 else 2156 { 2157 last_entry.next = free_pointer; 2158 } 2159 2160 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 2161 m_heap.write( r_config_heap_next.read(), last_entry ); 2162 2163 // prepare next iteration 2164 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2165 r_config_address = r_config_address.read() + (m_words<<2); 2166 r_config_fsm = CONFIG_LOOP; 2167 2168 #if DEBUG_MEMC_CONFIG 2169 if(m_debug) 2170 std::cout << " <MEMC " << name() << " CONFIG_HEAP_LAST>" 2171 << " Heap housekeeping" << std::endl; 2172 #endif 2173 break; 2174 } 
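CONFIG_HEAP_SCAN and CONFIG_HEAP_LAST above walk the sharers the same way the other multi-inval paths do: the copies of a line beyond the directory owner form a singly linked list in the HEAP, terminated by an entry whose next pointer refers to itself, and the freed chain is then spliced back onto the free list. A sketch of just the traversal, with invented type and callback names:

```cpp
#include <cstddef>
#include <vector>

struct HeapEntryModel { size_t owner_srcid; bool owner_inst; size_t next; };

// Visit every copy reachable from 'head' (r_config_dir_ptr in the FSM),
// issuing one invalidation per copy; the last entry points to itself.
template <typename SendInval>
void scan_copies(const std::vector<HeapEntryModel> &heap, size_t head, SendInval send)
{
    size_t ptr = head;
    while (true)
    {
        const HeapEntryModel &e = heap.at(ptr);
        send(e.owner_srcid, e.owner_inst);
        if (e.next == ptr) break;    // self-referencing next marks the last copy
        ptr = e.next;
    }
}
```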
2175 } // end switch r_config_fsm 2176 2177 //////////////////////////////////////////////////////////////////////////////////// 2178 // READ FSM 2179 //////////////////////////////////////////////////////////////////////////////////// 2180 // The READ FSM controls the VCI read and ll requests. 2181 // It takes the lock protecting the cache directory to check the cache line status: 2182 // - In case of HIT 2183 // The fsm copies the data (one line, or one single word) 2184 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 2185 // The requesting initiator is registered in the cache directory. 2186 // If the number of copy is larger than 1, the new copy is registered 2187 // in the HEAP. 2188 // If the number of copy is larger than the threshold, the HEAP is cleared, 2189 // and the corresponding line switches to the counter mode. 2190 // - In case of MISS 2191 // The READ fsm takes the lock protecting the transaction tab. 2192 // If a read transaction to the XRAM for this line already exists, 2193 // or if the transaction tab is full, the fsm is stalled. 2194 // If a TRT entry is free, the READ request is registered in TRT, 2195 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 2196 // The READ FSM returns in the IDLE state as the read transaction will be 2197 // completed when the missing line will be received. 2198 //////////////////////////////////////////////////////////////////////////////////// 2199 2200 switch(r_read_fsm.read()) 2201 { 2202 /////////////// 2203 case READ_IDLE: // waiting a read request 2204 { 2205 if(m_cmd_read_addr_fifo.rok()) 2206 { 2207 2208 #if DEBUG_MEMC_READ 2209 if(m_debug) 2210 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2211 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2212 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2213 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2214 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2215 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2216 #endif 2217 r_read_coherent = false; //WB by default 2218 r_read_ll_done = false; 2219 r_read_fsm = READ_DIR_REQ; 2220 } 2221 break; 2222 } 2223 2224 ////////////////// 2225 case READ_DIR_REQ: // Get the lock to the directory 2226 { 2227 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2228 { 2229 r_read_fsm = READ_DIR_LOCK; 2230 m_cpt_read_fsm_n_dir_lock++; 2231 } 2232 2233 #if DEBUG_MEMC_READ 2234 if(m_debug) 2235 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 2236 #endif 2237 2238 m_cpt_read_fsm_dir_lock++; 2239 2240 break; 2241 } 2242 2243 /////////////////// 2244 case READ_DIR_LOCK: // check directory for hit / miss 2245 { 2246 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2247 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2248 2249 size_t way = 0; 2250 DirectoryEntry entry = 2251 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2252 if(((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) and not r_read_ll_done.read()) // access the global table ONLY when we have an LL cmd 2253 { 2254 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2255 /**//*std::cout << "MEMCACHE : from proc " << m_cmd_read_srcid_fifo.read() 2256 << " | @ " << std::hex << m_cmd_read_addr_fifo.read() 2257 << " | LL" << std::endl;*/ 2258 r_read_ll_done = true; 2259 } 2260 r_read_is_cnt = entry.is_cnt; 2261 r_read_dirty = entry.dirty; 2262 r_read_lock = entry.lock; 2263 r_read_tag = entry.tag; 2264 r_read_way = way; 
2265 r_read_count = entry.count; 2266 r_read_copy = entry.owner.srcid; 2267 2268 r_read_copy_inst = entry.owner.inst; 2269 r_read_ptr = entry.ptr; // pointer to the heap 2270 2271 // check if this is a cached read, this means pktid is either 2272 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2273 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2274 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2275 2276 if(entry.valid) // hit 2277 { 2278 r_read_coherent = entry.cache_coherent; 2279 if (entry.cache_coherent or (entry.count == 0))// or (entry.owner.srcid == m_cmd_read_srcid_fifo.read())) //hit on a WT line or the owner has no more copy (if LL, the owner must be invalidated even if he made the request) 2280 { 2281 // test if we need to register a new copy in the heap 2282 if(entry.is_cnt || (entry.count == 0) || !cached_read) 2283 { 2284 r_read_fsm = READ_DIR_HIT; 2285 } 2286 else 2287 { 2288 //std::cout << "is LL = " << ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) << std::endl; 2289 //std::cout << "coherent = " << entry.cache_coherent << " | count = " << std::dec << entry.count << " | cached = " << cached_read << std::endl; 2290 r_read_fsm = READ_HEAP_REQ; 2291 } 2292 } 2293 else //hit on a WB line owned by an other proc 2294 { 2295 r_read_fsm = READ_IVT_LOCK; 2296 } 2297 } 2298 else // miss 2299 { 2300 r_read_fsm = READ_TRT_LOCK; 2301 } 2302 2303 #if DEBUG_MEMC_READ 2304 if(m_debug) 2305 { 2306 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2307 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2308 << " / hit = " << std::dec << entry.valid 2309 << " / count = " <<std::dec << entry.count 2310 << " / is_cnt = " << entry.is_cnt 2311 << " / is_coherent = " << entry.cache_coherent; 2312 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2313 else std::cout << std::endl; 2314 } 2315 #endif 2316 break; 2317 } 2318 2319 /////////////////// 2320 case READ_IVT_LOCK: 2321 { 2322 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_READ) 2323 { 2324 size_t index; 2325 addr_t nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2326 /*std::cout << "nline = " << std::dec << nline << std::endl 2327 << "inval en cours sur la ligne = " << m_upt.search_inval(nline, index) << std::endl 2328 << "UPT full = " << m_upt.is_full() << std::endl 2329 << "CC_SEND req = " << r_read_to_cc_send_req.read() << std::endl 2330 << "CLENAUP req = " <<r_read_to_cleanup_req.read() << std::endl;*/ 2331 if(m_ivt.search_inval(nline, index) or m_ivt.is_full() or r_read_to_cc_send_req.read() or r_read_to_cleanup_req.read()) //Check pending inval 2332 { 2333 r_read_fsm = READ_WAIT; 2334 #if DEBUG_MEMC_READ 2335 if(m_debug) 2336 { 2337 std::cout 2338 << " <MEMC " << name() << " READ_IVT_LOCK>" 2339 << " Wait cleanup completion" 2340 << std::endl; 2341 } 2342 #endif 2343 } 2344 else 2345 { 2346 r_read_to_cc_send_req = true; 2347 r_read_to_cc_send_dest = r_read_copy.read(); 2348 r_read_to_cc_send_nline = nline; 2349 r_read_to_cc_send_inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2350 r_read_to_cleanup_req = true; 2351 r_read_to_cleanup_nline = nline; 2352 r_read_to_cleanup_srcid = m_cmd_read_srcid_fifo.read(); 2353 r_read_to_cleanup_length = m_cmd_read_length_fifo.read(); 2354 r_read_to_cleanup_first_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2355 r_read_to_cleanup_cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2356 r_read_to_cleanup_addr = m_cmd_read_addr_fifo.read(); 2357 r_read_to_cleanup_is_ll= ((m_cmd_read_pktid_fifo.read() 
& 0x7) == TYPE_LL); 2358 r_read_to_cleanup_ll_key = r_read_ll_key.read(); 2359 //std::cout << "cleanup req (read) on line " << nline << " /on proc " << r_read_copy.read() << std::endl; 2360 2361 m_ivt.set(false, // it's an inval transaction 2362 false, // it's not a broadcast 2363 false, // it needs a read response 2364 false, // no acknowledge required 2365 m_cmd_read_srcid_fifo.read(), 2366 m_cmd_read_trdid_fifo.read(), 2367 m_cmd_read_pktid_fifo.read(), 2368 nline, 2369 0x1, //Expect only one answer 2370 index); 2371 2372 cmd_read_fifo_get = true; 2373 r_read_fsm = READ_IDLE; 2374 #if DEBUG_MEMC_READ 2375 if(m_debug) 2376 { 2377 std::cout 2378 << " <MEMC " << name() << " READ_IVT_LOCK>" 2379 << " Inval req on an NCC line" 2380 << std::endl; 2381 } 2382 #endif 2383 } 2384 } 2385 2386 2387 break; 2388 } 2389 2390 ////////////////// 2391 case READ_WAIT://Release the locks 2392 { 2393 r_read_fsm = READ_DIR_REQ; 2394 #if DEBUG_MEMC_READ 2395 if(m_debug) 2396 { 2397 std::cout 2398 << " <MEMC " << name() << " READ_WAIT>" << std::endl; 2399 } 2400 #endif 2401 break; 2402 } 2403 /////////////////// 2404 case READ_DIR_HIT: // read data in cache & update the directory 2405 // we enter this state in 3 cases: 2406 // - the read request is uncachable 2407 // - the cache line is in counter mode 2408 // - the cache line is valid but not replicated 2409 2410 { 2411 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2412 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2413 // check if this is an instruction read, this means pktid is either 2414 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2415 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2416 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2417 // check if this is a cached read, this means pktid is either 2418 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2419 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2420 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2421 bool is_cnt = r_read_is_cnt.read(); 2422 2423 // read data in the cache 2424 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2425 size_t way = r_read_way.read(); 2426 2427 m_cache_data.read_line(way, set, r_read_data); 2428 2429 // update the cache directory 2430 DirectoryEntry entry; 2431 entry.valid = true; 2432 entry.cache_coherent = r_read_coherent.read() or inst_read or (!(cached_read)) or (r_read_copy.read() != m_cmd_read_srcid_fifo.read()); 2433 r_read_coherent = r_read_coherent.read() or inst_read or (!(cached_read)) or (r_read_copy.read() != m_cmd_read_srcid_fifo.read()); 2434 entry.is_cnt = is_cnt; 2435 entry.dirty = r_read_dirty.read(); 2436 entry.tag = r_read_tag.read(); 2437 entry.lock = r_read_lock.read(); 2438 entry.ptr = r_read_ptr.read(); 2439 if(cached_read) // Cached read => we must update the copies 2440 { 2441 if(!is_cnt) // Not counter mode 2442 { 2443 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2444 entry.owner.inst = inst_read; 2445 entry.count = r_read_count.read() + 1; 2446 } 2447 else // Counter mode 2448 { 2449 entry.owner.srcid = 0; 2450 entry.owner.inst = false; 2451 entry.count = r_read_count.read() + 1; 2452 } 2453 } 2454 else // Uncached read 2455 { 2456 entry.owner.srcid = r_read_copy.read(); 2457 entry.owner.inst = r_read_copy_inst.read(); 2458 entry.count = r_read_count.read(); 2459 } 2460 2461 #if DEBUG_MEMC_READ 2462 if(m_debug) 2463 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2464 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2465 << " / set = " << 
std::dec << set 2466 << " / way = " << way 2467 << " / owner_id = " << std::hex << entry.owner.srcid 2468 << " / owner_ins = " << std::dec << entry.owner.inst 2469 << " / coherent = " << entry.cache_coherent 2470 << " / count = " << entry.count 2471 << " / is_cnt = " << entry.is_cnt << std::endl; 2472 #endif 2473 2474 m_cache_directory.write(set, way, entry); 2475 r_read_fsm = READ_RSP; 2476 break; 2477 } 2478 /////////////////// 2479 case READ_HEAP_REQ: // Get the lock to the HEAP directory 2480 { 2481 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2482 { 2483 r_read_fsm = READ_HEAP_LOCK; 2484 m_cpt_read_fsm_n_heap_lock++; 2485 } 2486 2487 #if DEBUG_MEMC_READ 2488 if(m_debug) 2489 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 2490 << " Requesting HEAP lock " << std::endl; 2491 #endif 2492 2493 m_cpt_read_fsm_heap_lock++; 2494 2495 break; 2496 } 2497 2498 //////////////////// 2499 case READ_HEAP_LOCK: // read data in cache, update the directory 2500 // and prepare the HEAP update 2501 { 2502 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2503 { 2504 // enter counter mode when we reach the limit of copies or the heap is full 2505 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 2506 2507 if (!r_read_coherent.read()) 2508 { 2509 std::cout << "Address = " << std::hex << (m_cmd_read_addr_fifo.read()) << std::dec << " |count = " << r_read_count.read() << std::endl; 2510 } 2511 assert (r_read_coherent.read() && "accÚs au heap sur ncc"); 2512 // read data in the cache 2513 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2514 size_t way = r_read_way.read(); 2515 2516 m_cache_data.read_line(way, set, r_read_data); 2517 2518 // update the cache directory 2519 DirectoryEntry entry; 2520 entry.valid = true; 2521 entry.cache_coherent = r_read_coherent.read(); 2522 entry.is_cnt = go_cnt; 2523 entry.dirty = r_read_dirty.read(); 2524 entry.tag = r_read_tag.read(); 2525 entry.lock = r_read_lock.read(); 2526 entry.count = r_read_count.read() + 1; 2527 2528 if(not go_cnt) // Not entering counter mode 2529 { 2530 entry.owner.srcid = r_read_copy.read(); 2531 entry.owner.inst = r_read_copy_inst.read(); 2532 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 2533 } 2534 else // Entering Counter mode 2535 { 2536 entry.owner.srcid = 0; 2537 entry.owner.inst = false; 2538 entry.ptr = 0; 2539 } 2540 2541 m_cache_directory.write(set, way, entry); 2542 2543 // prepare the heap update (add an entry, or clear the linked list) 2544 if(not go_cnt) // not switching to counter mode 2545 { 2546 // We test if the next free entry in the heap is the last 2547 HeapEntry heap_entry = m_heap.next_free_entry(); 2548 r_read_next_ptr = heap_entry.next; 2549 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 2550 2551 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 2552 } 2553 else // switching to counter mode 2554 { 2555 if(r_read_count.read() >1) // heap must be cleared 2556 { 2557 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2558 r_read_next_ptr = m_heap.next_free_ptr(); 2559 m_heap.write_free_ptr(r_read_ptr.read()); 2560 2561 if(next_entry.next == r_read_ptr.read()) // last entry 2562 { 2563 r_read_fsm = READ_HEAP_LAST; // erase the entry 2564 } 2565 else // not the last entry 2566 { 2567 r_read_ptr = next_entry.next; 2568 r_read_fsm = READ_HEAP_ERASE; // erase the list 2569 } 2570 } 2571 else // the heap is not used / nothing to do 2572 { 2573 r_read_fsm = READ_RSP; 2574 } 2575 } 2576 2577 #if DEBUG_MEMC_READ 2578 if(m_debug) 2579 std::cout << " 
<MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 2580 << " tag = " << std::hex << entry.tag 2581 << " set = " << std::dec << set 2582 << " way = " << way 2583 << " count = " << entry.count 2584 << " is_cnt = " << entry.is_cnt << std::endl; 2585 #endif 2586 } 2587 else 2588 { 2589 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK" 2590 << "Bad HEAP allocation" << std::endl; 2591 exit(0); 2592 } 2593 break; 2594 } 2595 ///////////////////// 2596 case READ_HEAP_WRITE: // add an entry in the heap 2597 { 2598 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2599 { 2600 HeapEntry heap_entry; 2601 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2602 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2603 2604 if(r_read_count.read() == 1) // creation of a new linked list 2605 { 2606 heap_entry.next = m_heap.next_free_ptr(); 2607 } 2608 else // head insertion in existing list 2609 { 2610 heap_entry.next = r_read_ptr.read(); 2611 } 2612 m_heap.write_free_entry(heap_entry); 2613 m_heap.write_free_ptr(r_read_next_ptr.read()); 2614 if(r_read_last_free.read()) m_heap.set_full(); 2615 2616 r_read_fsm = READ_RSP; 2617 2618 #if DEBUG_MEMC_READ 2619 if(m_debug) 2620 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 2621 << " owner_id = " << std::hex << heap_entry.owner.srcid 2622 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 2623 #endif 2624 } 2625 else 2626 { 2627 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 2628 << "Bad HEAP allocation" << std::endl; 2629 exit(0); 2630 } 2631 break; 2632 } 2633 ///////////////////// 2634 case READ_HEAP_ERASE: 2635 { 2636 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2637 { 2638 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2639 if(next_entry.next == r_read_ptr.read()) 2640 { 2641 r_read_fsm = READ_HEAP_LAST; 2642 } 2643 else 2644 { 2645 r_read_ptr = next_entry.next; 2646 r_read_fsm = READ_HEAP_ERASE; 2647 } 2648 } 2649 else 2650 { 2651 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 2652 << "Bad HEAP allocation" << std::endl; 2653 exit(0); 2654 } 2655 break; 2656 } 2657 2658 //////////////////// 2659 case READ_HEAP_LAST: 2660 { 2661 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2662 { 2663 HeapEntry last_entry; 2664 last_entry.owner.srcid = 0; 2665 last_entry.owner.inst = false; 2666 2667 if(m_heap.is_full()) 2668 { 2669 last_entry.next = r_read_ptr.read(); 2670 m_heap.unset_full(); 2671 } 2672 else 2673 { 2674 last_entry.next = r_read_next_ptr.read(); 2675 } 2676 m_heap.write(r_read_ptr.read(),last_entry); 2677 r_read_fsm = READ_RSP; 2678 } 2679 else 2680 { 2681 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 2682 << "Bad HEAP allocation" << std::endl; 2683 exit(0); 2684 } 2685 break; 2686 } 2687 ////////////// 2688 case READ_RSP: // request the TGT_RSP FSM to return data 2689 { 2690 if(!r_read_to_tgt_rsp_req) 2691 { 2692 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2693 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2694 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2695 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2696 /*RWT*/ 2697 //BUG pktid 2698 if (r_read_coherent.read()) 2699 { 2700 r_read_to_tgt_rsp_pktid = 0x0 + m_cmd_read_pktid_fifo.read(); 2701 //std::cout << "READ RSP COHERENT on word" << std::hex << m_x[(addr_t) m_cmd_read_addr_fifo.read()] << std::dec << std::endl; 2702 } 2703 else 2704 { 2705 
r_read_to_tgt_rsp_pktid = 0x8 + m_cmd_read_pktid_fifo.read(); 2706 } 2707 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2708 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2709 cmd_read_fifo_get = true; 2710 r_read_to_tgt_rsp_req = true; 2711 r_read_fsm = READ_IDLE; 2712 2713 #if DEBUG_MEMC_READ 2714 if(m_debug) 2715 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 2716 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 2717 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 2718 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2719 #endif 2720 } 2721 break; 2722 } 2723 /////////////////// 2724 case READ_TRT_LOCK: // read miss : check the Transaction Table 2725 { 2726 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2727 { 2728 size_t index = 0; 2729 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2730 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2731 bool hit_write = m_trt.hit_write(m_nline[addr]); 2732 bool wok = !m_trt.full(index); 2733 2734 if(hit_read or !wok or hit_write) // missing line already requested or no space 2735 { 2736 if(!wok) 2737 { 2738 m_cpt_trt_full++; 2739 } 2740 if(hit_read or hit_write) m_cpt_trt_rb++; 2741 r_read_fsm = READ_IDLE; 2742 } 2743 else // missing line is requested to the XRAM 2744 { 2745 m_cpt_read_miss++; 2746 r_read_trt_index = index; 2747 r_read_fsm = READ_TRT_SET; 2748 } 2749 2750 #if DEBUG_MEMC_READ 2751 if(m_debug) 2752 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 2753 << " hit_read = " << hit_read 2754 << " / hit_write = " << hit_write 2755 << " / full = " << !wok << std::endl; 2756 m_cpt_read_fsm_n_trt_lock++; 2757 #endif 2758 } 2759 2760 m_cpt_read_fsm_trt_lock++; 2761 2762 break; 2763 } 2764 2765 ////////////////// 2766 case READ_TRT_SET: // register get transaction in TRT 2767 { 2768 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2769 { 2770 m_trt.set(r_read_trt_index.read(), 2771 true, 2772 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2773 m_cmd_read_srcid_fifo.read(), 2774 m_cmd_read_trdid_fifo.read(), 2775 m_cmd_read_pktid_fifo.read(), 2776 true, 2777 m_cmd_read_length_fifo.read(), 2778 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2779 std::vector<be_t> (m_words,0), 2780 std::vector<data_t> (m_words,0), 2781 r_read_ll_key.read()); 2782 2783 #if DEBUG_MEMC_READ 2784 if(m_debug) 2785 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TGT:" 2786 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2787 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2788 #endif 2789 r_read_fsm = READ_TRT_REQ; 2790 } 2791 break; 2792 } 2793 2794 ////////////////// 2795 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2796 { 2797 if(not r_read_to_ixr_cmd_req) 2798 { 2799 cmd_read_fifo_get = true; 2800 r_read_to_ixr_cmd_req = true; 2801 //r_read_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2802 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2803 r_read_fsm = READ_IDLE; 2804 2805 #if DEBUG_MEMC_READ 2806 if(m_debug) 2807 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 2808 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2809 #endif 2810 } 2811 break; 2812 } 2813 } // end switch read_fsm 2814 2815 /////////////////////////////////////////////////////////////////////////////////// 2816 // WRITE FSM 2817 /////////////////////////////////////////////////////////////////////////////////// 
2818 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 2819 // All addresses in a burst must be in the same cache line. 2820 // A complete write burst is consumed in the FIFO & copied to a local buffer. 2821 // Then the FSM takes the lock protecting the cache directory, to check 2822 // if the line is in the cache. 2823 // 2824 // - In case of HIT, the cache is updated. 2825 // If there is no other copy, an acknowledge response is immediately 2826 // returned to the writing processor. 2827 // If the data is cached by other processors, a coherence transaction must 2828 // be launched (sc requests always require a coherence transaction): 2829 // It is a multicast update if the line is not in counter mode: the WRITE FSM 2830 // takes the lock protecting the Update Table (UPT) to register this transaction. 2831 // If the UPT is full, it releases the lock(s) and retries. Then, it sends 2832 // a multi-update request to all owners of the line (but the writer), 2833 // through the CC_SEND FSM. In case of coherence transaction, the WRITE FSM 2834 // does not respond to the writing processor, as this response will be sent by 2835 // the MULTI_ACK FSM when all update responses have been received. 2836 // It is a broadcast invalidate if the line is in counter mode: The line 2837 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2838 // after registration in TRT. 2839 // 2840 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2841 // table (TRT). If a read transaction to the XRAM for this line already exists, 2842 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2843 // the WRITE FSM registers a new transaction in TRT, and sends a GET request 2844 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2845 // Finally, the WRITE FSM returns an acknowledge response to the writing processor.
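// ------------------------------------------------------------------------------------
// Illustrative sketch (not part of the component): the coherence decision summarised
// above, as taken further down in WRITE_DIR_LOCK / WRITE_DIR_HIT. The enum values and
// the helper name write_coherence_action() are hypothetical.
enum write_action_e { NO_COHERENCE,       // write in cache and acknowledge immediately
                      MULTI_UPDATE,       // register in UPT, update every other copy
                      BROADCAST_INVAL };  // register in IVT, broadcast inval, PUT to XRAM

write_action_e write_coherence_action( bool     is_cnt,       // line in counter mode
                                        unsigned count,        // number of registered copies
                                        bool     writer_owns,  // the first copy belongs to the writer
                                        bool     is_sc )       // SC command (always coherent)
{
    if ( is_cnt and (count != 0) )                     return BROADCAST_INVAL;
    if ( count == 0 )                                  return NO_COHERENCE;
    if ( writer_owns and (count == 1) and not is_sc )  return NO_COHERENCE;
    return MULTI_UPDATE;
}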
2846 ///////////////////////////////////////////////////////////////////////////////////// 2847 2848 switch(r_write_fsm.read()) 2849 { 2850 //////////////// 2851 case WRITE_IDLE: // copy first word of a write burst in local buffer 2852 { 2853 if(m_cmd_write_addr_fifo.rok()) 2854 { 2855 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2856 m_cpt_sc++; 2857 else 2858 { 2859 m_cpt_write++; 2860 m_cpt_write_cells++; 2861 } 2862 2863 // consume a word in the FIFO & write it in the local buffer 2864 cmd_write_fifo_get = true; 2865 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2866 2867 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2868 r_write_word_index = index; 2869 r_write_word_count = 0; 2870 r_write_data[index] = m_cmd_write_data_fifo.read(); 2871 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2872 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2873 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2874 2875 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2876 { 2877 assert( not m_cmd_write_eop_fifo.read() && 2878 "MEMC ERROR in WRITE_IDLE state: " 2879 "invalid packet format for SC command"); 2880 2881 r_write_sc_key = m_cmd_write_data_fifo.read(); 2882 } 2883 2884 // initialize the be field for all words 2885 for(size_t word=0 ; word<m_words ; word++) 2886 { 2887 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2888 else r_write_be[word] = 0x0; 2889 } 2890 2891 if (m_cmd_write_eop_fifo.read()) 2892 { 2893 r_write_fsm = WRITE_DIR_REQ; 2894 } 2895 else 2896 { 2897 r_write_fsm = WRITE_NEXT; 2898 } 2899 2900 #if DEBUG_MEMC_WRITE 2901 if(m_debug) 2902 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request " 2903 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 2904 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 2905 << " / data = " << m_cmd_write_data_fifo.read() 2906 << " / pktid = " << m_cmd_write_pktid_fifo.read() 2907 << std::endl; 2908 #endif 2909 } 2910 break; 2911 } 2912 2913 //////////////// 2914 case WRITE_NEXT: // copy next word of a write burst in local buffer 2915 { 2916 if(m_cmd_write_addr_fifo.rok()) 2917 { 2918 2919 m_cpt_write_cells++; 2920 2921 // check that the next word is in the same cache line 2922 assert((m_nline[(addr_t)(r_write_address.read())] == 2923 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) && 2924 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2925 2926 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2927 bool is_sc = ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC); 2928 2929 // check that SC command has constant address 2930 assert((not is_sc or (index == r_write_word_index)) && 2931 "MEMC ERROR in WRITE_NEXT state: " 2932 "the address must be constant on a SC command"); 2933 2934 // check that SC command has two flits 2935 assert((not is_sc or m_cmd_write_eop_fifo.read()) && 2936 "MEMC ERROR in WRITE_NEXT state: " 2937 "invalid packet format for SC command"); 2938 // consume a word in the FIFO & write it in the local buffer 2939 cmd_write_fifo_get = true; 2940 2941 r_write_be[index] = m_cmd_write_be_fifo.read(); 2942 r_write_data[index] = m_cmd_write_data_fifo.read(); 2943 2944 // the first flit of a SC command is the reservation key and 2945 // therefore it must not be counted as a data to write 2946 if (not is_sc) 2947 { 2948 r_write_word_count = r_write_word_count.read() + 1; 2949 } 2950 2951 if (m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2952 2953 #if DEBUG_MEMC_WRITE 2954 if (m_debug) 2955 std::cout << " <MEMC " << name() 2956 << 
" WRITE_NEXT> Write another word in local buffer" 2957 << std::endl; 2958 #endif 2959 } 2960 break; 2961 } 2962 2963 //////////////////// 2964 case WRITE_DIR_REQ: 2965 { 2966 // Get the lock to the directory 2967 // and access the llsc_global_table 2968 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2969 { 2970 2971 if( (r_write_pktid.read() & 0x7) == TYPE_SC ) 2972 { 2973 // test address and key match of the SC command on the 2974 // LL/SC table without removing reservation. The reservation 2975 // will be erased after in this FSM. 2976 bool sc_success = m_llsc_table.check(r_write_address.read(), 2977 r_write_sc_key.read()); 2978 2979 r_write_sc_fail = not sc_success; 2980 2981 if(not sc_success) r_write_fsm = WRITE_RSP; 2982 else r_write_fsm = WRITE_DIR_LOCK; 2983 2984 2985 //std::cout << " <MEMC " << name() 2986 // << " WRITE_DIR_REQ> sc fail = " << not sc_success 2987 // << " / addr" << std::hex << r_write_address.read() 2988 // << " / key" << std::hex << r_write_sc_key.read() 2989 // << " / srcid" << std::hex << r_write_srcid.read() 2990 // << std::endl; 2991 2992 break; 2993 } 2994 2995 /////////////////////////////////////////////////////////////////////// 2996 // WRITE command treatment or SC command returning from the WAIT state 2997 // In the second case, we must access the LL/SC global table to 2998 // erase any possible new reservation when we release the lock on the 2999 // directory 3000 #define L2 soclib::common::uint32_log2 3001 addr_t min = r_write_address.read(); 3002 addr_t max = r_write_address.read() + 3003 ((r_write_word_count.read()) << L2(vci_param_int::B)); 3004 #undef L2 3005 m_llsc_table.sw(min, max); 3006 3007 r_write_fsm = WRITE_DIR_LOCK; 3008 m_cpt_write_fsm_n_dir_lock++; 3009 } 3010 3011 #if DEBUG_MEMC_WRITE 3012 if(m_debug) 3013 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock " 3014 << std::endl; 3015 #endif 3016 3017 m_cpt_write_fsm_dir_lock++; 3018 3019 break; 3020 } 3021 3022 //////////////////// 3023 case WRITE_DIR_LOCK: // access directory to check hit/miss 3024 { 3025 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3026 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 3027 size_t way = 0; 3028 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 3029 3030 if(entry.valid) // hit 3031 { 3032 // copy directory entry in local buffer in case of hit 3033 r_write_is_cnt = entry.is_cnt; 3034 r_write_lock = entry.lock; 3035 r_write_tag = entry.tag; 3036 r_write_copy = entry.owner.srcid; 3037 r_write_copy_inst = entry.owner.inst; 3038 r_write_count = entry.count; 3039 r_write_ptr = entry.ptr; 3040 r_write_way = way; 3041 3042 r_write_coherent = entry.cache_coherent; 3043 3044 if (entry.cache_coherent or (entry.owner.srcid == r_write_srcid.read()) or (entry.count == 0)) // hit WT 3045 { 3046 if(entry.is_cnt && entry.count) 3047 { 3048 r_write_fsm = WRITE_BC_DIR_READ; 3049 } 3050 else 3051 { 3052 r_write_fsm = WRITE_DIR_HIT; 3053 } 3054 } 3055 else 3056 { 3057 if (r_write_to_cleanup_req.read())//inval already sent 3058 { 3059 r_write_fsm = WRITE_WAIT; 3060 } 3061 else // hit on a NCC line with a different owner 3062 { 3063 r_write_fsm = WRITE_IVT_LOCK_HIT_WB; 3064 // if(r_write_pktid.read() == TYPE_SC) 3065 // { 3066 // r_write_sc_fail = true; 3067 // } 3068 } 3069 } 3070 } 3071 else // miss 3072 { 3073 r_write_fsm = WRITE_MISS_IVT_LOCK; 3074 } 3075 3076 #if DEBUG_MEMC_WRITE 3077 if(m_debug) 3078 { 3079 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: " 3080 << " address = " 
<< std::hex << r_write_address.read() 3081 << " / hit = " << std::dec << entry.valid 3082 << " / count = " << entry.count 3083 << " / is_cnt = " << entry.is_cnt ; 3084 if((r_write_pktid.read() & 0x7) == TYPE_SC) 3085 std::cout << " / SC access" << std::endl; 3086 else 3087 std::cout << " / SW access" << std::endl; 3088 } 3089 #endif 3090 break; 3091 } 3092 //////////////////// 3093 case WRITE_IVT_LOCK_HIT_WB: 3094 { 3095 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3096 { 3097 3098 size_t index = 0; 3099 bool match_inval; 3100 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3101 3102 //std::cout << "WRITE on NCC on line" << std::hex << nline << std::dec << std::endl; 3103 //if there is a matched updt req, we should wait until it is over. Because 3104 //we need the lastest updt data. 3105 match_inval = m_ivt.search_inval(nline, index); 3106 3107 assert ((r_write_count.read() == 1) and "NCC to CC req without copy"); 3108 if(!match_inval and !r_write_to_cc_send_req.read()) 3109 { 3110 r_write_to_cc_send_req = true; 3111 r_write_to_cc_send_dest = r_write_copy; 3112 r_write_to_cc_send_nline = nline; 3113 r_write_to_cleanup_req = true; 3114 r_write_to_cleanup_nline = nline; 3115 3116 m_ivt.set(false, // it's an inval transaction 3117 false, // it's not a broadcast 3118 true, // it needs no read response 3119 false, // no acknowledge required 3120 m_cmd_write_srcid_fifo.read(), //never read, used for debug 3121 m_cmd_write_trdid_fifo.read(), //never read, used for debug 3122 m_cmd_write_pktid_fifo.read(), //never read, used for debug 3123 nline, 3124 0x1, //Expect only one answer 3125 index); 3126 } 3127 r_write_fsm = WRITE_WAIT; 3128 #if DEBUG_MEMC_WRITE 3129 if(m_debug) 3130 { 3131 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> get access to the UPT: " 3132 << " Inval requested = " << (!match_inval and !r_write_to_cc_send_req.read()) 3133 << std::endl; 3134 } 3135 #endif 3136 } 3137 #if DEBUG_MEMC_WRITE 3138 if(m_debug) 3139 { 3140 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> failed to access to the UPT: " 3141 << std::endl; 3142 } 3143 #endif 3144 break; 3145 } 3146 3147 /////////////////// 3148 case WRITE_DIR_HIT: 3149 { 3150 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3151 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 3152 3153 // update the cache directory 3154 // update directory with Dirty bit 3155 DirectoryEntry entry; 3156 entry.valid = true; 3157 entry.cache_coherent = r_write_coherent.read(); 3158 entry.dirty = true; 3159 entry.tag = r_write_tag.read(); 3160 entry.is_cnt = r_write_is_cnt.read(); 3161 entry.lock = r_write_lock.read(); 3162 entry.owner.srcid = r_write_copy.read(); 3163 entry.owner.inst = r_write_copy_inst.read(); 3164 entry.count = r_write_count.read(); 3165 entry.ptr = r_write_ptr.read(); 3166 3167 size_t set = m_y[(addr_t)(r_write_address.read())]; 3168 size_t way = r_write_way.read(); 3169 3170 // update directory 3171 m_cache_directory.write(set, way, entry); 3172 3173 // owner is true when the the first registered copy is the writer itself 3174 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 3175 ) and not r_write_copy_inst.read()); 3176 3177 // no_update is true when there is no need for coherence transaction 3178 // (tests for sc requests) 3179 bool no_update = ((r_write_count.read() ==0) || //no need for coherency 3180 (owner && (r_write_count.read() ==1) && (r_write_pktid.read() != TYPE_SC))); //|| //writer is owner 3181 // ((r_write_pktid.read() == TYPE_SC) && 
r_write_sc_fail.read())); //SC failed: no data update 3182 3183 // write data in the cache if no coherence transaction 3184 if(no_update) 3185 { 3186 // SC command but zero copies 3187 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3188 { 3189 m_llsc_table.sc(r_write_address.read(), 3190 r_write_sc_key.read()); 3191 } 3192 3193 for(size_t word=0 ; word<m_words ; word++) 3194 { 3195 m_cache_data.write(way, set, word, r_write_data[word].read(), r_write_be[word].read()); 3196 3197 } 3198 } 3199 3200 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 3201 { 3202 r_write_count = r_write_count.read() - 1; 3203 } 3204 3205 if(no_update) 3206 // Write transaction completed 3207 { 3208 r_write_fsm = WRITE_RSP; 3209 } 3210 else 3211 // coherence update required 3212 { 3213 if(!r_write_to_cc_send_multi_req.read() and 3214 !r_write_to_cc_send_brdcast_req.read()) 3215 { 3216 r_write_fsm = WRITE_UPT_LOCK; 3217 } 3218 else 3219 { 3220 r_write_fsm = WRITE_WAIT; 3221 } 3222 } 3223 3224 #if DEBUG_MEMC_WRITE 3225 if(m_debug) 3226 { 3227 if(no_update) 3228 { 3229 std::cout << " <MEMC " << name() 3230 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 3231 << std::endl; 3232 } 3233 else 3234 { 3235 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 3236 << " is_cnt = " << r_write_is_cnt.read() 3237 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 3238 if(owner) std::cout << " ... but the first copy is the writer" << std::endl; 3239 } 3240 } 3241 #endif 3242 break; 3243 } 3244 //////////////////// 3245 case WRITE_UPT_LOCK: // Try to register the update request in UPT 3246 { 3247 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 3248 { 3249 bool wok = false; 3250 size_t index = 0; 3251 size_t srcid = r_write_srcid.read(); 3252 size_t trdid = r_write_trdid.read(); 3253 size_t pktid = r_write_pktid.read(); 3254 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3255 size_t nb_copies = r_write_count.read(); 3256 size_t set = m_y[(addr_t)(r_write_address.read())]; 3257 size_t way = r_write_way.read(); 3258 3259 3260 wok = m_upt.set(true, // it's an update transaction 3261 false, // it's not a broadcast 3262 true, // response required 3263 false, // no acknowledge required 3264 srcid, 3265 trdid, 3266 pktid, 3267 nline, 3268 nb_copies, 3269 index); 3270 if(wok) // write data in cache 3271 { 3272 3273 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3274 { 3275 m_llsc_table.sc(r_write_address.read(), 3276 r_write_sc_key.read()); 3277 } 3278 3279 for(size_t word=0 ; word<m_words ; word++) 3280 { 3281 m_cache_data.write(way, 3282 set, 3283 word, 3284 r_write_data[word].read(), 3285 r_write_be[word].read()); 3286 3287 } 3288 } 3289 3290 #if DEBUG_MEMC_WRITE 3291 if(m_debug and wok) 3292 { 3293 if(wok) 3294 { 3295 std::cout << " <MEMC " << name() 3296 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3297 << " nb_copies = " << r_write_count.read() << std::endl; 3298 } 3299 } 3300 #endif 3301 r_write_upt_index = index; 3302 // releases the lock protecting UPT and the DIR if no entry... 
3303 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3304 else r_write_fsm = WRITE_WAIT; 3305 m_cpt_write_fsm_n_upt_lock++; 3306 } 3307 3308 m_cpt_write_fsm_upt_lock++; 3309 3310 break; 3311 } 3312 3313 ///////////////////////// 3314 case WRITE_UPT_HEAP_LOCK: // get access to heap 3315 { 3316 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 3317 { 3318 3319 #if DEBUG_MEMC_WRITE 3320 if(m_debug) 3321 std::cout << " <MEMC " << name() 3322 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 3323 #endif 3324 r_write_fsm = WRITE_UPT_REQ; 3325 m_cpt_write_fsm_n_heap_lock++; 3326 } 3327 3328 m_cpt_write_fsm_heap_lock++; 3329 3330 break; 3331 } 3332 3333 ////////////////// 3334 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 3335 // and write the first copy in the FIFO 3336 // send the request if only one copy 3337 { 3338 assert(not r_write_to_cc_send_multi_req.read() and 3339 not r_write_to_cc_send_brdcast_req.read() and 3340 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 3341 "transaction in WRITE_UPT_REQ state" 3342 ); 3343 3344 3345 r_write_to_cc_send_brdcast_req = false; 3346 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3347 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3348 r_write_to_cc_send_index = r_write_word_index.read(); 3349 r_write_to_cc_send_count = r_write_word_count.read(); 3350 3351 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 3352 3353 size_t min = r_write_word_index.read(); 3354 size_t max = r_write_word_index.read() + r_write_word_count.read(); 3355 for(size_t i=min ; i<=max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 3356 3357 if((r_write_copy.read() != r_write_srcid.read()) or(r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 3358 { 3359 // put the first srcid in the fifo 3360 write_to_cc_send_fifo_put = true; 3361 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3362 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3363 if(r_write_count.read() == 1) 3364 { 3365 r_write_fsm = WRITE_IDLE; 3366 r_write_to_cc_send_multi_req = true; 3367 } 3368 else 3369 { 3370 r_write_fsm = WRITE_UPT_NEXT; 3371 r_write_to_dec = false; 3372 3373 } 3374 } 3375 else 3376 { 3377 r_write_fsm = WRITE_UPT_NEXT; 3378 r_write_to_dec = false; 3379 } 3380 3381 #if DEBUG_MEMC_WRITE 3382 if(m_debug) 3383 { 3384 std::cout 3385 << " <MEMC " << name() 3386 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3387 << " / srcid = " << std::dec << r_write_copy.read() 3388 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3389 3390 if(r_write_count.read() == 1) 3391 std::cout << " ... and this is the last" << std::endl; 3392 } 3393 #endif 3394 break; 3395 } 3396 3397 /////////////////// 3398 case WRITE_UPT_NEXT: 3399 { 3400 // continue the multi-update request to CC_SEND fsm 3401 // when there is copies in the heap. 3402 // if one copy in the heap is the writer itself 3403 // the corresponding SRCID should not be written in the fifo, 3404 // but the UPT counter must be decremented. 3405 // As this decrement is done in the WRITE_UPT_DEC state, 3406 // after the last copy has been found, the decrement request 3407 // must be registered in the r_write_to_dec flip-flop. 
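// ------------------------------------------------------------------------------------
// Illustrative sketch (not part of the component): the heap walk performed by the
// WRITE_UPT_NEXT state below (one entry per cycle in the real FSM). The writer's own
// copy receives no update request, but the skip is remembered so that the expected
// response counter in UPT can be decremented afterwards (WRITE_UPT_DEC). The structure
// and the helper name post_updates() are hypothetical.
#include <cstddef>
#include <functional>
#include <vector>

struct CopyEntry { size_t srcid; bool inst; size_t next; };   // simplified HEAP entry

// returns true when the UPT counter must be decremented (the writer was found in the list)
bool post_updates( const std::vector<CopyEntry>&     heap,
                   size_t                            head,
                   size_t                            writer_srcid,
                   bool                              is_sc,
                   std::function<void(size_t, bool)> post_update )
{
    bool   dec = false;
    size_t ptr = head;
    while ( true )
    {
        const CopyEntry& e = heap[ptr];
        if ( (e.srcid != writer_srcid) or is_sc or e.inst ) post_update( e.srcid, e.inst );
        else                                                dec = true;   // skip the writer itself
        if ( e.next == ptr ) break;                                       // self-loop marks the last copy
        ptr = e.next;
    }
    return dec;
}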
3408 3409 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3410 3411 bool dec_upt_counter; 3412 3413 if(((entry.owner.srcid != r_write_srcid.read()) or (r_write_pktid.read() == TYPE_SC)) or entry.owner.inst) // put the next srcid in the fifo 3414 { 3415 dec_upt_counter = false; 3416 write_to_cc_send_fifo_put = true; 3417 write_to_cc_send_fifo_inst = entry.owner.inst; 3418 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3419 3420 #if DEBUG_MEMC_WRITE 3421 if(m_debug) 3422 { 3423 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3424 << " / heap_index = " << std::dec << r_write_ptr.read() 3425 << " / srcid = " << std::dec << r_write_copy.read() 3426 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3427 if(entry.next == r_write_ptr.read()) 3428 std::cout << " ... and this is the last" << std::endl; 3429 } 3430 #endif 3431 } 3432 else // the UPT counter must be decremented 3433 { 3434 dec_upt_counter = true; 3435 3436 #if DEBUG_MEMC_WRITE 3437 if(m_debug) 3438 { 3439 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3440 << " / heap_index = " << std::dec << r_write_ptr.read() 3441 << " / srcid = " << std::dec << r_write_copy.read() 3442 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3443 if(entry.next == r_write_ptr.read()) 3444 std::cout << " ... and this is the last" << std::endl; 3445 } 3446 #endif 3447 } 3448 3449 // register the possible UPT decrement request 3450 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3451 3452 if(not m_write_to_cc_send_inst_fifo.wok()) 3453 { 3454 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3455 << "The write_to_cc_send_fifo should not be full" << std::endl 3456 << "as the depth should be larger than the max number of copies" << std::endl; 3457 exit(0); 3458 } 3459 3460 r_write_ptr = entry.next; 3461 3462 if(entry.next == r_write_ptr.read()) // last copy 3463 { 3464 r_write_to_cc_send_multi_req = true; 3465 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3466 else r_write_fsm = WRITE_IDLE; 3467 } 3468 break; 3469 } 3470 3471 ////////////////// 3472 case WRITE_UPT_DEC: 3473 { 3474 // If the initial writer has a copy, it should not 3475 // receive an update request, but the counter in the 3476 // update table must be decremented by the MULTI_ACK FSM. 
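// ------------------------------------------------------------------------------------
// Illustrative sketch (not part of the component): the bookkeeping behind the decrement
// requested below. Each UPT entry keeps the number of update responses still expected;
// it is decremented for every response received by the MULTI_ACK FSM (and once more for
// the writer's skipped copy), and the write acknowledge is sent when it reaches zero.
// The structure and the helper name upt_decrement() are hypothetical.
#include <cstddef>

struct UptEntry
{
    bool   valid;
    size_t srcid, trdid, pktid;   // identify the writer waiting for the acknowledge
    size_t count;                 // update responses still expected
};

// returns true when the transaction completes and the writer can be acknowledged
bool upt_decrement( UptEntry& e )
{
    e.count = e.count - 1;
    if ( e.count != 0 ) return false;
    e.valid = false;              // release the UPT entry
    return true;
}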
3477 3478 if(!r_write_to_multi_ack_req.read()) 3479 { 3480 r_write_to_multi_ack_req = true; 3481 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3482 r_write_fsm = WRITE_IDLE; 3483 } 3484 break; 3485 } 3486 3487 /////////////// 3488 case WRITE_RSP: 3489 { 3490 // Post a request to TGT_RSP FSM to acknowledge the write 3491 // In order to increase the Write requests throughput, 3492 // we don't wait to return in the IDLE state to consume 3493 // a new request in the write FIFO 3494 3495 if(!r_write_to_tgt_rsp_req.read()) 3496 { 3497 // post the request to TGT_RSP_FSM 3498 r_write_to_tgt_rsp_req = true; 3499 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3500 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3501 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3502 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3503 3504 // try to get a new write request from the FIFO 3505 if(m_cmd_write_addr_fifo.rok()) 3506 { 3507 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3508 m_cpt_sc++; 3509 else 3510 { 3511 m_cpt_write++; 3512 m_cpt_write_cells++; 3513 } 3514 3515 // consume a word in the FIFO & write it in the local buffer 3516 cmd_write_fifo_get = true; 3517 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3518 3519 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 3520 r_write_word_index = index; 3521 r_write_word_count = 0; 3522 r_write_data[index] = m_cmd_write_data_fifo.read(); 3523 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3524 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3525 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3526 3527 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3528 { 3529 assert( not m_cmd_write_eop_fifo.read() && 3530 "MEMC ERROR in WRITE_RSP state: " 3531 "invalid packet format for SC command"); 3532 3533 r_write_sc_key = m_cmd_write_data_fifo.read(); 3534 } 3535 3536 3537 // initialize the be field for all words 3538 for(size_t word=0 ; word<m_words ; word++) 3539 { 3540 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3541 else r_write_be[word] = 0x0; 3542 } 3543 3544 if( m_cmd_write_eop_fifo.read()) 3545 { 3546 r_write_fsm = WRITE_DIR_REQ; 3547 } 3548 else 3549 { 3550 r_write_fsm = WRITE_NEXT; 3551 } 3552 } 3553 else 3554 { 3555 r_write_fsm = WRITE_IDLE; 3556 } 3557 3558 #if DEBUG_MEMC_WRITE 3559 if(m_debug) 3560 { 3561 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3562 << " : rsrcid = " << std::hex << r_write_srcid.read() 3563 << " : rpktid = " << std::hex << r_write_pktid.read() 3564 << " : sc_fail= " << std::hex << r_write_sc_fail.read() 3565 << std::endl; 3566 if(m_cmd_write_addr_fifo.rok()) 3567 { 3568 std::cout << " New Write request: " 3569 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3570 << " / address = " << m_cmd_write_addr_fifo.read() 3571 << " / data = " << m_cmd_write_data_fifo.read() 3572 << " / pktid = " << m_cmd_write_pktid_fifo.read() 3573 << std::endl; 3574 } 3575 } 3576 #endif 3577 } 3578 break; 3579 } 3580 ///////////////////////// RWT 3581 case WRITE_MISS_IVT_LOCK: 3582 { 3583 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3584 { 3585 size_t index; 3586 if(m_ivt.search_inval(m_nline[(addr_t)(r_write_address.read())], index)) 3587 { 3588 r_write_fsm = WRITE_WAIT; 3589 } 3590 else 3591 { 3592 r_write_fsm = WRITE_MISS_TRT_LOCK; 3593 } 3594 } 3595 break; 3596 } 3597 3598 ///////////////////////// 3599 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 3600 { 3601 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3602 { 3603 
3604 #if DEBUG_MEMC_WRITE 3605 if(m_debug) 3606 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 3607 #endif 3608 size_t hit_index = 0; 3609 size_t wok_index = 0; 3610 addr_t addr = (addr_t) r_write_address.read(); 3611 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3612 bool hit_write = m_trt.hit_write(m_nline[addr]); 3613 bool wok = not m_trt.full(wok_index); 3614 3615 // wait an empty entry in TRT 3616 if(not hit_read and (not wok or hit_write)) 3617 { 3618 r_write_fsm = WRITE_WAIT; 3619 m_cpt_trt_full++; 3620 break; 3621 } 3622 3623 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3624 { 3625 m_llsc_table.sc(r_write_address.read(), 3626 r_write_sc_key.read()); 3627 } 3628 3629 //std::cout << "MEMCACHE : WRITE MISS at " << std::hex << (uint32_t)addr << std::dec << std::endl; 3630 if(hit_read) // register the modified data in TRT 3631 { 3632 r_write_trt_index = hit_index; 3633 r_write_fsm = WRITE_MISS_TRT_DATA; 3634 m_cpt_write_miss++; 3635 break; 3636 } 3637 3638 if(wok and not hit_write) // set a new entry in TRT 3639 { 3640 r_write_trt_index = wok_index; 3641 r_write_fsm = WRITE_MISS_TRT_SET; 3642 m_cpt_write_miss++; 3643 break; 3644 } 3645 assert(false && "VCI_MEM_CACHE ERROR: this part must not be reached"); 3646 m_cpt_write_fsm_n_trt_lock++; 3647 } 3648 3649 m_cpt_write_fsm_trt_lock++; 3650 3651 break; 3652 } 3653 3654 //////////////// 3655 case WRITE_WAIT: // release the locks protecting the shared ressources 3656 { 3657 3658 #if DEBUG_MEMC_WRITE 3659 if(m_debug) 3660 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 3661 #endif 3662 r_write_fsm = WRITE_DIR_REQ; 3663 break; 3664 } 3665 3666 //////////////////////// 3667 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 3668 { 3669 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3670 { 3671 std::vector<be_t> be_vector; 3672 std::vector<data_t> data_vector; 3673 be_vector.clear(); 3674 data_vector.clear(); 3675 for(size_t i=0; i<m_words; i++) 3676 { 3677 be_vector.push_back(r_write_be[i]); 3678 data_vector.push_back(r_write_data[i]); 3679 } 3680 m_trt.set(r_write_trt_index.read(), 3681 true, // read request to XRAM 3682 m_nline[(addr_t)(r_write_address.read())], 3683 r_write_srcid.read(), 3684 r_write_trdid.read(), 3685 r_write_pktid.read(), 3686 false, // not a processor read 3687 0, // not a single word 3688 0, // word index 3689 be_vector, 3690 data_vector); 3691 r_write_fsm = WRITE_MISS_XRAM_REQ; 3692 3693 #if DEBUG_MEMC_WRITE 3694 if(m_debug) 3695 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 3696 #endif 3697 } 3698 break; 3699 } 3700 3701 ///////////////////////// 3702 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 3703 { 3704 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3705 { 3706 std::vector<be_t> be_vector; 3707 std::vector<data_t> data_vector; 3708 be_vector.clear(); 3709 data_vector.clear(); 3710 for(size_t i=0; i<m_words; i++) 3711 { 3712 be_vector.push_back(r_write_be[i]); 3713 data_vector.push_back(r_write_data[i]); 3714 } 3715 m_trt.write_data_mask(r_write_trt_index.read(), 3716 be_vector, 3717 data_vector); 3718 r_write_fsm = WRITE_RSP; 3719 3720 #if DEBUG_MEMC_WRITE 3721 if(m_debug) 3722 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 3723 #endif 3724 } 3725 break; 3726 } 3727 3728 ///////////////////////// 3729 case WRITE_MISS_XRAM_REQ: // send a GET request to 
IXR_CMD FSM 3730 { 3731 if(not r_write_to_ixr_cmd_req.read()) 3732 { 3733 r_write_to_ixr_cmd_req = true; 3734 r_write_to_ixr_cmd_put = false; 3735 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3736 r_write_fsm = WRITE_RSP; 3737 3738 #if DEBUG_MEMC_WRITE 3739 if(m_debug) 3740 std::cout << " <MEMC " << name() << " WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 3741 #endif 3742 } 3743 break; 3744 } 3745 3746 /////////////////////// 3747 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3748 // the cache line must be erased in mem-cache, and written 3749 // into XRAM. we read the cache and complete the buffer 3750 { 3751 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3752 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3753 3754 // update local buffer 3755 size_t set = m_y[(addr_t)(r_write_address.read())]; 3756 size_t way = r_write_way.read(); 3757 for(size_t word=0 ; word<m_words ; word++) 3758 { 3759 data_t mask = 0; 3760 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3761 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3762 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3763 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3764 3765 // complete only if mask is not null (for energy consumption) 3766 r_write_data[word] = (r_write_data[word].read() & mask) | 3767 (m_cache_data.read(way, set, word) & ~mask); 3768 } // end for 3769 3770 r_write_fsm = WRITE_BC_TRT_LOCK; 3771 3772 #if DEBUG_MEMC_WRITE 3773 if(m_debug) 3774 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3775 << " Read the cache to complete local buffer" << std::endl; 3776 #endif 3777 break; 3778 } 3779 3780 /////////////////////// 3781 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3782 { 3783 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3784 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3785 3786 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3787 { 3788 size_t wok_index = 0; 3789 bool wok = not m_trt.full(wok_index); 3790 if(wok) // set a new entry in TRT 3791 { 3792 r_write_trt_index = wok_index; 3793 r_write_fsm = WRITE_BC_IVT_LOCK; 3794 } 3795 else // wait an empty entry in TRT 3796 { 3797 r_write_fsm = WRITE_WAIT; 3798 } 3799 3800 #if DEBUG_MEMC_WRITE 3801 if(m_debug) 3802 std::cout << " <MEMC " << name() << " WRITE_BC_TRT_LOCK> Check TRT" 3803 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3804 #endif 3805 m_cpt_write_fsm_n_trt_lock++; 3806 } 3807 3808 m_cpt_write_fsm_trt_lock++; 3809 3810 break; 3811 } 3812 3813 ////////////////////// 3814 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 3815 { 3816 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3817 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3818 3819 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3820 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3821 3822 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3823 { 3824 bool wok = false; 3825 size_t index = 0; 3826 size_t srcid = r_write_srcid.read(); 3827 size_t trdid = r_write_trdid.read(); 3828 size_t pktid = r_write_pktid.read(); 3829 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3830 size_t nb_copies = r_write_count.read(); 3831 3832 wok = m_ivt.set(false, // it's an inval transaction 3833 true, // it's a broadcast 3834 true, // response required 3835 false, // no acknowledge required 3836 srcid, 3837 trdid, 3838 pktid, 3839 nline, 3840 nb_copies, 
3841 index); 3842 /*ODCCP*/ //m_upt.print(); 3843 #if DEBUG_MEMC_WRITE 3844 if( m_debug and wok ) 3845 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3846 << " / nb_copies = " << r_write_count.read() << std::endl; 3847 #endif 3848 r_write_upt_index = index; 3849 3850 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL; 3851 else r_write_fsm = WRITE_WAIT; 3852 m_cpt_write_fsm_n_upt_lock++; 3853 } 3854 3855 m_cpt_write_fsm_upt_lock++; 3856 3857 break; 3858 } 3859 3860 //////////////////////// 3861 case WRITE_BC_DIR_INVAL: 3862 { 3863 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3864 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3865 3866 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3867 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3868 3869 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3870 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3871 3872 // register PUT request in TRT 3873 std::vector<data_t> data_vector; 3874 data_vector.clear(); 3875 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3876 m_trt.set( r_write_trt_index.read(), 3877 false, // PUT request 3878 m_nline[(addr_t)(r_write_address.read())], 3879 0, // unused 3880 0, // unused 3881 0, // unused 3882 false, // not a processor read 3883 0, // unused 3884 0, // unused 3885 std::vector<be_t> (m_words,0), 3886 data_vector ); 3887 3888 // invalidate directory entry 3889 DirectoryEntry entry; 3890 entry.valid = false; 3891 entry.cache_coherent= false; 3892 entry.dirty = false; 3893 entry.tag = 0; 3894 entry.is_cnt = false; 3895 entry.lock = false; 3896 entry.owner.srcid = 0; 3897 entry.owner.inst = false; 3898 entry.ptr = 0; 3899 entry.count = 0; 3900 size_t set = m_y[(addr_t)(r_write_address.read())]; 3901 size_t way = r_write_way.read(); 3902 3903 m_cache_directory.write(set, way, entry); 3904 3905 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3906 { 3907 m_llsc_table.sc(r_write_address.read(), 3908 r_write_sc_key.read()); 3909 } 3910 3911 3912 #if DEBUG_MEMC_WRITE 3913 if(m_debug) 3914 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ = " 3915 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 3916 #endif 3917 r_write_fsm = WRITE_BC_CC_SEND; 3918 break; 3919 } 3920 3921 ////////////////////// 3922 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3923 { 3924 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3925 { 3926 r_write_to_cc_send_multi_req = false; 3927 r_write_to_cc_send_brdcast_req = true; 3928 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3929 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3930 r_write_to_cc_send_index = 0; 3931 r_write_to_cc_send_count = 0; 3932 3933 for(size_t i=0; i<m_words ; i++) 3934 { 3935 r_write_to_cc_send_be[i]=0; 3936 r_write_to_cc_send_data[i] = 0; 3937 } 3938 r_write_fsm = WRITE_BC_XRAM_REQ; 3939 3940 #if DEBUG_MEMC_WRITE 3941 if(m_debug) 3942 std::cout << " <MEMC " << name() 3943 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3944 #endif 3945 } 3946 break; 3947 } 3948 3949 /////////////////////// 3950 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 3951 { 3952 if( not r_write_to_ixr_cmd_req.read() ) 3953 { 3954 r_write_to_ixr_cmd_req = true; 3955 r_write_to_ixr_cmd_put = true; 3956 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3957 
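// ------------------------------------------------------------------------------------
// Illustrative sketch (not part of the component): the byte-enable expansion used in
// WRITE_BC_DIR_READ above to merge the local write buffer with the cache line before
// the line is written back to XRAM. Each of the four byte-enable bits selects one byte
// of a 32-bit word. The helper names be_to_mask() and merge_word() are hypothetical.
#include <cstdint>

static uint32_t be_to_mask( uint32_t be )
{
    uint32_t mask = 0;
    if ( be & 0x1 ) mask |= 0x000000FF;
    if ( be & 0x2 ) mask |= 0x0000FF00;
    if ( be & 0x4 ) mask |= 0x00FF0000;
    if ( be & 0x8 ) mask |= 0xFF000000;
    return mask;
}

// merged word: written bytes taken from the buffer, remaining bytes from the cache
static uint32_t merge_word( uint32_t buf, uint32_t cache, uint32_t be )
{
    uint32_t mask = be_to_mask( be );
    return (buf & mask) | (cache & ~mask);
}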
r_write_fsm = WRITE_IDLE; 3958 3959 #if DEBUG_MEMC_WRITE 3960 if(m_debug) 3961 std::cout << " <MEMC " << name() 3962 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3963 #endif 3964 } 3965 break; 3966 } 3967 } // end switch r_write_fsm 3968 3969 /////////////////////////////////////////////////////////////////////// 3970 // IXR_CMD FSM 3971 /////////////////////////////////////////////////////////////////////// 3972 // The IXR_CMD fsm controls the command packets to the XRAM : 3973 // It handles requests from 5 FSMs with a round-robin priority: 3974 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3975 // 3976 // - It sends a single flit VCI read to the XRAM in case of 3977 // GET request posted by the READ, WRITE or CAS FSMs. 3978 // - It sends a multi-flit VCI write in case of PUT request posted by 3979 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 3980 // 3981 // For each client, there is three steps: 3982 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3983 // - IXR_CMD_*_TRT : access to TRT for address and data 3984 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3985 // 3986 // The address and data to be written (for a PUT) are stored in TRT. 3987 // The trdid field contains always the TRT entry index. 3988 //////////////////////////////////////////////////////////////////////// 3989 3990 switch(r_ixr_cmd_fsm.read()) 3991 { 3992 //////////////////////// 3993 case IXR_CMD_READ_IDLE: 3994 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3995 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3996 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3997 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3998 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3999 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4000 break; 4001 //////////////////////// 4002 case IXR_CMD_WRITE_IDLE: 4003 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4004 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4005 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4006 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4007 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4008 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4009 break; 4010 //////////////////////// 4011 case IXR_CMD_CAS_IDLE: 4012 if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4013 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4014 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4015 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4016 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4017 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4018 break; 4019 //////////////////////// 4020 case IXR_CMD_XRAM_IDLE: 4021 if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4022 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4023 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4024 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4025 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4026 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4027 
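// ------------------------------------------------------------------------------------
// Illustrative sketch (not part of the component): the rotating-priority arbitration
// implemented by the IXR_CMD_*_IDLE states around this point. Starting after the client
// served last, the first requesting client is granted; note that the FSM actually serves
// six clients (CLEANUP in addition to the five listed in the header above). The client
// order and the helper name next_ixr_client() are hypothetical.
#include <cstddef>

enum ixr_client_e { C_READ = 0, C_WRITE, C_CAS, C_CLEANUP, C_XRAM_RSP, C_CONFIG, C_NB };

// last  : client served by the previous transaction
// req[] : one pending-request flag per client
// returns the granted client, or C_NB when no request is pending
ixr_client_e next_ixr_client( ixr_client_e last, const bool req[C_NB] )
{
    for ( size_t i = 1 ; i <= C_NB ; i++ )
    {
        size_t c = (last + i) % C_NB;
        if ( req[c] ) return (ixr_client_e) c;
    }
    return C_NB;
}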
break; 4028 //////////////////////// 4029 case IXR_CMD_CLEANUP_IDLE: 4030 /*ODCCP*///std::cout << "IXR_CMD_CLEANUP_IDLE" << std::endl; 4031 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4032 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4033 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4034 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4035 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4036 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4037 break; 4038 ///////////////////////// 4039 case IXR_CMD_CONFIG_IDLE: 4040 { 4041 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4042 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4043 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4044 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4045 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4046 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4047 break; 4048 } 4049 4050 ////////////////////// 4051 case IXR_CMD_READ_TRT: // access TRT for a GET 4052 { 4053 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4054 { 4055 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 4056 r_ixr_cmd_address = entry.nline * (m_words<<2); 4057 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 4058 r_ixr_cmd_get = true; 4059 r_ixr_cmd_word = 0; 4060 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 4061 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4062 4063 #if DEBUG_MEMC_IXR_CMD 4064 if(m_debug) 4065 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 4066 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 4067 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4068 #endif 4069 } 4070 break; 4071 } 4072 /////////////////////// 4073 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 4074 { 4075 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4076 { 4077 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 4078 r_ixr_cmd_address = entry.nline * (m_words<<2); 4079 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 4080 r_ixr_cmd_get = entry.xram_read; 4081 r_ixr_cmd_word = 0; 4082 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 4083 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4084 4085 #if DEBUG_MEMC_IXR_CMD 4086 if(m_debug) 4087 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 4088 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 4089 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4090 #endif 4091 } 4092 break; 4093 } 4094 ///////////////////// 4095 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 4096 { 4097 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4098 { 4099 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 4100 r_ixr_cmd_address = entry.nline * (m_words<<2); 4101 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 4102 r_ixr_cmd_get = entry.xram_read; 4103 r_ixr_cmd_word = 0; 4104 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 4105 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4106 4107 #if DEBUG_MEMC_IXR_CMD 4108 if(m_debug) 4109 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 4110 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 4111 << " / address 
= " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4112 #endif 4113 } 4114 break; 4115 } 4116 ////////////////////// 4117 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 4118 { 4119 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4120 { 4121 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 4122 r_ixr_cmd_address = entry.nline * (m_words<<2); 4123 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 4124 r_ixr_cmd_get = false; 4125 r_ixr_cmd_word = 0; 4126 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 4127 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4128 4129 #if DEBUG_MEMC_IXR_CMD 4130 if(m_debug) 4131 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 4132 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 4133 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4134 #endif 4135 } 4136 break; 4137 } 4138 ////////////////////// 4139 case IXR_CMD_CLEANUP_TRT: // access TRT for a PUT 4140 { 4141 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4142 { 4143 4144 TransactionTabEntry entry = m_trt.read( r_cleanup_to_ixr_cmd_index.read() ); 4145 r_ixr_cmd_address = entry.nline * (m_words<<2); 4146 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 4147 r_ixr_cmd_get = false; 4148 r_ixr_cmd_word = 0; 4149 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 4150 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4151 4152 #if DEBUG_MEMC_IXR_CMD 4153 if(m_debug) 4154 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 4155 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 4156 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4157 #endif 4158 } 4159 break; 4160 } 4161 //////////////////////// 4162 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 4163 { 4164 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4165 { 4166 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 4167 r_ixr_cmd_address = entry.nline * (m_words<<2); 4168 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 4169 r_ixr_cmd_get = false; 4170 r_ixr_cmd_word = 0; 4171 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 4172 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4173 4174 #if DEBUG_MEMC_IXR_CMD 4175 if(m_debug) 4176 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 4177 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 4178 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4179 #endif 4180 } 4181 break; 4182 } 4183 4184 /////////////////////// 4185 case IXR_CMD_READ_SEND: // send a get from READ FSM 4186 { 4187 if(p_vci_ixr.cmdack) 4188 { 4189 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 4190 r_read_to_ixr_cmd_req = false; 4191 4192 #if DEBUG_MEMC_IXR_CMD 4193 if(m_debug) 4194 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 4195 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4196 #endif 4197 } 4198 break; 4199 } 4200 //////////////////////// 4201 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 4202 { 4203 if(p_vci_ixr.cmdack) 4204 { 4205 if(r_write_to_ixr_cmd_put.read()) // PUT 4206 { 4207 if(r_ixr_cmd_word.read() == (m_words - 2)) 4208 { 4209 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4210 r_write_to_ixr_cmd_req = false; 4211 } 4212 else 4213 { 4214 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4215 } 4216 4217 #if DEBUG_MEMC_IXR_CMD 4218 if(m_debug) 4219 std::cout << " <MEMC " 
<< name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 4220 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4221 #endif 4222 } 4223 else // GET 4224 { 4225 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4226 r_write_to_ixr_cmd_req = false; 4227 4228 #if DEBUG_MEMC_IXR_CMD 4229 if(m_debug) 4230 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 4231 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4232 #endif 4233 } 4234 } 4235 break; 4236 } 4237 ////////////////////// 4238 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 4239 { 4240 if(p_vci_ixr.cmdack) 4241 { 4242 if(r_cas_to_ixr_cmd_put.read()) // PUT 4243 { 4244 if(r_ixr_cmd_word.read() == (m_words - 2)) 4245 { 4246 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 4247 r_cas_to_ixr_cmd_req = false; 4248 } 4249 else 4250 { 4251 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4252 } 4253 4254 #if DEBUG_MEMC_IXR_CMD 4255 if(m_debug) 4256 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 4257 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4258 #endif 4259 } 4260 else // GET 4261 { 4262 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 4263 r_cas_to_ixr_cmd_req = false; 4264 4265 #if DEBUG_MEMC_IXR_CMD 4266 if(m_debug) 4267 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 4268 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4269 #endif 4270 } 4271 } 4272 break; 4273 } 4274 /////////////////////// 4275 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 4276 { 4277 if(p_vci_ixr.cmdack.read()) 4278 { 4279 if(r_ixr_cmd_word.read() == (m_words - 2)) 4280 { 4281 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 4282 r_xram_rsp_to_ixr_cmd_req = false; 4283 } 4284 else 4285 { 4286 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4287 } 4288 #if DEBUG_MEMC_IXR_CMD 4289 if(m_debug) 4290 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 4291 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4292 #endif 4293 } 4294 break; 4295 } 4296 4297 //////////////////////// 4298 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4299 { 4300 if(p_vci_ixr.cmdack.read()) 4301 { 4302 /*ODCCP*/ //std::cout << "IXR_CMD_CLEANUP_DATA_SEND STATE at cycle : " << std::dec << m_cpt_cycles << std::endl; 4303 if(r_ixr_cmd_word.read() == (m_words - 2)) 4304 { 4305 /*ODCCP*/ //std::cout << "IXR_CMD_CLEANUP_DATA_SEND GO TO IXR_CMD_CLEANUP_IDLE" << std::endl; 4306 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 4307 r_cleanup_to_ixr_cmd_req = false; 4308 //r_ixr_cmd_word = 0; 4309 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 4310 } 4311 else 4312 { 4313 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4314 } 4315 4316 #if DEBUG_MEMC_IXR_CMD 4317 if(m_debug) 4318 { 4319 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::endl; 4320 } 4321 #endif 4322 } 4323 break; 4324 } 4325 4326 ///////////////////////// 4327 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 4328 { 4329 if(p_vci_ixr.cmdack.read()) 4330 { 4331 if(r_ixr_cmd_word.read() == (m_words - 2)) 4332 { 4333 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 4334 r_config_to_ixr_cmd_req = false; 4335 } 4336 else 4337 { 4338 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4339 } 4340 4341 #if DEBUG_MEMC_IXR_CMD 4342 if(m_debug) 4343 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" 
<< std::hex 4344 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4345 #endif 4346 } 4347 break; 4348 } 4349 } // end switch r_ixr_cmd_fsm 4350 4351 //////////////////////////////////////////////////////////////////////////// 4352 // IXR_RSP FSM 4353 //////////////////////////////////////////////////////////////////////////// 4354 // The IXR_RSP FSM receives the response packets from the XRAM, 4355 // for both PUT transaction, and GET transaction. 4356 // 4357 // - A response to a PUT request is a single-cell VCI packet. 4358 // The TRT index is contained in the RTRDID field. 4359 // The FSM takes the lock protecting the TRT, and the corresponding 4360 // entry is erased. If an acknowledge was required (in case of software SYNC) 4361 // the r_config_rsp_lines counter is decremented. 4362 // 4363 // - A response to a GET request is a multi-cell VCI packet. 4364 // The TRT index is contained in the RTRDID field. 4365 // The N cells contain the N words of the cache line in the RDATA field. 4366 // The FSM takes the lock protecting the TRT to store the line in the TRT 4367 // (taking into account the write requests already stored in the TRT). 4368 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 4369 // signal is set to inform the XRAM_RSP FSM. 4370 /////////////////////////////////////////////////////////////////////////////// 4371 4372 switch(r_ixr_rsp_fsm.read()) 4373 { 4374 ////////////////// 4375 case IXR_RSP_IDLE: // test transaction type: PUT/GET 4376 { 4377 if(p_vci_ixr.rspval.read()) 4378 { 4379 r_ixr_rsp_cpt = 0; 4380 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 4381 4382 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 4383 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 4384 4385 if(p_vci_ixr.reop.read()) // PUT 4386 { 4387 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4388 } 4389 4390 else // GET transaction 4391 { 4392 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 4393 4394 #if DEBUG_MEMC_IXR_RSP 4395 if(m_debug) 4396 std::cout << " <MEMC " << name() 4397 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 4398 #endif 4399 } 4400 } 4401 break; 4402 } 4403 //////////////////////// 4404 case IXR_RSP_ACK: // Acknowledge PUT transaction 4405 { 4406 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4407 break; 4408 } 4409 4410 //////////////////////// 4411 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 4412 // decrease the line counter if config request 4413 { 4414 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4415 { 4416 size_t index = r_ixr_rsp_trt_index.read(); 4417 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4418 m_trt.erase(index); 4419 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4420 4421 // std::cout << "remove a valid slot in trt index = " << r_ixr_rsp_trt_index.read()<< std::endl; 4422 #if DEBUG_MEMC_IXR_RSP 4423 if(m_debug) 4424 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 4425 << r_ixr_rsp_trt_index.read() << std::endl; 4426 #endif 4427 } 4428 break; 4429 } 4430 ////////////////////// 4431 case IXR_RSP_TRT_READ: // write a 64 bits data in the TRT 4432 { 4433 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4434 { 4435 size_t index = r_ixr_rsp_trt_index.read(); 4436 size_t word = r_ixr_rsp_cpt.read(); 4437 bool eop = p_vci_ixr.reop.read(); 4438 wide_data_t data = p_vci_ixr.rdata.read(); 4439 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4440 4441 assert(((eop == (word == (m_words-2))) or error) and 4442 "MEMC ERROR in 
IXR_RSP_TRT_READ state : invalid response from XRAM"); 4443 4444 m_trt.write_rsp( index, 4445 word, 4446 data ); 4447 4448 r_ixr_rsp_cpt = word + 2; 4449 4450 if(eop) 4451 { 4452 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 4453 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4454 } 4455 4456 #if DEBUG_MEMC_IXR_RSP 4457 if(m_debug) 4458 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing a word in TRT : " 4459 << " index = " << std::dec << index 4460 << " / word = " << word 4461 << " / data = " << std::hex << data << std::endl; 4462 #endif 4463 m_cpt_ixr_fsm_n_trt_lock++; 4464 } 4465 m_cpt_ixr_fsm_trt_lock++; 4466 break; 4467 } 4468 } // end swich r_ixr_rsp_fsm 4469 4470 //////////////////////////////////////////////////////////////////////////// 4471 // XRAM_RSP FSM 4472 //////////////////////////////////////////////////////////////////////////// 4473 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 4474 // The cache line has been written in the TRT by the IXR_CMD_FSM. 4475 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 4476 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 4477 // of entries in the TRT, that are handled with a round-robin priority... 4478 // 4479 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 4480 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 4481 // It selects a cache slot and save the victim line in another local buffer 4482 // r_xram_rsp_victim_***. 4483 // It writes the line extracted from TRT in the cache. 4484 // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP 4485 // FSM to return the cache line to the registered processor. 4486 // If there is no empty slot, a victim line is evicted, and 4487 // invalidate requests are sent to the L1 caches containing copies. 4488 // If this line is dirty, the XRAM_RSP FSM send a request to the IXR_CMD 4489 // FSM to save the victim line to the XRAM, and register the write transaction 4490 // in the TRT (using the entry previously used by the read transaction). 
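// A minimal sketch of this round-robin selection, assuming a plain bool
// array rok[] standing for the r_ixr_rsp_to_xram_rsp_rok[] flip-flops and
// the index served last (helper and argument names are illustrative only):
//
//     size_t select_ready_trt_entry( const bool* rok, size_t lines, size_t last )
//     {
//         for ( size_t i = 0 ; i < lines ; i++ )
//         {
//             size_t index = (last + 1 + i) % lines;  // start after the last served entry
//             if ( rok[index] ) return index;         // first ready entry wins
//         }
//         return last;                                // no entry ready this cycle
//     }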
4491 /////////////////////////////////////////////////////////////////////////////// 4492 4493 switch(r_xram_rsp_fsm.read()) 4494 { 4495 /////////////////// 4496 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 4497 { 4498 size_t old = r_xram_rsp_trt_index.read(); 4499 size_t lines = m_trt_lines; 4500 for(size_t i=0 ; i<lines ; i++) 4501 { 4502 size_t index = (i+old+1) %lines; 4503 if(r_ixr_rsp_to_xram_rsp_rok[index]) 4504 { 4505 r_xram_rsp_trt_index = index; 4506 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4507 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4508 4509 #if DEBUG_MEMC_XRAM_RSP 4510 if(m_debug) 4511 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 4512 << " Available cache line in TRT:" 4513 << " index = " << std::dec << index << std::endl; 4514 #endif 4515 break; 4516 } 4517 } 4518 break; 4519 } 4520 /////////////////////// 4521 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 4522 // Copy the TRT entry in a local buffer 4523 { 4524 if((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4525 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 4526 { 4527 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4528 size_t index = r_xram_rsp_trt_index.read(); 4529 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 4530 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4531 4532 #if DEBUG_MEMC_XRAM_RSP 4533 if(m_debug) 4534 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 4535 << " Get access to DIR and TRT" << std::endl; 4536 #endif 4537 m_cpt_xram_rsp_fsm_n_dir_lock++; 4538 m_cpt_xram_rsp_fsm_n_trt_lock++; 4539 } 4540 m_cpt_xram_rsp_fsm_dir_lock++; 4541 m_cpt_xram_rsp_fsm_trt_lock++; 4542 break; 4543 } 4544 /////////////////////// 4545 case XRAM_RSP_TRT_COPY: // Select a victim cache line 4546 // and copy it in a local buffer 4547 { 4548 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4549 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 4550 4551 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4552 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 4553 4554 // selects & extracts a victim line from cache 4555 size_t way = 0; 4556 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 4557 4558 DirectoryEntry victim(m_cache_directory.select(set, way)); 4559 4560 bool inval = (victim.count && victim.valid) or (!victim.cache_coherent and (victim.count == 1)) ; 4561 4562 4563 // copy the victim line in a local buffer 4564 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 4565 4566 r_xram_rsp_victim_copy = victim.owner.srcid; 4567 r_xram_rsp_victim_coherent = victim.cache_coherent; 4568 r_xram_rsp_victim_copy_inst = victim.owner.inst; 4569 r_xram_rsp_victim_count = victim.count; 4570 r_xram_rsp_victim_ptr = victim.ptr; 4571 r_xram_rsp_victim_way = way; 4572 r_xram_rsp_victim_set = set; 4573 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 4574 r_xram_rsp_victim_is_cnt = victim.is_cnt; 4575 r_xram_rsp_victim_inval = inval ; 4576 r_xram_rsp_victim_dirty = victim.dirty or (!victim.cache_coherent && (victim.count == 1)); //a NCC line is by default considered as dirty in the L1: we must take a reservation on a TRT entry 4577 4578 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 4579 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4580 4581 #if DEBUG_MEMC_XRAM_RSP 4582 if(m_debug) 4583 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 4584 << " Select a victim slot: " 4585 << " way = " << std::dec << way 4586 << " / set = " << set 4587 << "/ count = " << 
victim.count 4588 << " / inval_required = " << inval << std::endl; 4589 #endif 4590 break; 4591 } 4592 /////////////////////// 4593 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 4594 // to check a possible pending inval 4595 { 4596 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4597 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 4598 4599 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4600 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 4601 4602 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 4603 { 4604 size_t index = 0; 4605 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 4606 { 4607 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4608 4609 #if DEBUG_MEMC_XRAM_RSP 4610 if(m_debug) 4611 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4612 << " Get acces to IVT, but line invalidation registered" 4613 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4614 << " / index = " << std::dec << index << std::endl; 4615 #endif 4616 4617 } 4618 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 4619 { 4620 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4621 4622 #if DEBUG_MEMC_XRAM_RSP 4623 if(m_debug) 4624 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4625 << " Get acces to IVT, but inval required and IVT full" << std::endl; 4626 #endif 4627 } 4628 else 4629 { 4630 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 4631 4632 #if DEBUG_MEMC_XRAM_RSP 4633 if(m_debug) 4634 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4635 << " Get acces to IVT / no pending inval request" << std::endl; 4636 #endif 4637 } 4638 } 4639 break; 4640 } 4641 ///////////////////////// 4642 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 4643 { 4644 4645 #if DEBUG_MEMC_XRAM_RSP 4646 if(m_debug) 4647 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 4648 << " Release all locks and retry" << std::endl; 4649 #endif 4650 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4651 break; 4652 } 4653 /////////////////////// 4654 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4655 // erases the TRT entry if victim not dirty, 4656 // and set inval request in IVT if required 4657 { 4658 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4659 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4660 4661 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4662 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4663 4664 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4665 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4666 4667 // check if this is an instruction read, this means pktid is either 4668 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4669 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4670 4671 // check if this is a cached read, this means pktid is either 4672 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4673 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4674 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4675 4676 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4677 4678 bool dirty = false; 4679 4680 // update cache data 4681 size_t set = r_xram_rsp_victim_set.read(); 4682 size_t way = r_xram_rsp_victim_way.read(); 4683 for(size_t word=0; word<m_words ; word++) 4684 { 4685 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4686 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 
4687 4688 } 4689 4690 // update cache directory 4691 DirectoryEntry entry; 4692 entry.valid = true; 4693 entry.cache_coherent = (inst_read or (not(cached_read))) and (r_xram_rsp_trt_buf.proc_read); 4694 entry.is_cnt = false; 4695 entry.lock = false; 4696 entry.dirty = dirty; 4697 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4698 entry.ptr = 0; 4699 if(cached_read) 4700 { 4701 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4702 entry.owner.inst = inst_read; 4703 entry.count = 1; 4704 } 4705 else 4706 { 4707 entry.owner.srcid = 0; 4708 entry.owner.inst = 0; 4709 entry.count = 0; 4710 } 4711 m_cache_directory.write(set, way, entry); 4712 //RWT: keep the coherence information in order to send it to the read_rsp 4713 r_xram_rsp_coherent = inst_read or (not(cached_read)); 4714 // request an invalidattion request in IVT for victim line 4715 if(r_xram_rsp_victim_inval.read()) 4716 { 4717 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4718 size_t index = 0; 4719 size_t count_copies = r_xram_rsp_victim_count.read(); 4720 4721 bool wok = m_ivt.set(false, // it's an inval transaction 4722 broadcast, // set broadcast bit 4723 false, // no response required 4724 false, // no acknowledge required 4725 0, // srcid 4726 0, // trdid 4727 0, // pktid 4728 r_xram_rsp_victim_nline.read(), 4729 count_copies, 4730 index); 4731 4732 r_xram_rsp_ivt_index = index; 4733 assert( wok and 4734 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4735 4736 } 4737 if (!r_xram_rsp_victim_coherent.read()) 4738 { 4739 addr_t min = r_xram_rsp_victim_nline.read()*m_words*4 ; 4740 addr_t max = r_xram_rsp_victim_nline.read()*m_words*4 + (m_words - 1)*4; 4741 m_llsc_table.sw(min, max); 4742 } 4743 #if DEBUG_MEMC_XRAM_RSP 4744 if(m_debug) 4745 { 4746 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 4747 << " Cache update: " 4748 << " way = " << std::dec << way 4749 << " / set = " << set 4750 << " / owner_id = " << std::hex << entry.owner.srcid 4751 << " / owner_ins = " << std::dec << entry.owner.inst 4752 << " / count = " << entry.count 4753 << " / nline = " << r_xram_rsp_trt_buf.nline 4754 << " / is_cnt = " << entry.is_cnt << std::endl; 4755 if(r_xram_rsp_victim_inval.read()) 4756 std::cout << " Invalidation request for victim line " 4757 << std::hex << r_xram_rsp_victim_nline.read() 4758 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4759 } 4760 #endif 4761 4762 // If the victim is not dirty (RWT: if it is not coherent, we can not know wether it is dirty or not), we don't need another XRAM put transaction, 4763 // and we can erase the TRT entry 4764 if(!r_xram_rsp_victim_dirty.read() and (r_xram_rsp_victim_coherent.read() or (r_xram_rsp_victim_count.read() == 0))) m_trt.erase(r_xram_rsp_trt_index.read()); 4765 4766 // Next state 4767 if(r_xram_rsp_victim_dirty.read() or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4768 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4769 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4770 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4771 break; 4772 } 4773 //////////////////////// 4774 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty or not coherent (RWT) 4775 { 4776 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4777 { 4778 std::vector<data_t> data_vector; 4779 data_vector.clear(); 4780 for(size_t i=0; i<m_words; i++) 4781 { 4782 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4783 } 
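// The m_trt.set() call below re-uses the TRT entry allocated by the initial
// GET to register the PUT of the victim line: all byte enables are set
// (0xF for each of the m_words words) and the data vector filled above
// carries the complete victim line.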
4784 m_trt.set( r_xram_rsp_trt_index.read(), 4785 false, // PUT 4786 r_xram_rsp_victim_nline.read(), // line index 4787 0, // unused 4788 0, // unused 4789 0, // unused 4790 false, // not proc_read 4791 0, // unused 4792 0, // unused 4793 std::vector<be_t>(m_words,0xF), 4794 data_vector); 4795 #if DEBUG_MEMC_XRAM_RSP 4796 if(m_debug) 4797 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4798 << " Set TRT entry for the put transaction" 4799 << " / dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 4800 #endif 4801 4802 // if( not r_xram_rsp_victim_coherent ) 4803 // std::cout << "a victim coherent not sent trt index =" << r_xram_rsp_trt_index.read() << std::endl; 4804 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4805 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4806 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4807 m_cpt_xram_rsp_fsm_n_trt_lock++; 4808 } 4809 4810 m_cpt_xram_rsp_fsm_trt_lock++; 4811 4812 break; 4813 } 4814 ////////////////////// 4815 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4816 { 4817 if( not r_xram_rsp_to_tgt_rsp_req.read()) 4818 { 4819 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4820 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4821 if (r_xram_rsp_coherent.read()) 4822 { 4823 r_xram_rsp_to_tgt_rsp_pktid = 0x0 + r_xram_rsp_trt_buf.pktid;//RWT CC 4824 } 4825 else 4826 { 4827 r_xram_rsp_to_tgt_rsp_pktid = 0x8 + r_xram_rsp_trt_buf.pktid;//RWT NCC 4828 } 4829 for(size_t i=0; i < m_words; i++) 4830 { 4831 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4832 } 4833 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4834 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4835 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4836 r_xram_rsp_to_tgt_rsp_rerror = false; 4837 r_xram_rsp_to_tgt_rsp_req = true; 4838 4839 4840 if(r_xram_rsp_victim_inval) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4841 else if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4842 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4843 4844 #if DEBUG_MEMC_XRAM_RSP 4845 if(m_debug) 4846 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 4847 << " Request the TGT_RSP FSM to return data:" 4848 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 4849 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4850 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4851 #endif 4852 } 4853 break; 4854 } 4855 //////////////////// 4856 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 4857 { 4858 if(!r_xram_rsp_to_cc_send_multi_req.read() and 4859 !r_xram_rsp_to_cc_send_brdcast_req.read()) 4860 { 4861 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 4862 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 4863 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4864 4865 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 4866 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4867 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4868 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4869 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4870 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4871 xram_rsp_to_cc_send_fifo_put = multi_req; 4872 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4873 4874 if(r_xram_rsp_victim_dirty and r_xram_rsp_victim_coherent) r_xram_rsp_fsm = 
XRAM_RSP_WRITE_DIRTY; 4875 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4876 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4877 4878 // std::cout << "cleanup sent for trt index =" << r_xram_rsp_trt_index.read() << std::endl; 4879 #if DEBUG_MEMC_XRAM_RSP 4880 if(m_debug) 4881 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4882 << " Send an inval request to CC_SEND FSM" 4883 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4884 #endif 4885 } 4886 break; 4887 } 4888 ////////////////////////// 4889 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4890 { 4891 if ( not r_xram_rsp_to_ixr_cmd_req.read() ) 4892 { 4893 r_xram_rsp_to_ixr_cmd_req = true; 4894 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4895 4896 m_cpt_write_dirty++; 4897 4898 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4899 r_xram_rsp_victim_inval.read(); 4900 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4901 4902 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4903 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4904 4905 #if DEBUG_MEMC_XRAM_RSP 4906 if(m_debug) 4907 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4908 << " Send the put request to IXR_CMD FSM" 4909 << " / victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 4910 #endif 4911 } 4912 break; 4913 } 4914 ///////////////////////// 4915 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4916 { 4917 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4918 { 4919 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4920 m_cpt_xram_rsp_fsm_n_heap_lock++; 4921 } 4922 4923 #if DEBUG_MEMC_XRAM_RSP 4924 if(m_debug) 4925 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 4926 << " Requesting HEAP lock" << std::endl; 4927 #endif 4928 4929 m_cpt_xram_rsp_fsm_heap_lock++; 4930 4931 break; 4932 } 4933 ///////////////////////// 4934 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 4935 { 4936 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4937 { 4938 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 4939 4940 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4941 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4942 xram_rsp_to_cc_send_fifo_put = true; 4943 if(m_xram_rsp_to_cc_send_inst_fifo.wok()) 4944 { 4945 r_xram_rsp_next_ptr = entry.next; 4946 if(entry.next == r_xram_rsp_next_ptr.read()) // last copy 4947 { 4948 r_xram_rsp_to_cc_send_multi_req = true; 4949 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 4950 } 4951 else 4952 { 4953 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4954 } 4955 } 4956 else 4957 { 4958 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4959 } 4960 4961 #if DEBUG_MEMC_XRAM_RSP 4962 if(m_debug) 4963 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 4964 << " Erase copy:" 4965 << " srcid = " << std::hex << entry.owner.srcid 4966 << " / inst = " << std::dec << entry.owner.inst << std::endl; 4967 #endif 4968 } 4969 break; 4970 } 4971 ///////////////////////// 4972 case XRAM_RSP_HEAP_LAST: // last copy 4973 { 4974 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 4975 { 4976 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 4977 << " bad HEAP allocation" << std::endl; 4978 exit(0); 4979 } 4980 size_t free_pointer = m_heap.next_free_ptr(); 4981 4982 HeapEntry last_entry; 4983 last_entry.owner.srcid = 0; 4984 last_entry.owner.inst = false; 4985 if(m_heap.is_full()) 4986 { 4987 last_entry.next = r_xram_rsp_next_ptr.read(); 4988 m_heap.unset_full(); 4989 } 4990 else 4991 { 4992 
last_entry.next = free_pointer; 4993 } 4994 4995 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 4996 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 4997 4998 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4999 5000 #if DEBUG_MEMC_XRAM_RSP 5001 if(m_debug) 5002 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 5003 << " Heap housekeeping" << std::endl; 5004 #endif 5005 break; 5006 } 5007 ///////////////////////// 5008 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 5009 { 5010 m_trt.erase(r_xram_rsp_trt_index.read()); 5011 5012 // Next state 5013 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 5014 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5015 5016 #if DEBUG_MEMC_XRAM_RSP 5017 if(m_debug) 5018 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 5019 << " Error reported by XRAM / erase the TRT entry" << std::endl; 5020 #endif 5021 break; 5022 } 5023 //////////////////////// 5024 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 5025 { 5026 if(!r_xram_rsp_to_tgt_rsp_req.read()) 5027 { 5028 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 5029 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 5030 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 5031 for(size_t i=0; i < m_words; i++) 5032 { 5033 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 5034 } 5035 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 5036 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 5037 r_xram_rsp_to_tgt_rsp_rerror = true; 5038 r_xram_rsp_to_tgt_rsp_req = true; 5039 5040 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5041 5042 #if DEBUG_MEMC_XRAM_RSP 5043 if(m_debug) 5044 std::cout << " <MEMC " << name() 5045 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 5046 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 5047 #endif 5048 } 5049 break; 5050 } 5051 } // end swich r_xram_rsp_fsm 5052 5053 //////////////////////////////////////////////////////////////////////////////////// 5054 // CLEANUP FSM 5055 //////////////////////////////////////////////////////////////////////////////////// 5056 // The CLEANUP FSM handles the cleanup request from L1 caches. 5057 // It accesses the cache directory and the heap to update the list of copies. 
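// A minimal sketch of the two-flit command decoding performed in the
// CLEANUP_IDLE and CLEANUP_GET_NLINE states below, assuming a helper
// get(flit, FIELD) with the semantics of DspinDhccpParam::dspin_get()
// (variable names are illustrative only):
//
//     uint64_t flit0   = fifo.read();                        // first DSPIN flit
//     uint32_t srcid   = get(flit0, CLEANUP_SRCID);          // L1 cache identifier
//     addr_t   nline   = get(flit0, CLEANUP_NLINE_MSB) << 32;
//     uint64_t flit1   = fifo.read();                        // second DSPIN flit
//     nline           |= get(flit1, CLEANUP_NLINE_LSB);      // full cache line index
//     addr_t   address = nline * m_words * 4;                // line base address
//     bool     data    = not get(flit1, P2M_EOP);            // data flits follow (dirty NCC line)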
5058 //////////////////////////////////////////////////////////////////////////////////// 5059 5060 switch(r_cleanup_fsm.read()) 5061 { 5062 ////////////////// 5063 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 5064 { 5065 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 5066 5067 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5068 5069 uint32_t srcid = 5070 DspinDhccpParam::dspin_get( 5071 flit, 5072 DspinDhccpParam::CLEANUP_SRCID); 5073 5074 uint8_t type = 5075 DspinDhccpParam::dspin_get( 5076 flit, 5077 DspinDhccpParam::P2M_TYPE); 5078 5079 r_cleanup_way_index = 5080 DspinDhccpParam::dspin_get( 5081 flit, 5082 DspinDhccpParam::CLEANUP_WAY_INDEX); 5083 5084 r_cleanup_nline = 5085 DspinDhccpParam::dspin_get( 5086 flit, 5087 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 5088 5089 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 5090 r_cleanup_srcid = srcid; 5091 r_cleanup_ncc = 5092 DspinDhccpParam::dspin_get( 5093 flit, 5094 DspinDhccpParam::CLEANUP_NCC); 5095 r_cleanup_contains_data = false; 5096 5097 assert( (srcid < m_initiators) and 5098 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 5099 5100 m_cpt_cleanup++; 5101 cc_receive_to_cleanup_fifo_get = true; 5102 r_cleanup_fsm = CLEANUP_GET_NLINE; 5103 5104 #if DEBUG_MEMC_CLEANUP 5105 if(m_debug) 5106 { 5107 std::cout 5108 << " <MEMC " << name() 5109 << " CLEANUP_IDLE> Cleanup request:" << std::hex 5110 << " / owner_id = " << srcid 5111 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) 5112 << " / ncc = " << DspinDhccpParam::dspin_get( 5113 flit, 5114 DspinDhccpParam::CLEANUP_NCC) 5115 << std::endl; 5116 } 5117 #endif 5118 break; 5119 } 5120 5121 /////////////////////// 5122 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 5123 { 5124 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 5125 5126 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5127 5128 addr_t nline = r_cleanup_nline.read() | 5129 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 5130 5131 //A MODIFIER POUR DIRTY // 5132 bool eop = 5133 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::P2M_EOP) == 0x1; 5134 if (! 
eop) 5135 { 5136 r_cleanup_fsm = CLEANUP_GET_DATA; 5137 r_cleanup_data_index = 0; 5138 r_cleanup_contains_data = true; 5139 } 5140 else 5141 { 5142 r_cleanup_fsm = CLEANUP_DIR_REQ; 5143 } 5144 cc_receive_to_cleanup_fifo_get = true; 5145 r_cleanup_nline = nline; 5146 5147 #if DEBUG_MEMC_CLEANUP 5148 if(m_debug) 5149 { 5150 std::cout 5151 << " <MEMC " << name() 5152 << " CLEANUP_GET_NLINE> Cleanup request:" 5153 << std::hex 5154 << " / address = " << nline * m_words * 4 5155 << " / contains data = " << (!eop) 5156 << std::endl; 5157 } 5158 #endif 5159 break; 5160 } 5161 ///////////////////// 5162 case CLEANUP_GET_DATA : 5163 { 5164 if (m_cc_receive_to_cleanup_fifo.rok()) 5165 { 5166 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5167 5168 uint32_t data = 5169 DspinDhccpParam::dspin_get (flit, DspinDhccpParam::CLEANUP_DATA_UPDT); 5170 5171 r_cleanup_data[r_cleanup_data_index] = data; 5172 r_cleanup_data_index = r_cleanup_data_index.read() + 1; 5173 assert (r_cleanup_data_index.read() < m_words and "MEM_CACHE in CLEANUP_GET_DATA : too much flits in cleanup data updt"); 5174 cc_receive_to_cleanup_fifo_get = true; 5175 if (r_cleanup_data_index.read() == m_words - 1) 5176 { 5177 r_cleanup_contains_data = true; 5178 m_cpt_cleanup_data ++; 5179 r_cleanup_fsm = CLEANUP_DIR_REQ; 5180 } 5181 #if DEBUG_MEMC_CLEANUP 5182 if(m_debug) 5183 { 5184 std::cout 5185 << " <MEMC " << name() 5186 << " CLEANUP_GET_DATA> " 5187 << " / word = " << std::dec << r_cleanup_data_index.read() 5188 << " / data = " << std::hex << data 5189 << std::endl; 5190 } 5191 #endif 5192 } 5193 break; 5194 } 5195 ///////////////////// 5196 case CLEANUP_DIR_REQ: // Get the lock to the directory 5197 { 5198 m_cpt_cleanup_fsm_dir_lock++; 5199 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 5200 5201 r_cleanup_fsm = CLEANUP_DIR_LOCK; 5202 //std::cout << " MEM_CACHE : CLEANUP_DIR_REQ" << std::endl; 5203 5204 #if DEBUG_MEMC_CLEANUP 5205 if(m_debug) 5206 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 5207 #endif 5208 5209 m_cpt_cleanup_fsm_n_dir_lock++; 5210 5211 break; 5212 } 5213 5214 ////////////////////// 5215 case CLEANUP_DIR_LOCK: 5216 { 5217 // test directory status 5218 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 5219 { 5220 std::cout 5221 << "VCI_MEM_CACHE ERROR " << name() 5222 << " CLEANUP_DIR_LOCK state" 5223 << " bad DIR allocation" << std::endl; 5224 5225 exit(0); 5226 } 5227 //std::cout << " MEM_CACHE : CLEANUP_DIR_LOCK" << std::endl; 5228 5229 // Read the directory 5230 size_t way = 0; 5231 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 5232 5233 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 5234 r_cleanup_is_cnt = entry.is_cnt; 5235 r_cleanup_dirty = entry.dirty; 5236 r_cleanup_tag = entry.tag; 5237 r_cleanup_lock = entry.lock; 5238 r_cleanup_way = way; 5239 r_cleanup_count = entry.count; 5240 r_cleanup_ptr = entry.ptr; 5241 r_cleanup_copy = entry.owner.srcid; 5242 r_cleanup_copy_inst = entry.owner.inst; 5243 5244 //RWT 5245 size_t set = m_y[(addr_t)(cleanup_address)]; 5246 m_cache_data.read_line(way, set, r_cleanup_old_data); 5247 r_cleanup_coherent = entry.cache_coherent; 5248 5249 if(entry.valid) // hit : the copy must be cleared 5250 { 5251 assert( 5252 (entry.count > 0) and 5253 "VCI MEM CACHE ERROR: " 5254 "In CLEANUP_DIR_LOCK, CLEANUP command on a valid entry " 5255 "with no copies"); 5256 5257 // no access to the heap 5258 if((entry.count == 1) or (entry.is_cnt)) 5259 { 5260 r_cleanup_fsm = CLEANUP_DIR_WRITE; 5261 } 
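// When several copies remain and the directory is not in counter mode
// (else branch below), the CLEANUP_HEAP_* states walk the heap to unlink
// the leaving copy. A minimal sketch of that removal, assuming a simple
// node array heap[] where next == self marks the end of the list
// (the real states also push the freed slot on the free list):
//
//     size_t prev = entry.ptr;                       // head of the copies list
//     size_t ptr  = entry.ptr;
//     while ( not match(heap[ptr], srcid, inst) )    // search the leaving copy
//     {
//         prev = ptr;
//         ptr  = heap[ptr].next;
//     }
//     heap[prev].next = (heap[ptr].next == ptr) ? prev             // it was the last node
//                                               : heap[ptr].next;  // bypass it otherwise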
5262 // access to the heap 5263 else 5264 { 5265 r_cleanup_fsm = CLEANUP_HEAP_REQ; 5266 } 5267 } 5268 else // miss : check UPT for a pending invalidation transaction 5269 { 5270 r_cleanup_fsm = CLEANUP_IVT_LOCK; 5271 } 5272 5273 #if DEBUG_MEMC_CLEANUP 5274 if(m_debug) 5275 { 5276 std::cout 5277 << " <MEMC " << name() 5278 << " CLEANUP_DIR_LOCK> Test directory status: " 5279 << std::hex 5280 << " line = " << cleanup_address 5281 << " / hit = " << entry.valid 5282 << " / dir_id = " << entry.owner.srcid 5283 << " / dir_ins = " << entry.owner.inst 5284 << " / search_id = " << r_cleanup_srcid.read() 5285 << " / search_ins = " << r_cleanup_inst.read() 5286 << " / count = " << entry.count 5287 << " / is_cnt = " << entry.is_cnt 5288 << std::endl; 5289 } 5290 #endif 5291 break; 5292 } 5293 5294 /////////////////////// 5295 case CLEANUP_DIR_WRITE: 5296 { 5297 /*ODCCP*///std::cout << "CLEANUP_DIR_WRITE" << std::endl; 5298 // Update the directory entry without heap access 5299 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 5300 { 5301 std::cout 5302 << "VCI_MEM_CACHE ERROR " << name() 5303 << " CLEANUP_DIR_WRITE state" 5304 << " bad DIR allocation" << std::endl; 5305 5306 exit(0); 5307 } 5308 5309 size_t way = r_cleanup_way.read(); 5310 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 5311 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5312 5313 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5314 bool match = match_srcid and match_inst; 5315 5316 if(not r_cleanup_is_cnt.read() and not match) 5317 { 5318 std::cout 5319 << "VCI_MEM_CACHE ERROR : Cleanup request on a valid" 5320 << "entry using linked list mode with no corresponding" 5321 << "directory or heap entry" 5322 << std::endl; 5323 5324 exit(1); 5325 } 5326 5327 /*RWT*/ 5328 bool inval_request = (r_read_to_cleanup_req.read() and (r_cleanup_nline.read() == r_read_to_cleanup_nline.read())) // NCC to CC initiated by a read transaction 5329 or (r_write_to_cleanup_req.read() and (r_cleanup_nline.read() == r_write_to_cleanup_nline.read())); //NCC to CC initiated by a wrtie transaction 5330 5331 5332 if (inval_request) m_cpt_ncc_to_cc ++; 5333 5334 if (r_write_to_cleanup_req.read() and (r_cleanup_nline.read() == r_write_to_cleanup_nline.read())) 5335 { 5336 r_write_to_cleanup_req = false; 5337 m_cpt_ncc_to_cc_write ++; 5338 } 5339 5340 5341 // update the cache directory (for the copies) 5342 DirectoryEntry entry; 5343 entry.valid = true; 5344 entry.cache_coherent = inval_request or r_cleanup_coherent.read(); 5345 entry.is_cnt = r_cleanup_is_cnt.read(); 5346 entry.dirty = r_cleanup_dirty.read() or r_cleanup_contains_data.read(); 5347 entry.tag = r_cleanup_tag.read(); 5348 entry.lock = r_cleanup_lock.read(); 5349 entry.ptr = r_cleanup_ptr.read(); 5350 if (r_read_to_cleanup_req.read() and (r_cleanup_nline.read() == r_read_to_cleanup_nline.read())) //pending READ 5351 { 5352 if (r_read_to_cleanup_cached_read.read()) 5353 { 5354 entry.count = r_cleanup_count.read(); 5355 entry.owner.srcid = r_read_to_cleanup_srcid.read(); 5356 entry.owner.inst = 0; 5357 } 5358 else 5359 { 5360 entry.count = r_cleanup_count.read() - 1; 5361 entry.owner.srcid = r_cleanup_copy.read(); 5362 entry.owner.inst = r_cleanup_copy_inst.read(); 5363 } 5364 if (r_read_to_cleanup_is_ll.read()) 5365 { 5366 r_cleanup_to_tgt_rsp_ll_key = r_read_to_cleanup_ll_key.read(); 5367 } 5368 } 5369 else 5370 { 5371 entry.count = r_cleanup_count.read() - 1; 5372 entry.owner.srcid = 0; 5373 entry.owner.inst = 0; 5374 } 5375 5376 if 
(r_cleanup_contains_data.read()) 5377 { 5378 for (size_t word = 0; word < m_words; word ++) 5379 { 5380 m_cache_data.write(way, set, word, r_cleanup_data[word].read(), 0xF); 5381 } 5382 addr_t min = r_cleanup_nline.read()*m_words*4 ; 5383 addr_t max = r_cleanup_nline.read()*m_words*4 + (m_words - 1)*4; 5384 m_llsc_table.sw(min, max); 5385 } 5386 5387 m_cache_directory.write(set, way, entry); 5388 5389 /*RWT*/ 5390 if (inval_request) 5391 { 5392 r_cleanup_fsm = CLEANUP_IVT_LOCK_DATA; 5393 } 5394 else 5395 { 5396 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5397 } 5398 5399 #if DEBUG_MEMC_CLEANUP 5400 if(m_debug) 5401 { 5402 std::cout 5403 << " <MEMC " << name() 5404 << " CLEANUP_DIR_WRITE> Update directory:" 5405 << std::hex 5406 << " address = " << r_cleanup_nline.read() * m_words * 4 5407 << " / dir_id = " << entry.owner.srcid 5408 << " / dir_ins = " << entry.owner.inst 5409 << " / count = " << entry.count 5410 << " / is_cnt = " << entry.is_cnt 5411 << " / match_inval = " << inval_request 5412 << std::endl; 5413 } 5414 #endif 5415 5416 break; 5417 } 5418 ///////////////////// 5419 case CLEANUP_IVT_LOCK_DATA://RWT 5420 { 5421 //Search for a matching inval in the UPT (there must be one) and check if there is a pending read. 5422 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) 5423 { 5424 size_t index = 0; 5425 bool match_inval; 5426 5427 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 5428 assert (match_inval && "VCI MEM CACHE ERROR: In CLEANUP_IVT_LOCK_DATA, NO CORRESPONDING INVAL"); 5429 r_cleanup_read_srcid = m_ivt.srcid(index); 5430 r_cleanup_read_trdid = m_ivt.trdid(index); 5431 r_cleanup_read_pktid = 0x0 + m_ivt.pktid(index); 5432 r_cleanup_read_need_rsp = !m_ivt.need_rsp(index); 5433 r_cleanup_index = index; 5434 5435 r_cleanup_fsm = CLEANUP_IVT_CLEAR_DATA; 5436 } 5437 #if DEBUG_MC_CLEANUP 5438 if (m_debug) 5439 { 5440 std::cout 5441 << " <MEMC " << name() 5442 << " CLEANUP_IVT_LOCK_DATA> fetch pending inval" 5443 << std::endl; 5444 } 5445 #endif 5446 break; 5447 } 5448 5449 ////////////////////////// 5450 case CLEANUP_IVT_CLEAR_DATA://RWT 5451 { 5452 m_ivt.clear(r_cleanup_index.read()); 5453 assert ((r_cleanup_read_need_rsp.read() == (r_read_to_cleanup_req.read() && (r_cleanup_nline.read() == r_read_to_cleanup_nline.read()))) && "condition pending read"); 5454 if (r_cleanup_read_need_rsp.read()) 5455 { 5456 r_cleanup_fsm = CLEANUP_READ_RSP; 5457 } 5458 else 5459 { 5460 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5461 } 5462 #if DEBUG_MC_CLEANUP 5463 if (m_debug) 5464 { 5465 std::cout 5466 << " <MEMC " << name() 5467 << " CLEANUP_IVT_CLEAR_DATA> clear UPT entry" 5468 << std::endl; 5469 } 5470 #endif 5471 break; 5472 } 5473 5474 //////////////////////// 5475 case CLEANUP_READ_RSP://RWT 5476 { 5477 if(r_cleanup_to_tgt_rsp_req.read()) break; 5478 5479 r_cleanup_to_tgt_rsp_req = true; 5480 r_cleanup_to_tgt_rsp_srcid = r_cleanup_read_srcid.read(); 5481 r_cleanup_to_tgt_rsp_trdid = r_cleanup_read_trdid.read(); 5482 r_cleanup_to_tgt_rsp_pktid = 0x0 + r_cleanup_read_pktid.read();//WT 5483 r_cleanup_to_tgt_rsp_type = 0; //Read instruction 5484 r_cleanup_to_tgt_rsp_length = r_read_to_cleanup_length.read(); 5485 r_cleanup_to_tgt_rsp_first_word = r_read_to_cleanup_first_word.read(); 5486 r_read_to_cleanup_req = false; 5487 m_cpt_ncc_to_cc_read ++; 5488 if (r_cleanup_contains_data.read()) //L1 was dirty 5489 { 5490 for(size_t i = 0; i<m_words; i++) 5491 { 5492 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_data[i].read(); 5493 } 5494 } 5495 else //the L2 data are up to date 5496 { 5497 for(size_t i = 
0; i<m_words; i++) 5498 { 5499 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_old_data[i].read(); 5500 } 5501 } 5502 5503 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5504 5505 #if DEBUG_MC_CLEANUP 5506 if (m_debug) 5507 { 5508 std::cout 5509 << " <MEMC " << name() 5510 << " CLEANUP_READ_RSP> answer READ" 5511 << std::endl; 5512 } 5513 #endif 5514 break; 5515 } 5516 ////////////////////// 5517 case CLEANUP_HEAP_REQ: 5518 { 5519 // get the lock to the HEAP directory 5520 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 5521 5522 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 5523 5524 #if DEBUG_MEMC_CLEANUP 5525 if(m_debug) 5526 { 5527 std::cout 5528 << " <MEMC " << name() 5529 << " CLEANUP_HEAP_REQ> HEAP lock acquired " 5530 << std::endl; 5531 } 5532 #endif 5533 m_cpt_cleanup_fsm_n_heap_lock++; 5534 break; 5535 } 5536 5537 ////////////////////// 5538 case CLEANUP_HEAP_LOCK: 5539 { 5540 // two cases are handled in this state : 5541 // 1. the matching copy is directly in the directory 5542 // 2. the matching copy is the first copy in the heap 5543 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5544 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5545 5546 size_t way = r_cleanup_way.read(); 5547 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 5548 5549 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 5550 bool last = (heap_entry.next == r_cleanup_ptr.read()); 5551 5552 // match_dir computation 5553 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5554 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5555 bool match_dir = match_dir_srcid and match_dir_inst; 5556 5557 // match_heap computation 5558 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5559 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5560 bool match_heap = match_heap_srcid and match_heap_inst; 5561 5562 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 5563 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5564 r_cleanup_prev_inst = heap_entry.owner.inst; 5565 5566 assert( (not last or match_dir or match_heap) and 5567 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 5568 5569 assert( (not match_dir or not match_heap) and 5570 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 5571 5572 DirectoryEntry dir_entry; 5573 dir_entry.valid = true; 5574 dir_entry.cache_coherent = true; 5575 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 5576 dir_entry.dirty = r_cleanup_dirty.read(); 5577 dir_entry.tag = r_cleanup_tag.read(); 5578 dir_entry.lock = r_cleanup_lock.read(); 5579 dir_entry.count = r_cleanup_count.read()-1; 5580 5581 // the matching copy is registered in the directory and 5582 // it must be replaced by the first copy registered in 5583 // the heap. 
The corresponding entry must be freed 5584 if(match_dir) 5585 { 5586 dir_entry.ptr = heap_entry.next; 5587 dir_entry.owner.srcid = heap_entry.owner.srcid; 5588 dir_entry.owner.inst = heap_entry.owner.inst; 5589 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5590 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5591 } 5592 5593 // the matching copy is the first copy in the heap 5594 // It must be freed and the copy registered in directory 5595 // must point to the next copy in heap 5596 else if(match_heap) 5597 { 5598 dir_entry.ptr = heap_entry.next; 5599 dir_entry.owner.srcid = r_cleanup_copy.read(); 5600 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5601 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5602 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5603 } 5604 5605 // The matching copy is in the heap, but is not the first copy 5606 // The directory entry must be modified to decrement count 5607 else 5608 { 5609 dir_entry.ptr = r_cleanup_ptr.read(); 5610 dir_entry.owner.srcid = r_cleanup_copy.read(); 5611 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5612 r_cleanup_next_ptr = heap_entry.next; 5613 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5614 } 5615 5616 m_cache_directory.write(set,way,dir_entry); 5617 5618 #if DEBUG_MEMC_CLEANUP 5619 if(m_debug) 5620 { 5621 std::cout 5622 << " <MEMC " << name() 5623 << " CLEANUP_HEAP_LOCK> Checks matching:" 5624 << " address = " << r_cleanup_nline.read() * m_words * 4 5625 << " / dir_id = " << r_cleanup_copy.read() 5626 << " / dir_ins = " << r_cleanup_copy_inst.read() 5627 << " / heap_id = " << heap_entry.owner.srcid 5628 << " / heap_ins = " << heap_entry.owner.inst 5629 << " / search_id = " << r_cleanup_srcid.read() 5630 << " / search_ins = " << r_cleanup_inst.read() 5631 << std::endl; 5632 } 5633 #endif 5634 break; 5635 } 5636 5637 //////////////////////// 5638 case CLEANUP_HEAP_SEARCH: 5639 { 5640 // This state is handling the case where the copy 5641 // is in the heap, but is not the first in the linked list 5642 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5643 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5644 5645 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 5646 5647 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 5648 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5649 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5650 bool match_heap = match_heap_srcid and match_heap_inst; 5651 5652 assert( (not last or match_heap) and 5653 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 5654 5655 // the matching copy must be removed 5656 if(match_heap) 5657 { 5658 // re-use ressources 5659 r_cleanup_ptr = heap_entry.next; 5660 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 5661 } 5662 // test the next in the linked list 5663 else 5664 { 5665 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 5666 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5667 r_cleanup_prev_inst = heap_entry.owner.inst; 5668 r_cleanup_next_ptr = heap_entry.next; 5669 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5670 } 5671 5672 #if DEBUG_MEMC_CLEANUP 5673 if(m_debug) 5674 { 5675 if(not match_heap) 5676 { 5677 std::cout 5678 << " <MEMC " << name() 5679 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 5680 << std::endl; 5681 } 5682 else 5683 { 5684 std::cout 5685 << " <MEMC " << name() 5686 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 5687 << std::endl; 5688 } 5689 5690 std::cout 5691 << " address = " << r_cleanup_nline.read() * m_words * 4 5692 << " / heap_id = " << heap_entry.owner.srcid 
5693 << " / heap_ins = " << heap_entry.owner.inst 5694 << " / search_id = " << r_cleanup_srcid.read() 5695 << " / search_ins = " << r_cleanup_inst.read() 5696 << " / last = " << last 5697 << std::endl; 5698 } 5699 #endif 5700 break; 5701 } 5702 //////////////////////// 5703 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 5704 { 5705 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5706 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5707 5708 HeapEntry heap_entry; 5709 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 5710 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 5711 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 5712 5713 // this is the last entry of the list of copies 5714 if(last) 5715 { 5716 heap_entry.next = r_cleanup_prev_ptr.read(); 5717 } 5718 // this is not the last entry 5719 else 5720 { 5721 heap_entry.next = r_cleanup_ptr.read(); 5722 } 5723 5724 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 5725 5726 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5727 5728 #if DEBUG_MEMC_CLEANUP 5729 if(m_debug) 5730 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 5731 << " Remove the copy in the linked list" << std::endl; 5732 #endif 5733 break; 5734 } 5735 /////////////////////// 5736 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 5737 // and becomes the head of the list of free entries 5738 { 5739 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5740 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5741 HeapEntry heap_entry; 5742 heap_entry.owner.srcid = 0; 5743 heap_entry.owner.inst = false; 5744 5745 if(m_heap.is_full()) 5746 { 5747 heap_entry.next = r_cleanup_next_ptr.read(); 5748 } 5749 else 5750 { 5751 heap_entry.next = m_heap.next_free_ptr(); 5752 } 5753 5754 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 5755 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 5756 m_heap.unset_full(); 5757 5758 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5759 5760 #if DEBUG_MEMC_CLEANUP 5761 if(m_debug) 5762 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 5763 << " Update the list of free entries" << std::endl; 5764 #endif 5765 break; 5766 } 5767 ////////////////////// 5768 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 5769 // invalidate transaction matching the cleanup 5770 { 5771 m_cpt_cleanup_fsm_ivt_lock++; 5772 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 5773 5774 size_t index = 0; 5775 bool match_inval; 5776 5777 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 5778 if ( not match_inval ) // no pending inval 5779 { 5780 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5781 5782 #if DEBUG_MEMC_CLEANUP 5783 if(m_debug) 5784 std::cout << " <MEMC " << name() 5785 << " CLEANUP_IVT_LOCK> Unexpected cleanup" 5786 << " with no corresponding IVT entry:" 5787 << " address = " << std::hex 5788 << (r_cleanup_nline.read() *4*m_words) 5789 << std::endl; 5790 #endif 5791 m_cpt_cleanup_fsm_n_upt_lock++; 5792 } 5793 else 5794 { 5795 // pending inval 5796 r_cleanup_write_srcid = m_ivt.srcid(index); 5797 r_cleanup_write_trdid = m_ivt.trdid(index); 5798 r_cleanup_write_pktid = m_ivt.pktid(index); 5799 r_cleanup_need_rsp = m_ivt.need_rsp(index); 5800 r_cleanup_need_ack = m_ivt.need_ack(index); 5801 r_cleanup_index = index; 5802 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 5803 #if DEBUG_MEMC_CLEANUP 5804 if(m_debug) 5805 std::cout << " <MEMC " << name() 5806 << " CLEANUP_IVT_LOCK> Cleanup matching pending" 5807 << " invalidate 
transaction on IVT:" 5808 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 5809 << " / ivt_entry = " << index << std::endl; 5810 #endif 5811 } 5812 break; 5813 } 5814 /////////////////////////// 5815 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 5816 { 5817 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5818 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 5819 5820 size_t count = 0; 5821 m_ivt.decrement(r_cleanup_index.read(), count); 5822 5823 if(count == 0) // multi inval transaction completed 5824 { 5825 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 5826 } 5827 else // multi inval transaction not completed 5828 { 5829 if (r_cleanup_ncc.read()) //need to put data to the XRAM 5830 { 5831 r_cleanup_fsm = CLEANUP_IXR_REQ; 5832 } 5833 else 5834 { 5835 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5836 } 5837 } 5838 5839 #if DEBUG_MEMC_CLEANUP 5840 if(m_debug) 5841 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 5842 << " Decrement response counter in IVT:" 5843 << " IVT_index = " << r_cleanup_index.read() 5844 << " / rsp_count = " << count << std::endl; 5845 #endif 5846 break; 5847 } 5848 /////////////////////// 5849 case CLEANUP_IVT_CLEAR: // Clear IVT entry 5850 { 5851 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5852 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 5853 5854 m_ivt.clear(r_cleanup_index.read()); 5855 5856 if ( r_cleanup_need_ack.read() ) 5857 { 5858 assert( (r_config_rsp_lines.read() > 0) and 5859 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 5860 5861 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 5862 } 5863 5864 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 5865 else if ( r_cleanup_ncc.read() ) r_cleanup_fsm = CLEANUP_IXR_REQ; 5866 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 5867 5868 #if DEBUG_MEMC_CLEANUP 5869 if(m_debug) 5870 std::cout << " <MEMC " << name() 5871 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 5872 << " IVT_index = " << r_cleanup_index.read() << std::endl; 5873 #endif 5874 break; 5875 } 5876 /////////////////////// 5877 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 5878 // wait if pending request to the TGT_RSP FSM 5879 { 5880 if(r_cleanup_to_tgt_rsp_req.read()) break; 5881 5882 // no pending request 5883 r_cleanup_to_tgt_rsp_req = true; 5884 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 5885 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 5886 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 5887 r_cleanup_to_tgt_rsp_type = true; 5888 5889 if (r_cleanup_ncc.read()) 5890 { 5891 r_cleanup_fsm = CLEANUP_IXR_REQ;//need to put data to the XRAM 5892 } 5893 else 5894 { 5895 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5896 } 5897 5898 #if DEBUG_MEMC_CLEANUP 5899 if(m_debug) 5900 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 5901 << " Send a response to a previous write request: " 5902 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 5903 << " / rtrdid = " << r_cleanup_write_trdid.read() 5904 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 5905 #endif 5906 break; 5907 } 5908 ///////////////////////// 5909 case CLEANUP_IXR_REQ: 5910 { 5911 //Send a request to the ixr to write the data in the XRAM using the prereserved TRT entry 5912 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CLEANUP) 5913 { 5914 if( not r_cleanup_to_ixr_cmd_req.read()) 5915 { 5916 size_t index = 0; 5917 bool hit = m_trt.hit_write(r_cleanup_nline.read(), &index); 5918 5919 assert 
(hit and "CLEANUP_IXR_REQ found no matching entry in TRT"); 5920 5921 r_cleanup_to_ixr_cmd_req = true; 5922 5923 if (r_cleanup_contains_data.read()) 5924 { 5925 std::vector<data_t> data_vector; 5926 data_vector.clear(); 5927 5928 for(size_t i=0; i<m_words; i++) 5929 { 5930 data_vector.push_back(r_cleanup_data[i]); 5931 } 5932 5933 m_trt.set(index, 5934 false, // write to XRAM 5935 r_cleanup_nline.read(), // line index 5936 0, 5937 0, 5938 0, 5939 false, 5940 0, 5941 0, 5942 std::vector<be_t> (m_words,0), 5943 data_vector); 5944 } 5945 //std::cout << "cleanup with a non coherent ligne in trt index = " << index << std::endl; 5946 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 5947 r_cleanup_to_ixr_cmd_index = index; 5948 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 5949 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 5950 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = r_cleanup_contains_data.read(); 5951 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5952 #if DEBUG_MEMC_CLEANUP 5953 if(m_debug) 5954 { 5955 std::cout 5956 << " <MEMC " << name() 5957 << " CLEANUP_IXR_REQ>" 5958 << " request send to IXR_CMD" 5959 << std::endl; 5960 } 5961 #endif 5962 } 5963 else 5964 { 5965 r_cleanup_fsm = CLEANUP_WAIT; 5966 #if DEBUG_MEMC_CLEANUP 5967 if(m_debug) 5968 { 5969 std::cout 5970 << " <MEMC " << name() 5971 << " CLEANUP_IXR_REQ>" 5972 << " waiting completion of previous request" 5973 << std::endl; 5974 } 5975 #endif 5976 } 5977 } 5978 break; 5979 } 5980 5981 ///////////////////// 5982 case CLEANUP_WAIT : 5983 { 5984 r_cleanup_fsm = CLEANUP_IXR_REQ; 5985 break; 5986 } 5987 5988 //////////////////////// 5989 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 5990 // on the coherence CLACK network. 5991 { 5992 if(not p_dspin_clack.read) break; 5993 5994 r_cleanup_fsm = CLEANUP_IDLE; 5995 5996 #if DEBUG_MEMC_CLEANUP 5997 if(m_debug) 5998 std::cout << " <MEMC " << name() 5999 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 6000 << " nline = " << std::hex << r_cleanup_nline.read() 6001 << " / way = " << std::dec << r_cleanup_way.read() 6002 << " / srcid = " << std::dec << r_cleanup_srcid.read() 6003 << std::endl; 6004 #endif 6005 break; 6006 } 6007 } // end switch cleanup fsm 6008 6009 //////////////////////////////////////////////////////////////////////////////////// 6010 // CAS FSM 6011 //////////////////////////////////////////////////////////////////////////////////// 6012 // The CAS FSM handles the CAS (Store Conditionnal) atomic commands, 6013 // that are handled as "compare-and-swap instructions. 6014 // 6015 // This command contains two or four flits: 6016 // - In case of 32 bits atomic access, the first flit contains the value read 6017 // by a previous LL instruction, the second flit contains the value to be writen. 6018 // - In case of 64 bits atomic access, the 2 first flits contains the value read 6019 // by a previous LL instruction, the 2 next flits contains the value to be writen. 6020 // 6021 // The target address is cachable. If it is replicated in other L1 caches 6022 // than the writer, a coherence operation is done. 6023 // 6024 // It access the directory to check hit / miss. 6025 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 6026 // If a read transaction to the XRAM for this line already exists, 6027 // or if the transaction table is full, it goes to the WAIT state 6028 // to release the locks and try again. When the GET transaction has been 6029 // launched, it goes to the WAIT state and try again. 
6030 // The CAS request is not consumed in the FIFO until a HIT is obtained. 6031 // - In case of hit... 6032 /////////////////////////////////////////////////////////////////////////////////// 6033 6034 switch(r_cas_fsm.read()) 6035 { 6036 ///////////// 6037 case CAS_IDLE: // fill the local rdata buffers 6038 { 6039 if(m_cmd_cas_addr_fifo.rok()) 6040 { 6041 6042 #if DEBUG_MEMC_CAS 6043 if(m_debug) 6044 { 6045 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 6046 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 6047 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 6048 << " wdata = " << m_cmd_cas_wdata_fifo.read() 6049 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 6050 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 6051 } 6052 #endif 6053 if(m_cmd_cas_eop_fifo.read()) 6054 { 6055 m_cpt_cas++; 6056 r_cas_fsm = CAS_DIR_REQ; 6057 } 6058 else // we keep the last word in the FIFO 6059 { 6060 cmd_cas_fifo_get = true; 6061 } 6062 // We fill the two buffers 6063 if(r_cas_cpt.read() < 2) // 32 bits access 6064 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 6065 6066 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 6067 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6068 6069 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits... 6070 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 6071 6072 if(r_cas_cpt.read() ==2) 6073 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6074 6075 r_cas_cpt = r_cas_cpt.read() +1; 6076 } 6077 break; 6078 } 6079 6080 ///////////////// 6081 case CAS_DIR_REQ: 6082 { 6083 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 6084 { 6085 r_cas_fsm = CAS_DIR_LOCK; 6086 m_cpt_cas_fsm_n_dir_lock++; 6087 } 6088 6089 #if DEBUG_MEMC_CAS 6090 if(m_debug) 6091 { 6092 std::cout 6093 << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " 6094 << std::endl; 6095 } 6096 #endif 6097 6098 m_cpt_cas_fsm_dir_lock++; 6099 6100 break; 6101 } 6102 6103 ///////////////// 6104 case CAS_DIR_LOCK: // Read the directory 6105 { 6106 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6107 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 6108 6109 size_t way = 0; 6110 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 6111 6112 r_cas_is_cnt = entry.is_cnt; 6113 r_cas_coherent = entry.cache_coherent; 6114 r_cas_dirty = entry.dirty; 6115 r_cas_tag = entry.tag; 6116 r_cas_way = way; 6117 r_cas_copy = entry.owner.srcid; 6118 r_cas_copy_inst = entry.owner.inst; 6119 r_cas_ptr = entry.ptr; 6120 r_cas_count = entry.count; 6121 6122 if(entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 6123 else r_cas_fsm = CAS_MISS_TRT_LOCK; 6124 6125 #if DEBUG_MEMC_CAS 6126 if(m_debug) 6127 { 6128 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 6129 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6130 << " / hit = " << std::dec << entry.valid 6131 << " / count = " << entry.count 6132 << " / is_cnt = " << entry.is_cnt << std::endl; 6133 } 6134 #endif 6135 break; 6136 } 6137 ///////////////////// 6138 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 6139 // and check data change in cache 6140 { 6141 size_t way = r_cas_way.read(); 6142 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6143 6144 // update directory (lock & dirty bits) 6145 DirectoryEntry entry; 6146 entry.valid = true; 6147 entry.cache_coherent = r_cas_coherent.read(); 6148 entry.is_cnt = r_cas_is_cnt.read(); 6149 entry.dirty = true; 6150 entry.lock = true; 6151 entry.tag = r_cas_tag.read(); 
6152 entry.owner.srcid = r_cas_copy.read(); 6153 entry.owner.inst = r_cas_copy_inst.read(); 6154 entry.count = r_cas_count.read(); 6155 entry.ptr = r_cas_ptr.read(); 6156 6157 m_cache_directory.write(set, way, entry); 6158 6159 // Stored data from cache in buffer to do the comparison in next state 6160 m_cache_data.read_line(way, set, r_cas_data); 6161 6162 r_cas_fsm = CAS_DIR_HIT_COMPARE; 6163 6164 #if DEBUG_MEMC_CAS 6165 if(m_debug) 6166 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 6167 << " cache and store it in buffer" << std::endl; 6168 #endif 6169 break; 6170 } 6171 6172 case CAS_DIR_HIT_COMPARE: 6173 { 6174 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6175 6176 // Read data in buffer & check data change 6177 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 6178 6179 if(r_cas_cpt.read() == 4) // 64 bits CAS 6180 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 6181 6182 // to avoid livelock, force the atomic access to fail pseudo-randomly 6183 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 6184 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 6185 6186 // cas success 6187 if(ok and not forced_fail) 6188 { 6189 r_cas_fsm = CAS_DIR_HIT_WRITE; 6190 } 6191 // cas failure 6192 else 6193 { 6194 r_cas_fsm = CAS_RSP_FAIL; 6195 } 6196 6197 #if DEBUG_MEMC_CAS 6198 if(m_debug) 6199 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare the old" 6200 << " and the new data" 6201 << " / expected value = " << r_cas_rdata[0].read() 6202 << " / actual value = " << r_cas_data[word].read() 6203 << " / forced_fail = " << forced_fail << std::endl; 6204 #endif 6205 break; 6206 } 6207 ////////////////////// 6208 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 6209 // write data in cache if no CC request 6210 { 6211 // The CAS is a success => sw access to the llsc_global_table 6212 //m_llsc_table.sw(m_cmd_cas_addr_fifo.read()); 6213 /**//*std::cout << "MEMCACHE : from proc " << m_cmd_cas_srcid_fifo.read() 6214 << " | @ " << std::hex << m_cmd_cas_addr_fifo.read() 6215 << " | WRITE (cas triggered)" << std::endl;*/ 6216 6217 m_llsc_table.sw(m_cmd_cas_addr_fifo.read(), m_cmd_cas_addr_fifo.read()); 6218 // test coherence request 6219 if(r_cas_count.read()) // replicated line 6220 { 6221 if(r_cas_is_cnt.read()) 6222 { 6223 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 6224 #if DEBUG_MEMC_CAS 6225 if(m_debug) 6226 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6227 << " Broacast Inval required" 6228 << " / copies = " << r_cas_count.read() << std::endl; 6229 #endif 6230 6231 } 6232 else if(!r_cas_to_cc_send_multi_req.read() and 6233 !r_cas_to_cc_send_brdcast_req.read()) 6234 { 6235 r_cas_fsm = CAS_UPT_LOCK; // multi update required 6236 #if DEBUG_MEMC_CAS 6237 if(m_debug) 6238 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6239 << " Multi Inval required" 6240 << " / copies = " << r_cas_count.read() << std::endl; 6241 #endif 6242 } 6243 else 6244 { 6245 r_cas_fsm = CAS_WAIT; 6246 #if DEBUG_MEMC_CAS 6247 if(m_debug) 6248 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6249 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 6250 #endif 6251 } 6252 } 6253 else // no copies 6254 { 6255 size_t way = r_cas_way.read(); 6256 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6257 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6258 6259 // cache update 6260 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6261 
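// a 64-bit CAS (4 command flits) also updates the second word of the pair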
if(r_cas_cpt.read() == 4) 6262 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6263 6264 r_cas_fsm = CAS_RSP_SUCCESS; 6265 6266 #if DEBUG_MEMC_CAS 6267 if(m_debug) 6268 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 6269 << " way = " << std::dec << way 6270 << " / set = " << set 6271 << " / word = " << word 6272 << " / value = " << r_cas_wdata.read() 6273 << " / count = " << r_cas_count.read() 6274 << " / global_llsc_table access" << std::endl; 6275 #endif 6276 } 6277 break; 6278 } 6279 ///////////////// 6280 case CAS_UPT_LOCK: // try to register the transaction in UPT 6281 // and write data in cache if successful registration 6282 // releases locks to retry later if UPT full 6283 { 6284 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 6285 { 6286 bool wok = false; 6287 size_t index = 0; 6288 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6289 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6290 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6291 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6292 size_t nb_copies = r_cas_count.read(); 6293 6294 wok = m_upt.set(true, // it's an update transaction 6295 false, // it's not a broadcast 6296 true, // response required 6297 false, // no acknowledge required 6298 srcid, 6299 trdid, 6300 pktid, 6301 nline, 6302 nb_copies, 6303 index); 6304 if(wok) // coherence transaction registered in UPT 6305 { 6306 // cache update 6307 size_t way = r_cas_way.read(); 6308 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6309 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6310 6311 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6312 if(r_cas_cpt.read() ==4) 6313 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6314 6315 r_cas_upt_index = index; 6316 r_cas_fsm = CAS_UPT_HEAP_LOCK; 6317 6318 } 6319 else // releases the locks protecting UPT and DIR UPT full 6320 { 6321 r_cas_fsm = CAS_WAIT; 6322 } 6323 6324 #if DEBUG_MEMC_CAS 6325 if(m_debug) 6326 std::cout << " <MEMC " << name() 6327 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 6328 << " / wok = " << wok 6329 << " / nline = " << std::hex << nline 6330 << " / count = " << nb_copies << std::endl; 6331 #endif 6332 m_cpt_cas_fsm_n_upt_lock++; 6333 } 6334 6335 m_cpt_cas_fsm_upt_lock++; 6336 6337 break; 6338 } 6339 ///////////// 6340 case CAS_WAIT: // release all locks and retry from beginning 6341 { 6342 6343 #if DEBUG_MEMC_CAS 6344 if(m_debug) 6345 { 6346 std::cout << " <MEMC " << name() 6347 << " CAS_WAIT> Release all locks" << std::endl; 6348 } 6349 #endif 6350 r_cas_fsm = CAS_DIR_REQ; 6351 break; 6352 } 6353 ////////////////// 6354 case CAS_UPT_HEAP_LOCK: // lock the heap 6355 { 6356 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 6357 { 6358 6359 #if DEBUG_MEMC_CAS 6360 if(m_debug) 6361 { 6362 std::cout << " <MEMC " << name() 6363 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 6364 } 6365 #endif 6366 r_cas_fsm = CAS_UPT_REQ; 6367 m_cpt_cas_fsm_n_heap_lock++; 6368 } 6369 6370 m_cpt_cas_fsm_heap_lock++; 6371 6372 break; 6373 } 6374 //////////////// 6375 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 6376 { 6377 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 6378 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 6379 6380 if(!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 6381 { 6382 r_cas_to_cc_send_brdcast_req = false; 6383 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6384 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6385 
r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6386 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 6387 6388 if(r_cas_cpt.read() == 4) 6389 { 6390 r_cas_to_cc_send_is_long = true; 6391 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 6392 } 6393 else 6394 { 6395 r_cas_to_cc_send_is_long = false; 6396 r_cas_to_cc_send_wdata_high = 0; 6397 } 6398 6399 // We put the first copy in the fifo 6400 cas_to_cc_send_fifo_put = true; 6401 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 6402 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 6403 if(r_cas_count.read() == 1) // one single copy 6404 { 6405 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 6406 // update responses 6407 cmd_cas_fifo_get = true; 6408 r_cas_to_cc_send_multi_req = true; 6409 r_cas_cpt = 0; 6410 } 6411 else // several copies 6412 { 6413 r_cas_fsm = CAS_UPT_NEXT; 6414 } 6415 6416 #if DEBUG_MEMC_CAS 6417 if(m_debug) 6418 { 6419 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 6420 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6421 << " / wdata = " << std::hex << r_cas_wdata.read() 6422 << " / srcid = " << std::dec << r_cas_copy.read() 6423 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 6424 } 6425 #endif 6426 } 6427 break; 6428 } 6429 ///////////////// 6430 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 6431 { 6432 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 6433 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 6434 6435 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 6436 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 6437 cas_to_cc_send_fifo_inst = entry.owner.inst; 6438 cas_to_cc_send_fifo_put = true; 6439 6440 if(m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 6441 { 6442 r_cas_ptr = entry.next; 6443 if(entry.next == r_cas_ptr.read()) // last copy 6444 { 6445 r_cas_to_cc_send_multi_req = true; 6446 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 6447 // all update responses 6448 cmd_cas_fifo_get = true; 6449 r_cas_cpt = 0; 6450 } 6451 } 6452 6453 #if DEBUG_MEMC_CAS 6454 if(m_debug) 6455 { 6456 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 6457 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6458 << " / wdata = " << std::hex << r_cas_wdata.read() 6459 << " / srcid = " << std::dec << entry.owner.srcid 6460 << " / inst = " << std::dec << entry.owner.inst << std::endl; 6461 } 6462 #endif 6463 break; 6464 } 6465 ///////////////////// 6466 case CAS_BC_TRT_LOCK: // check the TRT to register a PUT transaction 6467 { 6468 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6469 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 6470 6471 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 6472 { 6473 size_t wok_index = 0; 6474 bool wok = !m_trt.full(wok_index); 6475 if( wok ) 6476 { 6477 r_cas_trt_index = wok_index; 6478 r_cas_fsm = CAS_BC_IVT_LOCK; 6479 } 6480 else 6481 { 6482 r_cas_fsm = CAS_WAIT; 6483 m_cpt_cas_fsm_n_trt_lock++; 6484 } 6485 6486 #if DEBUG_MEMC_CAS 6487 if(m_debug) 6488 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 6489 << " : wok = " << wok << " / index = " << wok_index << std::endl; 6490 #endif 6491 } 6492 m_cpt_cas_fsm_trt_lock++; 6493 6494 break; 6495 } 6496 ///////////////////// 6497 case CAS_BC_IVT_LOCK: // register a broadcast inval transaction in IVT 6498 // write data in cache in case of successful registration 6499 { 6500 
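// proceed only when the ALLOC_IVT FSM has granted the IVT lock to the CAS FSM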
if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) 6501 { 6502 bool wok = false; 6503 size_t index = 0; 6504 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6505 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6506 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6507 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6508 size_t nb_copies = r_cas_count.read(); 6509 6510 // register a broadcast inval transaction in IVT 6511 wok = m_ivt.set(false, // it's an inval transaction 6512 true, // it's a broadcast 6513 true, // response required 6514 false, // no acknowledge required 6515 srcid, 6516 trdid, 6517 pktid, 6518 nline, 6519 nb_copies, 6520 index); 6521 6522 if(wok) // IVT not full 6523 { 6524 // cache update 6525 size_t way = r_cas_way.read(); 6526 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6527 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6528 6529 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6530 if(r_cas_cpt.read() ==4) 6531 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6532 6533 r_cas_upt_index = index; 6534 r_cas_fsm = CAS_BC_DIR_INVAL; 6535 6536 #if DEBUG_MEMC_CAS 6537 if(m_debug) 6538 std::cout << " <MEMC " << name() 6539 << " CAS_BC_IVT_LOCK> Register a broadcast inval transaction in IVT" 6540 << " / nline = " << std::hex << nline 6541 << " / count = " << std::dec << nb_copies 6542 << " / ivt_index = " << index << std::endl; 6543 #endif 6544 } 6545 else // releases the lock protecting IVT 6546 { 6547 r_cas_fsm = CAS_WAIT; 6548 } 6549 m_cpt_cas_fsm_n_upt_lock++; 6550 } 6551 6552 m_cpt_cas_fsm_upt_lock++; 6553 6554 break; 6555 } 6556 ////////////////////// 6557 case CAS_BC_DIR_INVAL: // Register the PUT transaction in TRT, and inval the DIR entry 6558 { 6559 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6560 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 6561 6562 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6563 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 6564 6565 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 6566 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 6567 6568 std::vector<data_t> data_vector; 6569 data_vector.clear(); 6570 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6571 for(size_t i=0; i<m_words; i++) 6572 { 6573 if(i == word) // first modified word 6574 data_vector.push_back( r_cas_wdata.read() ); 6575 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 6576 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 6577 else // unmodified words 6578 data_vector.push_back( r_cas_data[i].read() ); 6579 } 6580 m_trt.set( r_cas_trt_index.read(), 6581 false, // PUT request 6582 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 6583 0, 6584 0, 6585 0, 6586 false, // not a processor read 6587 0, 6588 0, 6589 std::vector<be_t> (m_words,0), 6590 data_vector ); 6591 6592 // invalidate directory entry 6593 DirectoryEntry entry; 6594 entry.valid = false; 6595 entry.dirty = false; 6596 entry.tag = 0; 6597 entry.is_cnt = false; 6598 entry.lock = false; 6599 entry.count = 0; 6600 entry.owner.srcid = 0; 6601 entry.owner.inst = false; 6602 entry.ptr = 0; 6603 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6604 size_t way = r_cas_way.read(); 6605 m_cache_directory.write(set, way, entry); 6606 6607 r_cas_fsm = CAS_BC_CC_SEND; 6608 6609 #if DEBUG_MEMC_CAS 6610 if(m_debug) 6611 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 6612 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 6613 
#endif 6614 break; 6615 } 6616 /////////////////// 6617 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 6618 { 6619 if( not r_cas_to_cc_send_multi_req.read() and 6620 not r_cas_to_cc_send_brdcast_req.read()) 6621 { 6622 r_cas_to_cc_send_multi_req = false; 6623 r_cas_to_cc_send_brdcast_req = true; 6624 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6625 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6626 r_cas_to_cc_send_index = 0; 6627 r_cas_to_cc_send_wdata = 0; 6628 6629 r_cas_fsm = CAS_BC_XRAM_REQ; 6630 } 6631 break; 6632 } 6633 //////////////////// 6634 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a put transaction 6635 { 6636 if( not r_cas_to_ixr_cmd_req.read() ) 6637 { 6638 r_cas_to_ixr_cmd_req = true; 6639 r_cas_to_ixr_cmd_put = true; 6640 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6641 r_cas_fsm = CAS_IDLE; 6642 cmd_cas_fifo_get = true; 6643 r_cas_cpt = 0; 6644 6645 #if DEBUG_MEMC_CAS 6646 if(m_debug) 6647 std::cout << " <MEMC " << name() 6648 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 6649 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 6650 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 6651 #endif 6652 } 6653 6654 break; 6655 } 6656 ///////////////// 6657 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 6658 { 6659 if( not r_cas_to_tgt_rsp_req.read() ) 6660 { 6661 cmd_cas_fifo_get = true; 6662 r_cas_cpt = 0; 6663 r_cas_to_tgt_rsp_req = true; 6664 r_cas_to_tgt_rsp_data = 1; 6665 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6666 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6667 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6668 r_cas_fsm = CAS_IDLE; 6669 6670 #if DEBUG_MEMC_CAS 6671 if(m_debug) 6672 std::cout << " <MEMC " << name() 6673 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 6674 #endif 6675 } 6676 break; 6677 } 6678 //////////////////// 6679 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 6680 { 6681 if( not r_cas_to_tgt_rsp_req.read() ) 6682 { 6683 cmd_cas_fifo_get = true; 6684 r_cas_cpt = 0; 6685 r_cas_to_tgt_rsp_req = true; 6686 r_cas_to_tgt_rsp_data = 0; 6687 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6688 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6689 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6690 r_cas_fsm = CAS_IDLE; 6691 6692 #if DEBUG_MEMC_CAS 6693 if(m_debug) 6694 std::cout << " <MEMC " << name() 6695 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 6696 #endif 6697 } 6698 break; 6699 } 6700 ///////////////////// 6701 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 6702 { 6703 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 6704 { 6705 size_t index = 0; 6706 bool hit_read = m_trt.hit_read( 6707 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 6708 bool hit_write = m_trt.hit_write( 6709 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 6710 bool wok = not m_trt.full(index); 6711 6712 #if DEBUG_MEMC_CAS 6713 if(m_debug) 6714 { 6715 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 6716 << " / hit_read = " << hit_read 6717 << " / hit_write = " << hit_write 6718 << " / wok = " << wok 6719 << " / index = " << index << std::endl; 6720 } 6721 #endif 6722 6723 if(hit_read or !wok or hit_write) // missing line already requested or no space in TRT 6724 { 6725 r_cas_fsm = CAS_WAIT; 6726 } 6727 else 6728 { 6729 r_cas_trt_index = index; 6730 
r_cas_fsm = CAS_MISS_TRT_SET; 6731 } 6732 m_cpt_cas_fsm_n_trt_lock++; 6733 } 6734 6735 m_cpt_cas_fsm_trt_lock++; 6736 6737 break; 6738 } 6739 //////////////////// 6740 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 6741 { 6742 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6743 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 6744 6745 std::vector<be_t> be_vector; 6746 std::vector<data_t> data_vector; 6747 be_vector.clear(); 6748 data_vector.clear(); 6749 for(size_t i=0; i<m_words; i++) 6750 { 6751 be_vector.push_back(0); 6752 data_vector.push_back(0); 6753 } 6754 6755 m_trt.set(r_cas_trt_index.read(), 6756 true, // read request 6757 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 6758 m_cmd_cas_srcid_fifo.read(), 6759 m_cmd_cas_trdid_fifo.read(), 6760 m_cmd_cas_pktid_fifo.read(), 6761 false, // write request from processor 6762 0, 6763 0, 6764 be_vector, 6765 data_vector); 6766 r_cas_fsm = CAS_MISS_XRAM_REQ; 6767 6768 #if DEBUG_MEMC_CAS 6769 if(m_debug) 6770 { 6771 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register a GET transaction in TRT" << std::hex 6772 << " / nline = " << m_nline[(addr_t) m_cmd_cas_addr_fifo.read()] 6773 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 6774 } 6775 #endif 6776 break; 6777 } 6778 ////////////////////// 6779 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line 6780 { 6781 if( not r_cas_to_ixr_cmd_req.read() ) 6782 { 6783 r_cas_to_ixr_cmd_req = true; 6784 r_cas_to_ixr_cmd_put = false; 6785 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6786 r_cas_fsm = CAS_WAIT; 6787 6788 #if DEBUG_MEMC_CAS 6789 if(m_debug) 6790 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 6791 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 6792 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 6793 #endif 6794 } 6795 break; 6796 } 6797 } // end switch r_cas_fsm 6798 6799 6800 ////////////////////////////////////////////////////////////////////////////// 6801 // CC_SEND FSM 6802 ////////////////////////////////////////////////////////////////////////////// 6803 // The CC_SEND fsm controls the DSPIN initiator port on the coherence 6804 // network, used to update or invalidate cache lines in L1 caches. 6805 // 6806 // It implements a round-robin priority between the four possible client FSMs 6807 // XRAM_RSP > CAS > WRITE > CONFIG 6808 // 6809 // Each FSM can request the next services: 6810 // - r_xram_rsp_to_cc_send_multi_req : multi-inval 6811 // r_xram_rsp_to_cc_send_brdcast_req : broadcast-inval 6812 // - r_write_to_cc_send_multi_req : multi-update 6813 // r_write_to_cc_send_brdcast_req : broadcast-inval 6814 // - r_cas_to_cc_send_multi_req : multi-update 6815 // r_cas_to_cc_send_brdcast_req : broadcast-inval 6816 // - r_config_to_cc_send_multi_req : multi-inval 6817 // r_config_to_cc_send_brdcast_req : broadcast-inval 6818 // 6819 // An inval request is a double DSPIN flit command containing: 6820 // 1. the index of the line to be invalidated. 6821 // 6822 // An update request is a multi-flit DSPIN command containing: 6823 // 1. the index of the cache line to be updated. 6824 // 2. the index of the first modified word in the line. 6825 // 3. 
the data to update 6826 /////////////////////////////////////////////////////////////////////////////// 6827 6828 switch(r_cc_send_fsm.read()) 6829 { 6830 ///////////////////////// 6831 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 6832 { 6833 // XRAM_RSP 6834 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6835 r_xram_rsp_to_cc_send_multi_req.read()) 6836 { 6837 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6838 m_cpt_inval++; 6839 break; 6840 } 6841 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6842 { 6843 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6844 m_cpt_inval++; 6845 break; 6846 } 6847 // CAS 6848 if(m_cas_to_cc_send_inst_fifo.rok() or 6849 r_cas_to_cc_send_multi_req.read()) 6850 { 6851 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6852 m_cpt_update++; 6853 break; 6854 } 6855 if(r_cas_to_cc_send_brdcast_req.read()) 6856 { 6857 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6858 m_cpt_inval++; 6859 break; 6860 } 6861 6862 if(r_read_to_cc_send_req.read()) 6863 { 6864 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 6865 break; 6866 } 6867 6868 if(r_write_to_cc_send_req.read()) 6869 { 6870 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 6871 break; 6872 } 6873 6874 6875 // WRITE 6876 if(r_read_to_cc_send_req.read()) 6877 { 6878 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 6879 break; 6880 } 6881 6882 if(r_write_to_cc_send_req.read()) 6883 { 6884 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 6885 break; 6886 } 6887 if(m_write_to_cc_send_inst_fifo.rok() or 6888 r_write_to_cc_send_multi_req.read()) 6889 { 6890 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6891 m_cpt_update++; 6892 break; 6893 } 6894 if(r_write_to_cc_send_brdcast_req.read()) 6895 { 6896 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6897 m_cpt_inval++; 6898 break; 6899 } 6900 // CONFIG 6901 if(r_config_to_cc_send_multi_req.read()) 6902 { 6903 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6904 m_cpt_inval++; 6905 break; 6906 } 6907 if(r_config_to_cc_send_brdcast_req.read()) 6908 { 6909 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6910 m_cpt_inval++; 6911 break; 6912 } 6913 break; 6914 } 6915 //////////////////////// 6916 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 6917 { 6918 // CONFIG 6919 if(r_config_to_cc_send_multi_req.read()) 6920 { 6921 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6922 m_cpt_inval++; 6923 break; 6924 } 6925 if(r_config_to_cc_send_brdcast_req.read()) 6926 { 6927 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6928 m_cpt_inval++; 6929 break; 6930 } 6931 // XRAM_RSP 6932 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6933 r_xram_rsp_to_cc_send_multi_req.read()) 6934 { 6935 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6936 m_cpt_inval++; 6937 break; 6938 } 6939 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6940 { 6941 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6942 m_cpt_inval++; 6943 break; 6944 } 6945 // CAS 6946 if(m_cas_to_cc_send_inst_fifo.rok() or 6947 r_cas_to_cc_send_multi_req.read()) 6948 { 6949 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6950 m_cpt_update++; 6951 break; 6952 } 6953 if(r_cas_to_cc_send_brdcast_req.read()) 6954 { 6955 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6956 m_cpt_inval++; 6957 break; 6958 } 6959 // WRITE 6960 if(r_read_to_cc_send_req.read()) 6961 { 6962 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 6963 break; 6964 } 6965 6966 if(r_write_to_cc_send_req.read()) 6967 { 6968 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 6969 break; 6970 } 6971 if(m_write_to_cc_send_inst_fifo.rok() or 6972 r_write_to_cc_send_multi_req.read()) 6973 { 6974 
r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6975 m_cpt_update++; 6976 break; 6977 } 6978 if(r_write_to_cc_send_brdcast_req.read()) 6979 { 6980 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6981 m_cpt_inval++; 6982 break; 6983 } 6984 break; 6985 } 6986 /////////////////////////// 6987 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 6988 { 6989 // CAS 6990 if(m_cas_to_cc_send_inst_fifo.rok() or 6991 r_cas_to_cc_send_multi_req.read()) 6992 { 6993 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6994 m_cpt_update++; 6995 break; 6996 } 6997 if(r_cas_to_cc_send_brdcast_req.read()) 6998 { 6999 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7000 m_cpt_inval++; 7001 break; 7002 } 7003 7004 if(r_read_to_cc_send_req.read()) 7005 { 7006 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7007 break; 7008 } 7009 7010 if(r_write_to_cc_send_req.read()) 7011 { 7012 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7013 break; 7014 } 7015 7016 7017 // WRITE 7018 if(r_read_to_cc_send_req.read()) 7019 { 7020 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7021 break; 7022 } 7023 7024 if(r_write_to_cc_send_req.read()) 7025 { 7026 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7027 break; 7028 } 7029 if(m_write_to_cc_send_inst_fifo.rok() or 7030 r_write_to_cc_send_multi_req.read()) 7031 { 7032 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7033 m_cpt_update++; 7034 break; 7035 } 7036 7037 if(r_write_to_cc_send_brdcast_req.read()) 7038 { 7039 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7040 m_cpt_inval++; 7041 break; 7042 } 7043 // CONFIG 7044 if(r_config_to_cc_send_multi_req.read()) 7045 { 7046 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7047 m_cpt_inval++; 7048 break; 7049 } 7050 if(r_config_to_cc_send_brdcast_req.read()) 7051 { 7052 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7053 m_cpt_inval++; 7054 break; 7055 } 7056 // XRAM_RSP 7057 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7058 r_xram_rsp_to_cc_send_multi_req.read()) 7059 { 7060 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7061 m_cpt_inval++; 7062 break; 7063 } 7064 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7065 { 7066 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7067 m_cpt_inval++; 7068 break; 7069 } 7070 break; 7071 } 7072 ////////////////////// 7073 case CC_SEND_CAS_IDLE: // CLEANUP FSM has highest priority 7074 { 7075 7076 if(r_read_to_cc_send_req.read()) 7077 { 7078 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7079 break; 7080 } 7081 7082 if(r_write_to_cc_send_req.read()) 7083 { 7084 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7085 break; 7086 } 7087 7088 7089 if(m_write_to_cc_send_inst_fifo.rok() or 7090 r_write_to_cc_send_multi_req.read()) 7091 { 7092 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7093 m_cpt_update++; 7094 break; 7095 } 7096 if(r_write_to_cc_send_brdcast_req.read()) 7097 { 7098 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7099 m_cpt_inval++; 7100 break; 7101 } 7102 // CONFIG 7103 if(r_config_to_cc_send_multi_req.read()) 7104 { 7105 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7106 m_cpt_inval++; 7107 break; 7108 } 7109 if(r_config_to_cc_send_brdcast_req.read()) 7110 { 7111 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7112 m_cpt_inval++; 7113 break; 7114 } 7115 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7116 r_xram_rsp_to_cc_send_multi_req.read()) 7117 { 7118 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7119 m_cpt_inval++; 7120 break; 7121 } 7122 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7123 { 7124 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7125 m_cpt_inval++; 7126 break; 7127 } 7128 
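// CAS (lowest priority in this state)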
if(m_cas_to_cc_send_inst_fifo.rok() or 7129 r_cas_to_cc_send_multi_req.read()) 7130 { 7131 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7132 m_cpt_update++; 7133 break; 7134 } 7135 if(r_cas_to_cc_send_brdcast_req.read()) 7136 { 7137 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7138 m_cpt_inval++; 7139 break; 7140 } 7141 break; 7142 } 7143 ///////////////////////////////// 7144 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 7145 { 7146 if(m_config_to_cc_send_inst_fifo.rok()) 7147 { 7148 if(not p_dspin_m2p.read) break; 7149 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 7150 break; 7151 } 7152 if(r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 7153 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7154 break; 7155 } 7156 //////////////////////////////// 7157 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 7158 { 7159 if(not p_dspin_m2p.read) break; 7160 m_cpt_inval_mult++; 7161 config_to_cc_send_fifo_get = true; 7162 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7163 7164 #if DEBUG_MEMC_CC_SEND 7165 if(m_debug) 7166 std::cout << " <MEMC " << name() 7167 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 7168 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7169 #endif 7170 break; 7171 } 7172 /////////////////////////////////// 7173 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 7174 { 7175 if(not p_dspin_m2p.read) break; 7176 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 7177 break; 7178 } 7179 ////////////////////////////////// 7180 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 7181 { 7182 if(not p_dspin_m2p.read) break; 7183 m_cpt_inval_brdcast++; 7184 r_config_to_cc_send_brdcast_req = false; 7185 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7186 7187 #if DEBUG_MEMC_CC_SEND 7188 if(m_debug) 7189 std::cout << " <MEMC " << name() 7190 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 7191 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7192 #endif 7193 break; 7194 } 7195 /////////////////////////////////// 7196 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 7197 { 7198 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 7199 { 7200 if(not p_dspin_m2p.read) break; 7201 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 7202 break; 7203 } 7204 if(r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 7205 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7206 break; 7207 } 7208 ////////////////////////////////// 7209 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 7210 { 7211 if(not p_dspin_m2p.read) break; 7212 m_cpt_inval_mult++; 7213 xram_rsp_to_cc_send_fifo_get = true; 7214 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7215 7216 #if DEBUG_MEMC_CC_SEND 7217 if(m_debug) 7218 std::cout << " <MEMC " << name() 7219 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 7220 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7221 #endif 7222 break; 7223 } 7224 ///////////////////////////////////// 7225 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 7226 { 7227 if(not p_dspin_m2p.read) break; 7228 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 7229 break; 7230 } 7231 //////////////////////////////////// 7232 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 7233 { 7234 if(not p_dspin_m2p.read) break; 7235 m_cpt_inval_brdcast++; 7236 
r_xram_rsp_to_cc_send_brdcast_req = false; 7237 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7238 7239 #if DEBUG_MEMC_CC_SEND 7240 if(m_debug) 7241 std::cout << " <MEMC " << name() 7242 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 7243 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7244 #endif 7245 break; 7246 } 7247 7248 case CC_SEND_READ_NCC_INVAL_HEADER: 7249 { 7250 if(not p_dspin_m2p.read) break; 7251 7252 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_NLINE; 7253 break; 7254 } 7255 7256 case CC_SEND_READ_NCC_INVAL_NLINE: 7257 { 7258 if(not p_dspin_m2p.read) break; 7259 7260 r_read_to_cc_send_req = false; 7261 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7262 7263 #if DEBUG_MEMC_CC_SEND 7264 if(m_debug) 7265 { 7266 std::cout 7267 << " <MEMC " << name() 7268 << " CC_SEND_READ_NCC_INVAL_HEADER> Inval for line " 7269 << r_read_to_cc_send_nline.read() 7270 << std::endl; 7271 } 7272 #endif 7273 break; 7274 } 7275 7276 7277 case CC_SEND_WRITE_NCC_INVAL_HEADER: 7278 { 7279 if(not p_dspin_m2p.read) break; 7280 7281 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_NLINE; 7282 break; 7283 } 7284 7285 case CC_SEND_WRITE_NCC_INVAL_NLINE: 7286 { 7287 if(not p_dspin_m2p.read) break; 7288 7289 r_write_to_cc_send_req = false; 7290 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7291 7292 #if DEBUG_MEMC_CC_SEND 7293 if(m_debug) 7294 { 7295 std::cout 7296 << " <MEMC " << name() 7297 << " CC_SEND_WRITE_NCC_INVAL_HEADER> Inval for line " 7298 << r_write_to_cc_send_nline.read() 7299 << std::endl; 7300 } 7301 #endif 7302 break; 7303 } 7304 7305 7306 ////////////////////////////////// 7307 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 7308 { 7309 if(not p_dspin_m2p.read) break; 7310 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 7311 break; 7312 } 7313 ///////////////////////////////// 7314 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 7315 { 7316 if(not p_dspin_m2p.read) break; 7317 7318 m_cpt_inval_brdcast++; 7319 7320 r_write_to_cc_send_brdcast_req = false; 7321 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7322 7323 #if DEBUG_MEMC_CC_SEND 7324 if(m_debug) 7325 std::cout << " <MEMC " << name() 7326 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 7327 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 7328 #endif 7329 break; 7330 } 7331 /////////////////////////////// 7332 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 7333 { 7334 if(m_write_to_cc_send_inst_fifo.rok()) 7335 { 7336 if(not p_dspin_m2p.read) break; 7337 7338 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 7339 break; 7340 } 7341 7342 if(r_write_to_cc_send_multi_req.read()) 7343 { 7344 r_write_to_cc_send_multi_req = false; 7345 } 7346 7347 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7348 break; 7349 } 7350 ////////////////////////////// 7351 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 7352 { 7353 if(not p_dspin_m2p.read) break; 7354 m_cpt_update_mult++; 7355 7356 r_cc_send_cpt = 0; 7357 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 7358 7359 #if DEBUG_MEMC_CC_SEND 7360 if(m_debug) 7361 std::cout << " <MEMC " << name() 7362 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for line " 7363 << r_write_to_cc_send_nline.read() << std::endl; 7364 #endif 7365 break; 7366 } 7367 ///////////////////////////// 7368 case CC_SEND_WRITE_UPDT_DATA: // send N data flits for a multi-update (from WRITE FSM) 7369 { 7370 if(not p_dspin_m2p.read) break; 7371 if(r_cc_send_cpt.read() == r_write_to_cc_send_count.read()) 7372 { 7373 
write_to_cc_send_fifo_get = true; 7374 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7375 break; 7376 } 7377 7378 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 7379 break; 7380 } 7381 //////////////////////////////// 7382 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 7383 { 7384 if(not p_dspin_m2p.read) break; 7385 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 7386 break; 7387 } 7388 /////////////////////////////// 7389 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 7390 { 7391 if(not p_dspin_m2p.read) break; 7392 m_cpt_inval_brdcast++; 7393 7394 r_cas_to_cc_send_brdcast_req = false; 7395 r_cc_send_fsm = CC_SEND_CAS_IDLE; 7396 7397 #if DEBUG_MEMC_CC_SEND 7398 if(m_debug) 7399 std::cout << " <MEMC " << name() 7400 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for line " 7401 << r_cas_to_cc_send_nline.read() << std::endl; 7402 #endif 7403 break; 7404 } 7405 ///////////////////////////// 7406 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 7407 { 7408 if(m_cas_to_cc_send_inst_fifo.rok()) 7409 { 7410 if(not p_dspin_m2p.read) break; 7411 7412 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 7413 break; 7414 } 7415 7416 // no more packets to send for the multi-update 7417 if(r_cas_to_cc_send_multi_req.read()) 7418 { 7419 r_cas_to_cc_send_multi_req = false; 7420 } 7421 7422 r_cc_send_fsm = CC_SEND_CAS_IDLE; 7423 break; 7424 } 7425 //////////////////////////// 7426 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 7427 { 7428 if(not p_dspin_m2p.read) break; 7429 7430 m_cpt_update_mult++; 7431 7432 r_cc_send_cpt = 0; 7433 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 7434 7435 #if DEBUG_MEMC_CC_SEND 7436 if(m_debug) 7437 std::cout << " <MEMC " << name() 7438 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for line " 7439 << r_cas_to_cc_send_nline.read() << std::endl; 7440 #endif 7441 break; 7442 } 7443 /////////////////////////// 7444 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 7445 { 7446 if(not p_dspin_m2p.read) break; 7447 7448 if(r_cas_to_cc_send_is_long.read()) 7449 { 7450 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 7451 break; 7452 } 7453 7454 cas_to_cc_send_fifo_get = true; 7455 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7456 break; 7457 } 7458 //////////////////////////////// 7459 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for a multi-update (from CAS FSM) 7460 { 7461 if(not p_dspin_m2p.read) break; 7462 cas_to_cc_send_fifo_get = true; 7463 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7464 break; 7465 } 7466 } 7467 // end switch r_cc_send_fsm 7468 7469 ////////////////////////////////////////////////////////////////////////////// 7470 // CC_RECEIVE FSM 7471 ////////////////////////////////////////////////////////////////////////////// 7472 // The CC_RECEIVE fsm controls the DSPIN target port on the coherence 7473 // network. 
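// It decodes the P2M_TYPE field of the incoming flit: CLEANUP_DATA and
// CLEANUP_INST commands are pushed into the m_cc_receive_to_cleanup_fifo,
// while MULTI_ACK commands are pushed into the m_cc_receive_to_multi_ack_fifo.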
7474 ////////////////////////////////////////////////////////////////////////////// 7475 7476 switch(r_cc_receive_fsm.read()) 7477 { 7478 ///////////////////// 7479 case CC_RECEIVE_IDLE: 7480 { 7481 if(not p_dspin_p2m.write) break; 7482 7483 uint8_t type = 7484 DspinDhccpParam::dspin_get( 7485 p_dspin_p2m.data.read(), 7486 DspinDhccpParam::P2M_TYPE); 7487 7488 if((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or 7489 (type == DspinDhccpParam::TYPE_CLEANUP_INST)) 7490 { 7491 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 7492 break; 7493 } 7494 7495 if(type == DspinDhccpParam::TYPE_MULTI_ACK) 7496 { 7497 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 7498 break; 7499 } 7500 7501 assert(false and 7502 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 7503 "Illegal type in coherence request"); 7504 7505 break; 7506 } 7507 //////////////////////// 7508 case CC_RECEIVE_CLEANUP: 7509 { 7510 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 7511 7512 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 7513 break; 7514 7515 cc_receive_to_cleanup_fifo_put = true; 7516 if(p_dspin_p2m.eop.read()) 7517 r_cc_receive_fsm = CC_RECEIVE_IDLE; 7518 7519 break; 7520 } 7521 //////////////////////////// 7522 case CC_RECEIVE_CLEANUP_EOP: 7523 { 7524 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo 7525 7526 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 7527 break; 7528 7529 assert(p_dspin_p2m.eop.read() and 7530 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 7531 "CLEANUP command must have two flits"); 7532 7533 cc_receive_to_cleanup_fifo_put = true; 7534 if(p_dspin_p2m.eop.read()) 7535 r_cc_receive_fsm = CC_RECEIVE_IDLE; 7536 break; 7537 } 7538 7539 ////////////////////////// 7540 case CC_RECEIVE_MULTI_ACK: 7541 { 7542 // write MULTI_ACK flit in CC_RECEIVE to MULTI_ACK fifo 7543 7544 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 7545 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 7546 break; 7547 7548 assert(p_dspin_p2m.eop.read() and 7549 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 7550 "MULTI_ACK command must have one flit"); 7551 7552 cc_receive_to_multi_ack_fifo_put = true; 7553 r_cc_receive_fsm = CC_RECEIVE_IDLE; 7554 break; 7555 } 7556 } 7557 ////////////////////////////////////////////////////////////////////////// 7558 // TGT_RSP FSM 7559 ////////////////////////////////////////////////////////////////////////// 7560 // The TGT_RSP fsm sends the responses on the VCI target port 7561 // with a round robin priority between eigth requests : 7562 // - r_config_to_tgt_rsp_req 7563 // - r_tgt_cmd_to_tgt_rsp_req 7564 // - r_read_to_tgt_rsp_req 7565 // - r_write_to_tgt_rsp_req 7566 // - r_cas_to_tgt_rsp_req 7567 // - r_cleanup_to_tgt_rsp_req 7568 // - r_xram_rsp_to_tgt_rsp_req 7569 // - r_multi_ack_to_tgt_rsp_req 7570 // 7571 // The ordering is : 7572 // config >tgt_cmd > read > write > cas > xram > multi_ack > cleanup 7573 ////////////////////////////////////////////////////////////////////////// 7574 7575 switch(r_tgt_rsp_fsm.read()) 7576 { 7577 ///////////////////////// 7578 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 7579 { 7580 if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7581 else if(r_read_to_tgt_rsp_req) 7582 { 7583 r_tgt_rsp_fsm = TGT_RSP_READ; 7584 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7585 } 7586 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7587 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7588 else if(r_xram_rsp_to_tgt_rsp_req) 7589 { 7590 r_tgt_rsp_fsm = 
TGT_RSP_XRAM; 7591 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7592 } 7593 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7594 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7595 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7596 break; 7597 } 7598 ////////////////////////// 7599 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 7600 { 7601 if(r_read_to_tgt_rsp_req) 7602 { 7603 r_tgt_rsp_fsm = TGT_RSP_READ; 7604 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7605 } 7606 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7607 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7608 else if(r_xram_rsp_to_tgt_rsp_req) 7609 { 7610 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7611 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7612 } 7613 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7614 else if(r_cleanup_to_tgt_rsp_req) 7615 { 7616 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7617 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7618 } 7619 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7620 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7621 break; 7622 } 7623 /////////////////////// 7624 case TGT_RSP_READ_IDLE: // write requests have the highest priority 7625 { 7626 if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7627 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7628 else if(r_xram_rsp_to_tgt_rsp_req) 7629 { 7630 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7631 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7632 } 7633 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7634 else if(r_cleanup_to_tgt_rsp_req) 7635 { 7636 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7637 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7638 } 7639 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7640 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7641 else if(r_read_to_tgt_rsp_req) 7642 { 7643 r_tgt_rsp_fsm = TGT_RSP_READ; 7644 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7645 } 7646 break; 7647 } 7648 //////////////////////// 7649 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 7650 { 7651 if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7652 else if(r_xram_rsp_to_tgt_rsp_req) 7653 { 7654 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7655 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7656 } 7657 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7658 else if(r_cleanup_to_tgt_rsp_req) 7659 { 7660 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7661 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7662 } 7663 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7664 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7665 else if(r_read_to_tgt_rsp_req) 7666 { 7667 r_tgt_rsp_fsm = TGT_RSP_READ; 7668 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7669 } 7670 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7671 break; 7672 } 7673 /////////////////////// 7674 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 7675 { 7676 if(r_xram_rsp_to_tgt_rsp_req) 7677 { 7678 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7679 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7680 } 7681 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7682 else if(r_cleanup_to_tgt_rsp_req) 7683 { 7684 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7685 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7686 } 7687 else 
if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7688 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7689 else if(r_read_to_tgt_rsp_req) 7690 { 7691 r_tgt_rsp_fsm = TGT_RSP_READ; 7692 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7693 } 7694 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7695 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7696 break; 7697 } 7698 /////////////////////// 7699 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 7700 { 7701 7702 if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7703 else if(r_cleanup_to_tgt_rsp_req) 7704 { 7705 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7706 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7707 } 7708 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7709 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7710 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7711 else if(r_read_to_tgt_rsp_req) 7712 { 7713 r_tgt_rsp_fsm = TGT_RSP_READ; 7714 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7715 } 7716 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7717 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7718 else if(r_xram_rsp_to_tgt_rsp_req) 7719 { 7720 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7721 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7722 } 7723 break; 7724 } 7725 //////////////////////////// 7726 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 7727 { 7728 if(r_cleanup_to_tgt_rsp_req) 7729 { 7730 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7731 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7732 } 7733 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7734 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7735 else if(r_read_to_tgt_rsp_req) 7736 { 7737 r_tgt_rsp_fsm = TGT_RSP_READ; 7738 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7739 } 7740 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7741 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7742 else if(r_xram_rsp_to_tgt_rsp_req) 7743 { 7744 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7745 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7746 } 7747 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7748 break; 7749 } 7750 ////////////////////////// 7751 case TGT_RSP_CLEANUP_IDLE: // tgt cmd requests have the highest priority 7752 { 7753 if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7754 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7755 else if(r_read_to_tgt_rsp_req) 7756 { 7757 r_tgt_rsp_fsm = TGT_RSP_READ; 7758 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7759 } 7760 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7761 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7762 else if(r_xram_rsp_to_tgt_rsp_req) 7763 { 7764 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7765 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7766 } 7767 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7768 else if(r_cleanup_to_tgt_rsp_req) 7769 { 7770 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7771 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 7772 } 7773 break; 7774 } 7775 //////////////////// 7776 case TGT_RSP_CONFIG: // send the response for a config transaction 7777 { 7778 if ( p_vci_tgt.rspack ) 7779 { 7780 r_config_to_tgt_rsp_req = false; 7781 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 7782 7783 #if DEBUG_MEMC_TGT_RSP 7784 if( m_debug ) 7785 { 7786 std::cout 7787 << " 
<MEMC " << name() 7788 << " TGT_RSP_CONFIG> Config transaction completed response" 7789 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 7790 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 7791 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 7792 << std::endl; 7793 } 7794 #endif 7795 } 7796 break; 7797 } 7798 ///////////////////// 7799 case TGT_RSP_TGT_CMD: // send the response for a configuration access 7800 { 7801 if ( p_vci_tgt.rspack ) 7802 { 7803 r_tgt_cmd_to_tgt_rsp_req = false; 7804 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 7805 7806 #if DEBUG_MEMC_TGT_RSP 7807 if( m_debug ) 7808 { 7809 std::cout 7810 << " <MEMC " << name() 7811 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 7812 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 7813 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 7814 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 7815 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 7816 << std::endl; 7817 } 7818 #endif 7819 } 7820 break; 7821 } 7822 ////////////////// 7823 case TGT_RSP_READ: // send the response to a read 7824 { 7825 if ( p_vci_tgt.rspack ) 7826 { 7827 7828 #if DEBUG_MEMC_TGT_RSP 7829 if( m_debug ) 7830 { 7831 std::cout 7832 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 7833 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 7834 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 7835 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 7836 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 7837 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 7838 } 7839 #endif 7840 7841 7842 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 7843 r_read_to_tgt_rsp_length.read() - 1; 7844 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7845 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7846 7847 if ((is_last_word and not is_ll) or 7848 (r_tgt_rsp_key_sent.read() and is_ll)) 7849 { 7850 // Last word in case of READ or second flit in case if LL 7851 r_tgt_rsp_key_sent = false; 7852 r_read_to_tgt_rsp_req = false; 7853 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 7854 if (r_read_to_tgt_rsp_pktid.read() == 0x0) 7855 { 7856 m_cpt_read_data_unc ++; 7857 } 7858 else if (r_read_to_tgt_rsp_pktid.read() == 0x1) 7859 { 7860 m_cpt_read_data_miss_CC ++; 7861 } 7862 else if (r_read_to_tgt_rsp_pktid.read() == 0x2) 7863 { 7864 m_cpt_read_ins_unc ++; 7865 } 7866 else if (r_read_to_tgt_rsp_pktid.read() == 0x3) 7867 { 7868 m_cpt_read_ins_miss ++; 7869 } 7870 else if (r_read_to_tgt_rsp_pktid.read() == 0x6) 7871 { 7872 m_cpt_read_ll_CC ++; 7873 } 7874 else if (r_read_to_tgt_rsp_pktid.read() == 0x9) 7875 { 7876 m_cpt_read_data_miss_NCC ++; 7877 } 7878 else if (r_read_to_tgt_rsp_pktid.read() == 0x14) 7879 { 7880 m_cpt_read_ll_NCC ++; 7881 } 7882 else 7883 { 7884 m_cpt_read_WTF ++; 7885 } 7886 } 7887 else 7888 { 7889 if (is_ll) 7890 { 7891 r_tgt_rsp_key_sent = true; // Send second flit of ll 7892 } 7893 else 7894 { 7895 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 7896 } 7897 } 7898 } 7899 break; 7900 } 7901 ////////////////// 7902 case TGT_RSP_WRITE: // send the write acknowledge 7903 { 7904 if(p_vci_tgt.rspack) 7905 { 7906 7907 #if DEBUG_MEMC_TGT_RSP 7908 if(m_debug) 7909 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 7910 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 7911 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 7912 << " / rpktid = " << 
r_write_to_tgt_rsp_pktid.read() << std::endl; 7913 #endif 7914 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 7915 r_write_to_tgt_rsp_req = false; 7916 } 7917 break; 7918 } 7919 ///////////////////// 7920 case TGT_RSP_CLEANUP: // not clear to me (AG) 7921 { 7922 if(p_vci_tgt.rspack) 7923 { 7924 7925 #if DEBUG_MEMC_TGT_RSP 7926 if(m_debug) 7927 { 7928 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 7929 << " / rsrcid = " << std::dec << r_cleanup_to_tgt_rsp_srcid.read() 7930 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 7931 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl 7932 << " / data = " << std::hex << r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() << std::dec << std::endl; 7933 } 7934 #endif 7935 7936 uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length.read() - 1; 7937 bool is_ll = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7938 7939 if (r_cleanup_to_tgt_rsp_type.read() or ((r_tgt_rsp_cpt.read() == last_word_idx) and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll) ) 7940 { 7941 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 7942 r_cleanup_to_tgt_rsp_req = false; 7943 r_tgt_rsp_key_sent = false; 7944 7945 7946 if (r_cleanup_to_tgt_rsp_pktid.read() == 0x0) 7947 { 7948 m_cpt_read_data_unc ++; 7949 } 7950 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x1) 7951 { 7952 m_cpt_read_data_miss_CC ++; 7953 } 7954 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x2) 7955 { 7956 m_cpt_read_ins_unc ++; 7957 } 7958 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x3) 7959 { 7960 m_cpt_read_ins_miss ++; 7961 } 7962 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x6) 7963 { 7964 m_cpt_read_ll_CC ++; 7965 } 7966 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x9) 7967 { 7968 m_cpt_read_data_miss_NCC ++; 7969 } 7970 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x14) 7971 { 7972 m_cpt_read_ll_NCC ++; 7973 } 7974 else if (!r_cleanup_to_tgt_rsp_type.read()) 7975 { 7976 m_cpt_read_WTF ++; 7977 } 7978 7979 } 7980 else 7981 { 7982 if (is_ll) 7983 { 7984 r_tgt_rsp_key_sent = true; 7985 } 7986 else 7987 { 7988 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; 7989 } 7990 } 7991 } 7992 break; 7993 } 7994 ///////////////// 7995 case TGT_RSP_CAS: // send one atomic word response 7996 { 7997 if(p_vci_tgt.rspack) 7998 { 7999 8000 #if DEBUG_MEMC_TGT_RSP 8001 if(m_debug) 8002 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 8003 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 8004 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read() 8005 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl; 8006 #endif 8007 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 8008 r_cas_to_tgt_rsp_req = false; 8009 } 8010 break; 8011 } 8012 ////////////////// 8013 case TGT_RSP_XRAM: // send the response after XRAM access 8014 { 8015 if ( p_vci_tgt.rspack ) 8016 { 8017 8018 #if DEBUG_MEMC_TGT_RSP 8019 if( m_debug ) 8020 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 8021 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 8022 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 8023 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 8024 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 8025 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 8026 #endif 8027 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 8028 r_xram_rsp_to_tgt_rsp_length.read() - 1; 8029 bool is_last_word = (r_tgt_rsp_cpt.read() == 
last_word_idx); 8030 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8031 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 8032 8033 if (((is_last_word or is_error) and not is_ll) or 8034 (r_tgt_rsp_key_sent.read() and is_ll)) 8035 { 8036 // Last word sent in case of READ or second flit sent in case of LL 8037 r_tgt_rsp_key_sent = false; 8038 r_xram_rsp_to_tgt_rsp_req = false; 8039 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 8040 8041 8042 if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x0) 8043 { 8044 m_cpt_read_data_unc ++; 8045 } 8046 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x1) 8047 { 8048 m_cpt_read_data_miss_CC ++; 8049 } 8050 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x2) 8051 { 8052 m_cpt_read_ins_unc ++; 8053 } 8054 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x3) 8055 { 8056 m_cpt_read_ins_miss ++; 8057 } 8058 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x6) 8059 { 8060 m_cpt_read_ll_CC ++; 8061 } 8062 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x9) 8063 { 8064 m_cpt_read_data_miss_NCC ++; 8065 } 8066 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x14) 8067 { 8068 m_cpt_read_ll_NCC ++; 8069 } 8070 else 8071 { 8072 m_cpt_read_WTF ++; 8073 } 8074 8075 } 8076 else 8077 { 8078 if (is_ll) 8079 { 8080 r_tgt_rsp_key_sent = true; // Send second flit of ll 8081 } 8082 else 8083 { 8084 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 8085 } 8086 } 8087 } 8088 break; 8089 } 8090 /////////////////////// 8091 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction 8092 { 8093 if(p_vci_tgt.rspack) 8094 { 8095 8096 #if DEBUG_MEMC_TGT_RSP 8097 if(m_debug) 8098 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 8099 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 8100 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 8101 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 8102 #endif 8103 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 8104 r_multi_ack_to_tgt_rsp_req = false; 8105 } 8106 break; 8107 } 8108 } // end switch tgt_rsp_fsm 8109 8110 //////////////////////////////////////////////////////////////////////////////////// 8111 // ALLOC_UPT FSM 8112 //////////////////////////////////////////////////////////////////////////////////// 8113 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT), 8114 // with a round robin priority between three FSMs, with the following order: 8115 // WRITE -> CAS -> MULTI_ACK 8116 // - The WRITE FSM initiates update transactions and sets a new entry in UPT. 8117 // - The CAS FSM does the same thing as the WRITE FSM. 8118 // - The MULTI_ACK FSM completes those transactions and erases the UPT entry. 8119 // The resource is always allocated. 
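// A minimal stand-alone sketch of this cyclic-priority grant rule (illustrative
// only: the enum, the request array and the helper below are hypothetical and
// simply restate the policy implemented by the switch that follows).
#if 0
enum upt_user_e { UPT_WRITE = 0, UPT_CAS = 1, UPT_MULTI_ACK = 2, UPT_USERS = 3 };

// Starting from the FSM that follows the current owner in the cyclic order,
// grant the UPT to the first FSM with a pending lock request; when nobody
// requests it, the current owner keeps the (always allocated) resource.
static upt_user_e upt_next_owner(upt_user_e owner, const bool req[UPT_USERS])
{
    for (int i = 1; i < UPT_USERS; i++)
    {
        upt_user_e candidate = upt_user_e((owner + i) % UPT_USERS);
        if (req[candidate]) return candidate;
    }
    return owner;
}
#endif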
8120 ///////////////////////////////////////////////////////////////////////////////////// 8121 switch(r_alloc_upt_fsm.read()) 8122 { 8123 ///////////////////////// 8124 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 8125 if (r_write_fsm.read() != WRITE_UPT_LOCK) 8126 { 8127 if (r_cas_fsm.read() == CAS_UPT_LOCK) 8128 r_alloc_upt_fsm = ALLOC_UPT_CAS; 8129 8130 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 8131 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 8132 else 8133 m_cpt_upt_unused++; 8134 } 8135 break; 8136 8137 ///////////////////////// 8138 case ALLOC_UPT_CAS: // allocated to CAS FSM 8139 if (r_cas_fsm.read() != CAS_UPT_LOCK) 8140 { 8141 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 8142 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 8143 8144 else if (r_write_fsm.read() == WRITE_UPT_LOCK) 8145 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 8146 8147 else 8148 m_cpt_upt_unused++; 8149 } 8150 break; 8151 8152 ///////////////////////// 8153 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 8154 if ((r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK ) and 8155 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR)) 8156 { 8157 if (r_write_fsm.read() == WRITE_UPT_LOCK) 8158 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 8159 8160 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 8161 r_alloc_upt_fsm = ALLOC_UPT_CAS; 8162 else 8163 m_cpt_upt_unused++; 8164 } 8165 break; 8166 } // end switch r_alloc_upt_fsm 8167 8168 //////////////////////////////////////////////////////////////////////////////////// 8169 // ALLOC_IVT FSM 8170 //////////////////////////////////////////////////////////////////////////////////// 8171 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT), 8172 // with a round robin priority between six FSMs, with the following order: 8173 // WRITE -> READ -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG 8174 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry 8175 // in IVT. 8176 // - The CAS FSM does the same thing as the WRITE FSM. 8177 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transactions and sets 8178 // a new entry in the IVT. 8179 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM. 8180 // - The CLEANUP FSM completes those transactions and erases the IVT entry. 8181 // The resource is always allocated. 
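// The grant rule below is the same round-robin policy as for the UPT; what
// changes from one allocator to the next is only the release condition: an
// owner keeps the IVT as long as it sits in one of its IVT-using states. A
// condensed sketch of that test for the WRITE FSM (hypothetical helper, for
// illustration only; the states are the ones tested in the switch below):
#if 0
static bool write_fsm_releases_ivt(int write_state)
{
    // WRITE holds the IVT in these three states and releases it otherwise.
    return (write_state != WRITE_BC_IVT_LOCK) and
           (write_state != WRITE_IVT_LOCK_HIT_WB) and
           (write_state != WRITE_MISS_IVT_LOCK);
}
#endif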
8182 ///////////////////////////////////////////////////////////////////////////////////// 8183 switch(r_alloc_ivt_fsm.read()) 8184 { 8185 ////////////////////////// 8186 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 8187 if ((r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 8188 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB) and 8189 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK)) 8190 { 8191 if(r_read_fsm.read() == READ_IVT_LOCK) 8192 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8193 8194 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8195 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8196 8197 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8198 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8199 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8200 8201 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8202 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8203 8204 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8205 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8206 8207 else 8208 m_cpt_ivt_unused++; 8209 } 8210 break; 8211 8212 ////////////////////////// 8213 case ALLOC_IVT_READ: // allocated to READ FSM 8214 if (r_read_fsm.read() != READ_IVT_LOCK) 8215 { 8216 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8217 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8218 8219 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8220 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8221 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8222 8223 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8224 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8225 8226 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8227 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8228 8229 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8230 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8231 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8232 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8233 else 8234 m_cpt_ivt_unused++; 8235 } 8236 break; 8237 8238 ////////////////////////// 8239 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 8240 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 8241 { 8242 if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8243 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8244 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8245 8246 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8247 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8248 8249 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8250 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8251 8252 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8253 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8254 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8255 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8256 8257 else if(r_read_fsm.read() == READ_IVT_LOCK) 8258 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8259 8260 else 8261 m_cpt_ivt_unused++; 8262 } 8263 break; 8264 8265 ////////////////////////// 8266 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 8267 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 8268 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT) and 8269 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) 8270 { 8271 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8272 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8273 8274 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8275 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8276 8277 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8278 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8279 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8280 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8281 8282 else if(r_read_fsm.read() == READ_IVT_LOCK) 8283 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8284 8285 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8286 r_alloc_ivt_fsm = 
ALLOC_IVT_XRAM_RSP; 8287 8288 else 8289 m_cpt_ivt_unused++; 8290 } 8291 break; 8292 8293 ////////////////////////// 8294 case ALLOC_IVT_CAS: // allocated to CAS FSM 8295 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 8296 { 8297 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 8298 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 8299 8300 else if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8301 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8302 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8303 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8304 8305 else if(r_read_fsm.read() == READ_IVT_LOCK) 8306 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8307 8308 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8309 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8310 8311 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8312 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8313 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8314 8315 else 8316 m_cpt_ivt_unused++; 8317 } 8318 break; 8319 8320 ////////////////////////// 8321 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 8322 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 8323 { 8324 if ((r_write_fsm.read() == WRITE_BC_IVT_LOCK) or 8325 (r_write_fsm.read() == WRITE_IVT_LOCK_HIT_WB) or 8326 (r_write_fsm.read() == WRITE_MISS_IVT_LOCK)) 8327 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8328 8329 else if(r_read_fsm.read() == READ_IVT_LOCK) 8330 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8331 8332 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 8333 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 8334 8335 else if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or 8336 (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK_DATA)) 8337 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8338 8339 else if(r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8340 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8341 else 8342 m_cpt_ivt_unused++; 8343 } 8344 break; 8345 } // end switch r_alloc_ivt_fsm 8346 8347 //////////////////////////////////////////////////////////////////////////////////// 8348 // ALLOC_DIR FSM 8349 //////////////////////////////////////////////////////////////////////////////////// 8350 // The ALLOC_DIR FSM allocates the access to the directory and 8351 // the data cache with a round robin priority between 6 user FSMs: 8352 // The cyclic ordering is CONFIG > READ > WRITE > CAS > CLEANUP > XRAM_RSP 8353 // The resource is always allocated. 8354 ///////////////////////////////////////////////////////////////////////////////////// 8355 8356 switch(r_alloc_dir_fsm.read()) 8357 { 8358 ///////////////////// 8359 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 
8360 // All the WAYS of a SET initialized in parallel 8361 8362 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 8363 8364 if(r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 8365 { 8366 m_cache_directory.init(); 8367 r_alloc_dir_fsm = ALLOC_DIR_READ; 8368 } 8369 break; 8370 8371 ////////////////////// 8372 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 8373 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 8374 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 8375 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 8376 (r_config_fsm.read() != CONFIG_TRT_SET) and 8377 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 8378 { 8379 if(r_read_fsm.read() == READ_DIR_REQ) 8380 r_alloc_dir_fsm = ALLOC_DIR_READ; 8381 8382 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8383 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8384 8385 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8386 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8387 8388 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8389 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8390 8391 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8392 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8393 } 8394 break; 8395 8396 //////////////////// 8397 case ALLOC_DIR_READ: // allocated to READ FSM 8398 if( ((r_read_fsm.read() != READ_DIR_REQ) and 8399 (r_read_fsm.read() != READ_DIR_LOCK) and 8400 (r_read_fsm.read() != READ_TRT_LOCK) and 8401 (r_read_fsm.read() != READ_HEAP_REQ) and 8402 (r_read_fsm.read() != READ_IVT_LOCK)) 8403 or 8404 ((r_read_fsm.read() == READ_TRT_LOCK) and 8405 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ)) ) 8406 { 8407 if(r_write_fsm.read() == WRITE_DIR_REQ) 8408 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8409 8410 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8411 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8412 8413 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8414 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8415 8416 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8417 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8418 8419 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8420 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8421 8422 else 8423 m_cpt_dir_unused++; 8424 } 8425 else 8426 m_cpt_read_fsm_dir_used++; 8427 break; 8428 8429 ///////////////////// 8430 case ALLOC_DIR_WRITE: 8431 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 8432 (r_write_fsm.read() != WRITE_DIR_LOCK) and 8433 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 8434 (r_write_fsm.read() != WRITE_DIR_HIT) and 8435 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 8436 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 8437 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK) and 8438 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 8439 (r_write_fsm.read() != WRITE_UPT_LOCK) and 8440 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 8441 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB)) 8442 or 8443 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 8444 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 8445 or 8446 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 8447 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 8448 { 8449 if(r_cas_fsm.read() == CAS_DIR_REQ) 8450 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8451 8452 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8453 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8454 8455 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8456 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8457 8458 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8459 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8460 8461 else if(r_read_fsm.read() == READ_DIR_REQ) 8462 r_alloc_dir_fsm = ALLOC_DIR_READ; 8463 8464 else 8465 m_cpt_dir_unused++; 8466 } 8467 else 8468 m_cpt_write_fsm_dir_used++; 8469 break; 
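// Note on the two "or" clauses in the release test above: the directory may be
// handed over while the WRITE FSM is parked in WRITE_UPT_HEAP_LOCK or in
// WRITE_MISS_TRT_LOCK, but only when the heap (respectively the TRT) is already
// allocated to WRITE, presumably because the directory itself is no longer
// needed once that other resource has been obtained. A condensed sketch of the
// complete predicate (hypothetical helper, illustration only):
#if 0
static bool write_fsm_releases_dir(bool in_dir_state,
                                   bool waits_heap, bool owns_heap,
                                   bool waits_trt,  bool owns_trt)
{
    return (not in_dir_state)
        or (waits_heap and owns_heap)
        or (waits_trt  and owns_trt);
}
#endif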
8470 8471 /////////////////// 8472 case ALLOC_DIR_CAS: // allocated to CAS FSM 8473 if(((r_cas_fsm.read() != CAS_DIR_REQ) and 8474 (r_cas_fsm.read() != CAS_DIR_LOCK) and 8475 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 8476 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 8477 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 8478 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 8479 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 8480 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 8481 (r_cas_fsm.read() != CAS_UPT_LOCK) and 8482 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 8483 or 8484 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 8485 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 8486 or 8487 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 8488 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 8489 { 8490 if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8491 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8492 8493 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8494 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8495 8496 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8497 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8498 8499 else if(r_read_fsm.read() == READ_DIR_REQ) 8500 r_alloc_dir_fsm = ALLOC_DIR_READ; 8501 8502 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8503 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8504 8505 else 8506 m_cpt_dir_unused++; 8507 } 8508 else 8509 m_cpt_cas_fsm_dir_used++; 8510 break; 8511 8512 /////////////////////// 8513 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 8514 if((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 8515 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 8516 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 8517 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 8518 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) 8519 { 8520 if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 8521 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 8522 8523 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 8524 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8525 8526 else if(r_read_fsm.read() == READ_DIR_REQ) 8527 r_alloc_dir_fsm = ALLOC_DIR_READ; 8528 8529 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8530 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8531 8532 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8533 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8534 8535 else 8536 m_cpt_dir_unused++; 8537 } 8538 else 8539 m_cpt_cleanup_fsm_dir_used++; 8540 break; 8541 8542 //////////////////////// 8543 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 8544 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 8545 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 8546 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 8547 { 8548 if(r_config_fsm.read() == CONFIG_DIR_REQ) 8549 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 8550 8551 else if(r_read_fsm.read() == READ_DIR_REQ) 8552 r_alloc_dir_fsm = ALLOC_DIR_READ; 8553 8554 else if(r_write_fsm.read() == WRITE_DIR_REQ) 8555 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 8556 8557 else if(r_cas_fsm.read() == CAS_DIR_REQ) 8558 r_alloc_dir_fsm = ALLOC_DIR_CAS; 8559 8560 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 8561 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 8562 8563 else 8564 m_cpt_dir_unused++; 8565 } 8566 else 8567 m_cpt_xram_rsp_fsm_dir_used++; 8568 break; 8569 8570 } // end switch alloc_dir_fsm 8571 8572 //////////////////////////////////////////////////////////////////////////////////// 8573 // ALLOC_TRT FSM 8574 //////////////////////////////////////////////////////////////////////////////////// 8575 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 8576 // with a round robin priority between 4 user FSMs : 8577 // The cyclic priority is READ > 
WRITE > CAS > XRAM_RSP 8578 // The ressource is always allocated. 8579 /////////////////////////////////////////////////////////////////////////////////// 8580 8581 switch(r_alloc_trt_fsm.read()) 8582 { 8583 //////////////////// 8584 case ALLOC_TRT_READ: 8585 if(r_read_fsm.read() != READ_TRT_LOCK) 8586 { 8587 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8588 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8589 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8590 8591 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8592 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8593 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8594 8595 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8596 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8597 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8598 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8599 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8600 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8601 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8602 8603 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8604 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8605 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8606 8607 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8608 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8609 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8610 8611 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8612 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8613 8614 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8615 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8616 8617 else 8618 m_cpt_trt_unused++; 8619 } 8620 else 8621 m_cpt_read_fsm_trt_used++; 8622 break; 8623 8624 ///////////////////// 8625 case ALLOC_TRT_WRITE: 8626 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 8627 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 8628 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 8629 { 8630 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8631 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8632 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8633 8634 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8635 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8636 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8637 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8638 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8639 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8640 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8641 8642 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8643 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8644 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8645 8646 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8647 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8648 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8649 8650 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8651 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8652 8653 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8654 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8655 8656 else if(r_read_fsm.read() == READ_TRT_LOCK) 8657 r_alloc_trt_fsm = ALLOC_TRT_READ; 8658 8659 else 8660 m_cpt_trt_unused++; 8661 } 8662 else 8663 m_cpt_write_fsm_trt_used++; 8664 break; 8665 8666 //////////////////// 8667 case ALLOC_TRT_CAS: 8668 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 8669 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 8670 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 8671 { 8672 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8673 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8674 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8675 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8676 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8677 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8678 
r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8679 8680 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8681 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8682 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8683 8684 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8685 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8686 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8687 8688 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8689 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8690 8691 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8692 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8693 8694 else if(r_read_fsm.read() == READ_TRT_LOCK) 8695 r_alloc_trt_fsm = ALLOC_TRT_READ; 8696 8697 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8698 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8699 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8700 8701 else 8702 m_cpt_trt_unused++; 8703 } 8704 else 8705 m_cpt_cas_fsm_trt_used++; 8706 break; 8707 /////////////////////// 8708 case ALLOC_TRT_IXR_CMD: 8709 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 8710 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 8711 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 8712 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 8713 (r_ixr_cmd_fsm.read() != IXR_CMD_CLEANUP_TRT) and 8714 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 8715 { 8716 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8717 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8718 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8719 8720 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8721 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8722 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8723 8724 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8725 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8726 8727 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8728 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8729 8730 else if(r_read_fsm.read() == READ_TRT_LOCK) 8731 r_alloc_trt_fsm = ALLOC_TRT_READ; 8732 8733 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8734 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8735 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8736 8737 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8738 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8739 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8740 } 8741 break; 8742 8743 //////////////////////// 8744 case ALLOC_TRT_XRAM_RSP: 8745 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 8746 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 8747 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 8748 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 8749 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 8750 { 8751 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8752 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8753 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8754 8755 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8756 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8757 8758 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8759 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8760 8761 else if(r_read_fsm.read() == READ_TRT_LOCK) 8762 r_alloc_trt_fsm = ALLOC_TRT_READ; 8763 8764 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8765 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8766 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8767 8768 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8769 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8770 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8771 8772 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8773 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8774 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8775 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8776 (r_ixr_cmd_fsm.read() == 
IXR_CMD_CLEANUP_TRT) or 8777 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8778 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8779 8780 else 8781 m_cpt_trt_unused++; 8782 } 8783 else 8784 m_cpt_xram_rsp_fsm_trt_used++; 8785 break; 8786 8787 //////////////////////// 8788 case ALLOC_TRT_IXR_RSP: 8789 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 8790 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 8791 { 8792 if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8793 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8794 8795 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8796 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8797 8798 else if(r_read_fsm.read() == READ_TRT_LOCK) 8799 r_alloc_trt_fsm = ALLOC_TRT_READ; 8800 8801 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) || 8802 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8803 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8804 8805 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) || 8806 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8807 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8808 8809 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8810 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8811 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8812 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8813 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8814 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8815 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8816 8817 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) && 8818 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8819 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8820 8821 else 8822 m_cpt_trt_unused++; 8823 } 8824 else 8825 m_cpt_ixr_fsm_trt_used++; 8826 break; 8827 8828 ////////////////////// 8829 case ALLOC_TRT_CONFIG: 8830 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 8831 (r_config_fsm.read() != CONFIG_TRT_SET)) 8832 { 8833 if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 8834 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 8835 8836 else if(r_read_fsm.read() == READ_TRT_LOCK) 8837 r_alloc_trt_fsm = ALLOC_TRT_READ; 8838 8839 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8840 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8841 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8842 8843 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8844 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8845 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8846 8847 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8848 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8849 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8850 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8851 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8852 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8853 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8854 8855 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8856 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8857 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8858 8859 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8860 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8861 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8862 } 8863 break; 8864 8865 //////////////////////// 8866 case ALLOC_TRT_CLEANUP: 8867 /*ODCCP*///std::cout << "TRT ALLOCATED TO CLEANUP" << std::endl; 8868 if(r_cleanup_fsm.read() != CLEANUP_IXR_REQ) 8869 { 8870 if(r_read_fsm.read() == READ_TRT_LOCK) 8871 r_alloc_trt_fsm = ALLOC_TRT_READ; 8872 8873 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 8874 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 8875 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 8876 8877 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 8878 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 8879 r_alloc_trt_fsm = ALLOC_TRT_CAS; 8880 8881 else 
if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8882 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8883 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8884 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8885 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8886 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8887 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8888 8889 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8890 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8891 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8892 8893 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) || 8894 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8895 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8896 8897 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8898 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8899 } 8900 break; 8901 8902 8903 } // end switch alloc_trt_fsm 8904 8905 //////////////////////////////////////////////////////////////////////////////////// 8906 // ALLOC_HEAP FSM 8907 //////////////////////////////////////////////////////////////////////////////////// 8908 // The ALLOC_HEAP FSM allocates the access to the heap 8909 // with a round robin priority between 6 user FSMs : 8910 // The cyclic ordering is READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 8911 // The ressource is always allocated. 8912 ///////////////////////////////////////////////////////////////////////////////////// 8913 8914 switch(r_alloc_heap_fsm.read()) 8915 { 8916 //////////////////// 8917 case ALLOC_HEAP_RESET: 8918 // Initializes the heap one ENTRY each cycle. 8919 8920 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 8921 8922 if(r_alloc_heap_reset_cpt.read() == (m_heap_size-1)) 8923 { 8924 m_heap.init(); 8925 8926 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8927 } 8928 break; 8929 8930 //////////////////// 8931 case ALLOC_HEAP_READ: 8932 if((r_read_fsm.read() != READ_HEAP_REQ) and 8933 (r_read_fsm.read() != READ_HEAP_LOCK) and 8934 (r_read_fsm.read() != READ_HEAP_ERASE)) 8935 { 8936 if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8937 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8938 8939 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8940 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8941 8942 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8943 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8944 8945 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8946 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8947 8948 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8949 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8950 8951 else 8952 m_cpt_heap_unused++; 8953 } 8954 else 8955 m_cpt_read_fsm_heap_used++; 8956 break; 8957 8958 ///////////////////// 8959 case ALLOC_HEAP_WRITE: 8960 if((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 8961 (r_write_fsm.read() != WRITE_UPT_REQ) and 8962 (r_write_fsm.read() != WRITE_UPT_NEXT)) 8963 { 8964 if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8965 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8966 8967 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8968 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8969 8970 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8971 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8972 8973 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8974 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8975 8976 else if(r_read_fsm.read() == READ_HEAP_REQ) 8977 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8978 8979 else 8980 m_cpt_heap_unused++; 8981 } 8982 else 8983 m_cpt_write_fsm_heap_used++; 8984 break; 8985 8986 //////////////////// 8987 case ALLOC_HEAP_CAS: 8988 if((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 8989 (r_cas_fsm.read() != CAS_UPT_REQ) and 8990 
(r_cas_fsm.read() != CAS_UPT_NEXT)) 8991 { 8992 if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8993 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8994 8995 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8996 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8997 8998 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8999 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9000 9001 else if(r_read_fsm.read() == READ_HEAP_REQ) 9002 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9003 9004 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9005 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9006 9007 else 9008 m_cpt_heap_unused++; 9009 } 9010 else 9011 m_cpt_cas_fsm_heap_used++; 9012 break; 9013 9014 /////////////////////// 9015 case ALLOC_HEAP_CLEANUP: 9016 if((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 9017 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 9018 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 9019 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 9020 { 9021 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9022 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9023 9024 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9025 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9026 9027 else if(r_read_fsm.read() == READ_HEAP_REQ) 9028 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9029 9030 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9031 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9032 9033 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9034 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9035 9036 else 9037 m_cpt_heap_unused++; 9038 } 9039 else 9040 m_cpt_cleanup_fsm_heap_used++; 9041 break; 9042 9043 //////////////////////// 9044 case ALLOC_HEAP_XRAM_RSP: 9045 if((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 9046 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 9047 { 9048 if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9049 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9050 9051 else if(r_read_fsm.read() == READ_HEAP_REQ) 9052 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9053 9054 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9055 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9056 9057 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9058 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9059 9060 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9061 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9062 9063 } 9064 break; 9065 9066 /////////////////////// 9067 case ALLOC_HEAP_CONFIG: 9068 if((r_config_fsm.read() != CONFIG_HEAP_REQ) and 9069 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 9070 { 9071 if(r_read_fsm.read() == READ_HEAP_REQ) 9072 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9073 9074 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9075 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9076 9077 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9078 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9079 9080 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9081 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9082 9083 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9084 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9085 9086 else 9087 m_cpt_heap_unused++; 9088 } 9089 else 9090 m_cpt_xram_rsp_fsm_heap_used++; 9091 break; 9092 9093 } // end switch alloc_heap_fsm 9094 9095 ///////////////////////////////////////////////////////////////////// 9096 // TGT_CMD to READ FIFO 9097 ///////////////////////////////////////////////////////////////////// 9098 9099 m_cmd_read_addr_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9100 p_vci_tgt.address.read() ); 9101 m_cmd_read_length_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9102 p_vci_tgt.plen.read()>>2 ); 9103 m_cmd_read_srcid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9104 p_vci_tgt.srcid.read() ); 9105 
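// All these command FIFOs follow the same two-flag update protocol: "put"
// pushes the value sampled on the VCI target port, "get" pops the word consumed
// by the corresponding FSM, and both may happen in the same cycle (see the
// explicit put_and_get / simple_put / simple_get calls used for the cleanup
// FIFO further down). A behavioural sketch of what such an update() is expected
// to do (illustrative only, not the actual soclib GenericFifo code):
#if 0
#include <deque>

template <typename T>
void fifo_update_sketch(std::deque<T> &fifo, bool get, bool put, const T &data)
{
    if (get and not fifo.empty()) fifo.pop_front();    // consumer side (this cycle)
    if (put)                      fifo.push_back(data); // producer side (capacity check omitted)
}
#endif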
m_cmd_read_trdid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9106 p_vci_tgt.trdid.read() ); 9107 m_cmd_read_pktid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 9108 p_vci_tgt.pktid.read() ); 9109 9110 ///////////////////////////////////////////////////////////////////// 9111 // TGT_CMD to WRITE FIFO 9112 ///////////////////////////////////////////////////////////////////// 9113 9114 m_cmd_write_addr_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9115 (addr_t)p_vci_tgt.address.read() ); 9116 m_cmd_write_eop_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9117 p_vci_tgt.eop.read() ); 9118 m_cmd_write_srcid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9119 p_vci_tgt.srcid.read() ); 9120 m_cmd_write_trdid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9121 p_vci_tgt.trdid.read() ); 9122 m_cmd_write_pktid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9123 p_vci_tgt.pktid.read() ); 9124 m_cmd_write_data_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9125 p_vci_tgt.wdata.read() ); 9126 m_cmd_write_be_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 9127 p_vci_tgt.be.read() ); 9128 9129 //////////////////////////////////////////////////////////////////////////////////// 9130 // TGT_CMD to CAS FIFO 9131 //////////////////////////////////////////////////////////////////////////////////// 9132 9133 m_cmd_cas_addr_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9134 (addr_t)p_vci_tgt.address.read() ); 9135 m_cmd_cas_eop_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9136 p_vci_tgt.eop.read() ); 9137 m_cmd_cas_srcid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9138 p_vci_tgt.srcid.read() ); 9139 m_cmd_cas_trdid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9140 p_vci_tgt.trdid.read() ); 9141 m_cmd_cas_pktid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9142 p_vci_tgt.pktid.read() ); 9143 m_cmd_cas_wdata_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 9144 p_vci_tgt.wdata.read() ); 9145 9146 //////////////////////////////////////////////////////////////////////////////////// 9147 // CC_RECEIVE to CLEANUP FIFO 9148 //////////////////////////////////////////////////////////////////////////////////// 9149 9150 if(cc_receive_to_cleanup_fifo_put) 9151 { 9152 if(cc_receive_to_cleanup_fifo_get) 9153 { 9154 m_cc_receive_to_cleanup_fifo.put_and_get( ((uint64_t)(p_dspin_p2m.eop.read()&0x1) << 32) | p_dspin_p2m.data.read() ); 9155 } 9156 else 9157 { 9158 m_cc_receive_to_cleanup_fifo.simple_put( ((uint64_t)(p_dspin_p2m.eop.read()&0x1) << 32) | p_dspin_p2m.data.read() ); 9159 } 9160 } 9161 else 9162 { 9163 if(cc_receive_to_cleanup_fifo_get) 9164 { 9165 m_cc_receive_to_cleanup_fifo.simple_get(); 9166 } 9167 } 9168 //m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 9169 // cc_receive_to_cleanup_fifo_put, 9170 // p_dspin_p2m.data.read() ); 9171 9172 //////////////////////////////////////////////////////////////////////////////////// 9173 // CC_RECEIVE to MULTI_ACK FIFO 9174 //////////////////////////////////////////////////////////////////////////////////// 9175 9176 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 9177 cc_receive_to_multi_ack_fifo_put, 9178 p_dspin_p2m.data.read() ); 9179 9180 //////////////////////////////////////////////////////////////////////////////////// 9181 // WRITE to CC_SEND FIFO 9182 //////////////////////////////////////////////////////////////////////////////////// 9183 9184 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 9185 
write_to_cc_send_fifo_inst ); 9186 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 9187 write_to_cc_send_fifo_srcid ); 9188 9189 //////////////////////////////////////////////////////////////////////////////////// 9190 // CONFIG to CC_SEND FIFO 9191 //////////////////////////////////////////////////////////////////////////////////// 9192 9193 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 9194 config_to_cc_send_fifo_inst ); 9195 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 9196 config_to_cc_send_fifo_srcid ); 9197 9198 //////////////////////////////////////////////////////////////////////////////////// 9199 // XRAM_RSP to CC_SEND FIFO 9200 //////////////////////////////////////////////////////////////////////////////////// 9201 9202 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 9203 xram_rsp_to_cc_send_fifo_inst ); 9204 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 9205 xram_rsp_to_cc_send_fifo_srcid ); 9206 9207 //////////////////////////////////////////////////////////////////////////////////// 9208 // CAS to CC_SEND FIFO 9209 //////////////////////////////////////////////////////////////////////////////////// 9210 9211 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 9212 cas_to_cc_send_fifo_inst ); 9213 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 9214 cas_to_cc_send_fifo_srcid ); 9215 m_cpt_cycles++; 9216 9217 } // end transition() 9218 9219 ///////////////////////////// 9220 tmpl(void)::genMoore() 9221 ///////////////////////////// 9222 { 9223 //////////////////////////////////////////////////////////// 9224 // Command signals on the p_vci_ixr port 9225 //////////////////////////////////////////////////////////// 9226 // DATA width is 8 bytes 9227 // The following values are not transmitted to XRAM 9228 // p_vci_ixr.be 9229 // p_vci_ixr.pktid 9230 // p_vci_ixr.cons 9231 // p_vci_ixr.wrap 9232 // p_vci_ixr.contig 9233 // p_vci_ixr.clen 9234 // p_vci_ixr.cfixed 9235 9236 p_vci_ixr.plen = 64; 9237 p_vci_ixr.srcid = m_srcid_x; 9238 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 9239 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 9240 p_vci_ixr.be = 0xFF; 9241 p_vci_ixr.pktid = 0; 9242 p_vci_ixr.cons = false; 9243 p_vci_ixr.wrap = false; 9244 p_vci_ixr.contig = true; 9245 p_vci_ixr.clen = 0; 9246 p_vci_ixr.cfixed = false; 9247 9248 if ( (r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 9249 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 9250 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 9251 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 9252 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND) ) 9253 { 9254 p_vci_ixr.cmdval = true; 9255 9256 if ( r_ixr_cmd_get.read() ) // GET 9257 { 9258 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 9259 p_vci_ixr.wdata = 0; 9260 p_vci_ixr.eop = true; 9261 } 9262 else // PUT 9263 { 9264 size_t word = r_ixr_cmd_word.read(); 9265 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 9266 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 9267 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 9268 p_vci_ixr.eop = (word == (m_words-2)); 9269 } 9270 } 9271 else if (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_DATA_SEND) 9272 { 9273 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 9274 p_vci_ixr.cmdval = true; 9275 
/*p_vci_ixr.address = (addr_t)((r_cleanup_to_ixr_cmd_nline.read() * m_words + 9276 r_ixr_cmd_word.read()) * 4);*/ 9277 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 9278 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read()].read()) | 9279 ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read() + 1].read()) << 32)); 9280 p_vci_ixr.trdid = r_cleanup_to_ixr_cmd_index.read(); 9281 p_vci_ixr.eop = (r_ixr_cmd_word == (m_words - 2)); 9282 } 9283 9284 else 9285 { 9286 p_vci_ixr.cmdval = false; 9287 } 9288 9289 //////////////////////////////////////////////////// 9290 // Response signals on the p_vci_ixr port 9291 //////////////////////////////////////////////////// 9292 9293 if( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 9294 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ) 9295 { 9296 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 9297 } 9298 else if (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) 9299 { 9300 p_vci_ixr.rspack = true; 9301 } 9302 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 9303 { 9304 p_vci_ixr.rspack = false; 9305 } 9306 9307 //////////////////////////////////////////////////// 9308 // Command signals on the p_vci_tgt port 9309 //////////////////////////////////////////////////// 9310 9311 switch((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 9312 { 9313 case TGT_CMD_IDLE: 9314 p_vci_tgt.cmdack = false; 9315 break; 9316 9317 case TGT_CMD_CONFIG: 9318 case TGT_CMD_ERROR: 9319 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 9320 break; 9321 9322 case TGT_CMD_READ: 9323 p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); 9324 break; 9325 9326 case TGT_CMD_WRITE: 9327 p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); 9328 break; 9329 9330 case TGT_CMD_CAS: 9331 p_vci_tgt.cmdack = m_cmd_cas_addr_fifo.wok(); 9332 break; 9333 } 9334 9335 //////////////////////////////////////////////////// 9336 // Response signals on the p_vci_tgt port 9337 //////////////////////////////////////////////////// 9338 9339 switch(r_tgt_rsp_fsm.read()) 9340 { 9341 case TGT_RSP_CONFIG_IDLE: 9342 case TGT_RSP_TGT_CMD_IDLE: 9343 case TGT_RSP_READ_IDLE: 9344 case TGT_RSP_WRITE_IDLE: 9345 case TGT_RSP_CAS_IDLE: 9346 case TGT_RSP_XRAM_IDLE: 9347 case TGT_RSP_MULTI_ACK_IDLE: 9348 case TGT_RSP_CLEANUP_IDLE: 9349 { 9350 p_vci_tgt.rspval = false; 9351 p_vci_tgt.rsrcid = 0; 9352 p_vci_tgt.rdata = 0; 9353 p_vci_tgt.rpktid = 0; 9354 p_vci_tgt.rtrdid = 0; 9355 p_vci_tgt.rerror = 0; 9356 p_vci_tgt.reop = false; 9357 break; 9358 } 9359 case TGT_RSP_CONFIG: 9360 { 9361 p_vci_tgt.rspval = true; 9362 p_vci_tgt.rdata = 0; 9363 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 9364 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 9365 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 9366 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 9367 p_vci_tgt.reop = true; 9368 9369 break; 9370 } 9371 9372 case TGT_RSP_TGT_CMD: 9373 { 9374 p_vci_tgt.rspval = true; 9375 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 9376 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 9377 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 9378 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 9379 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 9380 p_vci_tgt.reop = true; 9381 9382 break; 9383 } 9384 9385 case TGT_RSP_READ: 9386 { 9387 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 9388 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 9389 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 9390 
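// An LL (linked-load) response is returned as two flits: the reservation key
// first (while r_tgt_rsp_key_sent is still false), then the requested data
// word, which carries the reop marker. A plain READ response streams one data
// word per cycle, rdata being indexed by r_tgt_rsp_cpt, with reop raised on the
// last word of the burst.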
            p_vci_tgt.rspval = true;

            if (is_ll and not r_tgt_rsp_key_sent.read())
            {
                // LL response first flit
                p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read();
            }
            else
            {
                // LL response second flit or READ response
                p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
            }

            p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror = 0;
            p_vci_tgt.reop   = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll);
            break;
        }

        case TGT_RSP_WRITE:
            p_vci_tgt.rspval = true;
            if (((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read())
                p_vci_tgt.rdata = 1;
            else
                p_vci_tgt.rdata = 0;
            p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror = 0;
            p_vci_tgt.reop   = true;
            break;

        case TGT_RSP_CLEANUP:
        {
            uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length - 1;
            bool     is_last_word  = (r_tgt_rsp_cpt.read() == last_word_idx);
            bool     is_ll         = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL);

            p_vci_tgt.rspval = true;
            if (is_ll and not r_tgt_rsp_key_sent.read())
            {
                p_vci_tgt.rdata = r_cleanup_to_tgt_rsp_ll_key.read();
            }
            else if (!r_cleanup_to_tgt_rsp_type.read())
            {
                p_vci_tgt.rdata = r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
            }
            else // if the CLEANUP FSM sends an SC_RSP, then it is a success (and it caused an inval)
            {
                p_vci_tgt.rdata = 0;
            }
            p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror = 0; // Can be a CAS rsp
            p_vci_tgt.reop   = r_cleanup_to_tgt_rsp_type.read() or (is_last_word and not is_ll) or
                               (r_tgt_rsp_key_sent.read() and is_ll);
            break;
        }

        case TGT_RSP_CAS:
            p_vci_tgt.rspval = true;
            p_vci_tgt.rdata  = r_cas_to_tgt_rsp_data.read();
            p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror = 0;
            p_vci_tgt.reop   = true;
            break;

        case TGT_RSP_XRAM:
        {
            uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + r_xram_rsp_to_tgt_rsp_length.read() - 1;
            bool     is_last_word  = (r_tgt_rsp_cpt.read() == last_word_idx);
            bool     is_ll         = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL);
            bool     is_error      = r_xram_rsp_to_tgt_rsp_rerror.read();

            p_vci_tgt.rspval = true;

            if (is_ll and not r_tgt_rsp_key_sent.read())
            {
                // LL response first flit
                p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read();
            }
            else
            {
                // LL response second flit or READ response
                p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
            }

            p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror = is_error;
            p_vci_tgt.reop   = ((is_last_word or is_error) and not is_ll) or
                               (r_tgt_rsp_key_sent.read() and is_ll);
            break;
        }

        case TGT_RSP_MULTI_ACK:
            p_vci_tgt.rspval = true;
            p_vci_tgt.rdata  = 0; // Can be a CAS or SC rsp
            p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read();
            p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read();
            p_vci_tgt.rpktid = r_multi_ack_to_tgt_rsp_pktid.read();
            p_vci_tgt.rerror = 0;
            p_vci_tgt.reop   = true;
            break;
    } // end switch r_tgt_rsp_fsm

    ////////////////////////////////////////////////////////////////////
    // p_dspin_m2p port (CC_SEND FSM)
    ////////////////////////////////////////////////////////////////////

    p_dspin_m2p.write = false;
    p_dspin_m2p.eop   = false;
    p_dspin_m2p.data  = 0;

    switch (r_cc_send_fsm.read())
    {
        ///////////////////////////
        case CC_SEND_CONFIG_IDLE:
        case CC_SEND_XRAM_RSP_IDLE:
        case CC_SEND_WRITE_IDLE:
        case CC_SEND_CAS_IDLE:
        {
            break;
        }
        /////////////////////////////////
        case CC_SEND_CONFIG_INVAL_HEADER:
        {
            uint8_t multi_inval_type;
            if (m_config_to_cc_send_inst_fifo.read())
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST;
            }
            else
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_config_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set(flit, dest,                             DspinDhccpParam::MULTI_INVAL_DEST);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,                   DspinDhccpParam::MULTI_INVAL_SRCID);
            DspinDhccpParam::dspin_set(flit, r_config_to_cc_send_trdid.read(), DspinDhccpParam::MULTI_INVAL_UPDT_INDEX);
            DspinDhccpParam::dspin_set(flit, multi_inval_type,                 DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ////////////////////////////////
        case CC_SEND_CONFIG_INVAL_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_config_to_cc_send_nline.read(), DspinDhccpParam::MULTI_INVAL_NLINE);
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ///////////////////////////////////
        case CC_SEND_XRAM_RSP_INVAL_HEADER:
        {
            if (not m_xram_rsp_to_cc_send_inst_fifo.rok()) break;

            uint8_t multi_inval_type;
            if (m_xram_rsp_to_cc_send_inst_fifo.read())
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST;
            }
            else
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set(flit, dest,                               DspinDhccpParam::MULTI_INVAL_DEST);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,                     DspinDhccpParam::MULTI_INVAL_SRCID);
            DspinDhccpParam::dspin_set(flit, r_xram_rsp_to_cc_send_trdid.read(), DspinDhccpParam::MULTI_INVAL_UPDT_INDEX);
            DspinDhccpParam::dspin_set(flit, multi_inval_type,                   DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        //////////////////////////////////
        case CC_SEND_XRAM_RSP_INVAL_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_xram_rsp_to_cc_send_nline.read(), DspinDhccpParam::MULTI_INVAL_NLINE);
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        /////////////////////////////////////
        case CC_SEND_CONFIG_BRDCAST_HEADER:
        case CC_SEND_XRAM_RSP_BRDCAST_HEADER:
        case CC_SEND_WRITE_BRDCAST_HEADER:
        case CC_SEND_CAS_BRDCAST_HEADER:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, m_broadcast_boundaries, DspinDhccpParam::BROADCAST_BOX);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,         DspinDhccpParam::BROADCAST_SRCID);
            DspinDhccpParam::dspin_set(flit, 1ULL,                   DspinDhccpParam::M2P_BC);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ////////////////////////////////////
        case CC_SEND_XRAM_RSP_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_xram_rsp_to_cc_send_nline.read(), DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        //////////////////////////////////
        case CC_SEND_CONFIG_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_config_to_cc_send_nline.read(), DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        //////////////////////////////////
        case CC_SEND_READ_NCC_INVAL_HEADER:
        {
            uint64_t flit = 0;

            uint8_t multi_inval_type;
            if (r_read_to_cc_send_inst.read())
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST;
            }
            else
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA;
            }

            DspinDhccpParam::dspin_set(flit, r_read_to_cc_send_dest.read(),          DspinDhccpParam::MULTI_INVAL_DEST);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,                         DspinDhccpParam::MULTI_INVAL_SRCID);
            DspinDhccpParam::dspin_set(flit, DspinDhccpParam::TYPE_MULTI_INVAL_DATA, DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        //////////////////////////////////
        case CC_SEND_READ_NCC_INVAL_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_read_to_cc_send_nline.read(), DspinDhccpParam::MULTI_INVAL_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            p_dspin_m2p.eop   = true;
            break;
        }
        ////////////////////////////////////
        case CC_SEND_WRITE_NCC_INVAL_HEADER:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_write_to_cc_send_dest.read(),         DspinDhccpParam::MULTI_INVAL_DEST);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,                         DspinDhccpParam::MULTI_INVAL_SRCID);
            DspinDhccpParam::dspin_set(flit, DspinDhccpParam::TYPE_MULTI_INVAL_DATA, DspinDhccpParam::M2P_TYPE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ///////////////////////////////////
        case CC_SEND_WRITE_NCC_INVAL_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_write_to_cc_send_nline.read(), DspinDhccpParam::MULTI_INVAL_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            p_dspin_m2p.eop   = true;
            break;
        }
        /////////////////////////////////
        case CC_SEND_WRITE_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_write_to_cc_send_nline.read(), DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ///////////////////////////////
        case CC_SEND_CAS_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_cas_to_cc_send_nline.read(), DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ///////////////////////////////
        case CC_SEND_WRITE_UPDT_HEADER:
        {
            if (not m_write_to_cc_send_inst_fifo.rok()) break;

            uint8_t multi_updt_type;
            if (m_write_to_cc_send_inst_fifo.read())
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST;
            }
            else
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_write_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set(flit, dest,                            DspinDhccpParam::MULTI_UPDT_DEST);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,                  DspinDhccpParam::MULTI_UPDT_SRCID);
            DspinDhccpParam::dspin_set(flit, r_write_to_cc_send_trdid.read(), DspinDhccpParam::MULTI_UPDT_UPDT_INDEX);
            DspinDhccpParam::dspin_set(flit, multi_updt_type,                 DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        //////////////////////////////
        case CC_SEND_WRITE_UPDT_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_write_to_cc_send_index.read(), DspinDhccpParam::MULTI_UPDT_WORD_INDEX);
            DspinDhccpParam::dspin_set(flit, r_write_to_cc_send_nline.read(), DspinDhccpParam::MULTI_UPDT_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        /////////////////////////////
        case CC_SEND_WRITE_UPDT_DATA:
        {
            uint8_t  multi_updt_cpt  = r_cc_send_cpt.read() + r_write_to_cc_send_index.read();
            uint8_t  multi_updt_be   = r_write_to_cc_send_be[multi_updt_cpt].read();
            uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read();

            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, multi_updt_be,   DspinDhccpParam::MULTI_UPDT_BE);
            DspinDhccpParam::dspin_set(flit, multi_updt_data, DspinDhccpParam::MULTI_UPDT_DATA);

            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = (r_cc_send_cpt.read() == r_write_to_cc_send_count.read());
            p_dspin_m2p.data  = flit;
            break;
        }
        /////////////////////////////
        case CC_SEND_CAS_UPDT_HEADER:
        {
            if (not m_cas_to_cc_send_inst_fifo.rok()) break;

            uint8_t multi_updt_type;
            if (m_cas_to_cc_send_inst_fifo.read())
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST;
            }
            else
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_cas_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set(flit, dest,                          DspinDhccpParam::MULTI_UPDT_DEST);
            DspinDhccpParam::dspin_set(flit, m_cc_global_id,                DspinDhccpParam::MULTI_UPDT_SRCID);
            DspinDhccpParam::dspin_set(flit, r_cas_to_cc_send_trdid.read(), DspinDhccpParam::MULTI_UPDT_UPDT_INDEX);
            DspinDhccpParam::dspin_set(flit, multi_updt_type,               DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ////////////////////////////
        case CC_SEND_CAS_UPDT_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, r_cas_to_cc_send_index.read(), DspinDhccpParam::MULTI_UPDT_WORD_INDEX);
            DspinDhccpParam::dspin_set(flit, r_cas_to_cc_send_nline.read(), DspinDhccpParam::MULTI_UPDT_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ///////////////////////////
        case CC_SEND_CAS_UPDT_DATA:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, 0xF,                           DspinDhccpParam::MULTI_UPDT_BE);
            DspinDhccpParam::dspin_set(flit, r_cas_to_cc_send_wdata.read(), DspinDhccpParam::MULTI_UPDT_DATA);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = not r_cas_to_cc_send_is_long.read();
            p_dspin_m2p.data  = flit;
            break;
        }
        ////////////////////////////////
        case CC_SEND_CAS_UPDT_DATA_HIGH:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set(flit, 0xF,                                DspinDhccpParam::MULTI_UPDT_BE);
            DspinDhccpParam::dspin_set(flit, r_cas_to_cc_send_wdata_high.read(), DspinDhccpParam::MULTI_UPDT_DATA);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
    }

    ////////////////////////////////////////////////////////////////////
    // p_dspin_clack port (CLEANUP FSM)
    ////////////////////////////////////////////////////////////////////

    if (r_cleanup_fsm.read() == CLEANUP_SEND_CLACK)
    {
        uint8_t cleanup_ack_type;
        if (r_cleanup_inst.read())
        {
            cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST;
        }
        else
        {
            cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA;
        }

        uint64_t flit = 0;
        uint64_t dest = r_cleanup_srcid.read() <<
                        (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

        DspinDhccpParam::dspin_set(flit, dest,                            DspinDhccpParam::CLACK_DEST);
        DspinDhccpParam::dspin_set(flit, r_cleanup_nline.read() & 0xFFFF, DspinDhccpParam::CLACK_SET);
        DspinDhccpParam::dspin_set(flit, r_cleanup_way_index.read(),      DspinDhccpParam::CLACK_WAY);
        DspinDhccpParam::dspin_set(flit, cleanup_ack_type,                DspinDhccpParam::CLACK_TYPE);

        p_dspin_clack.eop   = true;
        p_dspin_clack.write = true;
        p_dspin_clack.data  = flit;
    }
    else
    {
        p_dspin_clack.write = false;
        p_dspin_clack.eop   = false;
        p_dspin_clack.data  = 0;
    }

    ///////////////////////////////////////////////////////////////////
    // p_dspin_p2m port (CC_RECEIVE FSM)
    ///////////////////////////////////////////////////////////////////
    switch (r_cc_receive_fsm.read())
    {
        case CC_RECEIVE_IDLE:
        {
            p_dspin_p2m.read = false;
            break;
        }
        case CC_RECEIVE_CLEANUP:
        case CC_RECEIVE_CLEANUP_EOP:
        {
            p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok();
            break;
        }
        case CC_RECEIVE_MULTI_ACK:
        {
            p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok();
            break;
        }
    }
    // end switch r_cc_receive_fsm
} // end genMoore()

}

…

// Local Variables:
// tab-width: 4
// c-basic-offset: 4
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:

// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4

- Property svn:mergeinfo changed