- Timestamp: Aug 30, 2013, 6:28:10 PM
- Location: trunk
- Files: 7 edited
trunk/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
r499 → r504

    // instrumentation counters
    uint32_t m_cpt_cycles;               // Counter of cycles

-   uint32_t m_cpt_read;                 // Number of READ transactions
-   uint32_t m_cpt_read_remote;          // Number of remote READ transactions
-   uint32_t m_cpt_read_flits;           // Number of flits for READs
-   uint32_t m_cpt_read_cost;            // Number of (flits * distance) for READs
-   uint32_t m_cpt_read_miss;            // Number of MISS READ
-   uint32_t m_cpt_write;                // Number of WRITE transactions
-   uint32_t m_cpt_write_remote;         // Number of remote WRITE transactions
-   uint32_t m_cpt_write_flits;          // Number of flits for WRITEs
-   uint32_t m_cpt_write_cost;           // Number of (flits * distance) for WRITEs
-   uint32_t m_cpt_write_miss;           // Number of MISS WRITE
-   uint32_t m_cpt_write_cells;          // Cumulated length for WRITE transactions
-   uint32_t m_cpt_write_dirty;          // Cumulated length for WRITE transactions
-   uint32_t m_cpt_update;               // Number of UPDATE transactions
-   uint32_t m_cpt_trt_rb;               // Read blocked by a hit in trt
-   uint32_t m_cpt_trt_full;             // Transaction blocked due to a full trt
-   uint32_t m_cpt_update_mult;          // Number of targets for UPDATE
-   uint32_t m_cpt_inval;                // Number of INVAL transactions
-   uint32_t m_cpt_inval_mult;           // Number of targets for INVAL
-   uint32_t m_cpt_inval_brdcast;        // Number of BROADCAST INVAL
-   uint32_t m_cpt_cleanup;              // Number of CLEANUP transactions
-   uint32_t m_cpt_ll;                   // Number of LL transactions
-   uint32_t m_cpt_sc;                   // Number of SC transactions
-   uint32_t m_cpt_cas;                  // Number of CAS transactions
-   uint32_t m_cpt_cleanup_cost;         // Number of (flits * distance) for CLEANUPs
-   uint32_t m_cpt_update_flits;         // Number of flits for UPDATEs
-   uint32_t m_cpt_update_cost;          // Number of (flits * distance) for UPDATEs
-   uint32_t m_cpt_inval_cost;           // Number of (flits * distance) for INVALs
+
+   // Counters accessible in software (not yet but eventually)
+   uint32_t m_cpt_read_local;           // Number of local READ transactions
+   uint32_t m_cpt_read_remote;          // Number of remote READ transactions
+   uint32_t m_cpt_read_cost;            // Number of (flits * distance) for READs
+
+   uint32_t m_cpt_write_local;          // Number of local WRITE transactions
+   uint32_t m_cpt_write_remote;         // Number of remote WRITE transactions
+   uint32_t m_cpt_write_flits_local;    // Number of flits for local WRITEs
+   uint32_t m_cpt_write_flits_remote;   // Number of flits for remote WRITEs
+   uint32_t m_cpt_write_cost;           // Number of (flits * distance) for WRITEs
+
+   uint32_t m_cpt_ll_local;             // Number of local LL transactions
+   uint32_t m_cpt_ll_remote;            // Number of remote LL transactions
+   uint32_t m_cpt_ll_cost;              // Number of (flits * distance) for LLs
+
+   uint32_t m_cpt_sc_local;             // Number of local SC transactions
+   uint32_t m_cpt_sc_remote;            // Number of remote SC transactions
+   uint32_t m_cpt_sc_cost;              // Number of (flits * distance) for SCs
+
+   uint32_t m_cpt_cas_local;            // Number of local CAS transactions
+   uint32_t m_cpt_cas_remote;           // Number of remote CAS transactions
+   uint32_t m_cpt_cas_cost;             // Number of (flits * distance) for CASs
+
+   uint32_t m_cpt_update;               // Number of requests causing an UPDATE
+   uint32_t m_cpt_update_local;         // Number of local UPDATE transactions
+   uint32_t m_cpt_update_remote;        // Number of remote UPDATE transactions
+   uint32_t m_cpt_update_cost;          // Number of (flits * distance) for UPDT
+
+   uint32_t m_cpt_m_inval;              // Number of requests causing M_INV
+   uint32_t m_cpt_m_inval_local;        // Number of local M_INV transactions
+   uint32_t m_cpt_m_inval_remote;       // Number of remote M_INV transactions
+   uint32_t m_cpt_m_inval_cost;         // Number of (flits * distance) for M_INV
+
+   uint32_t m_cpt_br_inval;             // Number of BROADCAST INVAL
+
+   uint32_t m_cpt_cleanup_local;        // Number of local CLEANUP transactions
+   uint32_t m_cpt_cleanup_remote;       // Number of remote CLEANUP transactions
+   uint32_t m_cpt_cleanup_cost;         // Number of (flits * distance) for CLEANUPs
+
+   // Counters not accessible by software
+   uint32_t m_cpt_read_miss;            // Number of MISS READ
+   uint32_t m_cpt_write_miss;           // Number of MISS WRITE
+   uint32_t m_cpt_write_dirty;          // Cumulated length for WRITE transactions
+
+   uint32_t m_cpt_trt_rb;               // Read blocked by a hit in trt
+   uint32_t m_cpt_trt_full;             // Transaction blocked due to a full trt

    uint32_t m_cpt_get;
    uint32_t m_cpt_put;

    …

    const soclib::common::IntTab &tgtid_d,   // global index INT network
    const size_t cc_global_id,               // global index CC network
+   const size_t x_width,                    // X width in platform
+   const size_t y_width,                    // Y width in platform
    const size_t nways,                      // Number of ways per set
    const size_t nsets,                      // Number of sets

    …

    ~VciMemCache();

-   void print_stats();
+   void print_stats(bool activity_counters, bool stats);
    void print_trace();
    void cache_monitor(addr_t addr);

    …

    void genMoore();
    void check_monitor(addr_t addr, data_t data, bool read);
+   uint32_t req_distance(uint32_t req_srcid);
+   bool is_local_req(uint32_t req_srcid);

    // Component attributes

    …

    const size_t m_words;            // Number of words in a line
    const size_t m_cc_global_id;     // global_index on cc network
+   const size_t m_xwidth;           // number of x bits in platform
+   const size_t m_ywidth;           // number of y bits in platform
    size_t m_debug_start_cycle;
    bool m_debug_ok;
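The new counters split each transaction class into a local part (the requester sits in the same cluster as this memory cache) and a remote part, and add a cost counter that weights the traffic by its network distance. A minimal standalone sketch of the intended accounting, assuming cost = flits * Manhattan distance as the comments above suggest (the ReadCounters struct and the account() helper are illustrative names, not part of the changeset):

    #include <cstdint>

    // Illustrative sketch only: mirrors m_cpt_read_local / m_cpt_read_remote /
    // m_cpt_read_cost. 'distance' is the Manhattan distance between the
    // requester cluster and this memory cache (0 means local).
    struct ReadCounters
    {
        uint32_t local  = 0;
        uint32_t remote = 0;
        uint32_t cost   = 0;

        void account(uint32_t distance, uint32_t flits)
        {
            if (distance == 0) local++;
            else               remote++;
            cost += flits * distance;   // local requests add 0 to the cost
        }
    };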
trunk/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
r499 r504 1 1 /* -*- c++ -*- 2 *3 * File : vci_mem_cache.cpp4 * Date : 30/10/20085 * Copyright : UPMC / LIP66 * Authors : Alain Greiner / Eric Guthmuller7 *8 * SOCLIB_LGPL_HEADER_BEGIN9 *10 * This file is part of SoCLib, GNU LGPLv2.1.11 *12 * SoCLib is free software; you can redistribute it and/or modify it13 * under the terms of the GNU Lesser General Public License as published14 * by the Free Software Foundation; version 2.1 of the License.15 *16 * SoCLib is distributed in the hope that it will be useful, but17 * WITHOUT ANY WARRANTY; without even the implied warranty of18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU19 * Lesser General Public License for more details.20 *21 * You should have received a copy of the GNU Lesser General Public22 * License along with SoCLib; if not, write to the Free Software23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA24 * 02110-1301 USA25 *26 * SOCLIB_LGPL_HEADER_END27 *28 * Maintainers: alain.greiner@lip6.fr29 * eric.guthmuller@polytechnique.edu30 * cesar.fuguet-tortolero@lip6.fr31 * alexandre.joannou@lip6.fr32 */2 * 3 * File : vci_mem_cache.cpp 4 * Date : 30/10/2008 5 * Copyright : UPMC / LIP6 6 * Authors : Alain Greiner / Eric Guthmuller 7 * 8 * SOCLIB_LGPL_HEADER_BEGIN 9 * 10 * This file is part of SoCLib, GNU LGPLv2.1. 11 * 12 * SoCLib is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU Lesser General Public License as published 14 * by the Free Software Foundation; version 2.1 of the License. 15 * 16 * SoCLib is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * Lesser General Public License for more details. 
20 * 21 * You should have received a copy of the GNU Lesser General Public 22 * License along with SoCLib; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 24 * 02110-1301 USA 25 * 26 * SOCLIB_LGPL_HEADER_END 27 * 28 * Maintainers: alain.greiner@lip6.fr 29 * eric.guthmuller@polytechnique.edu 30 * cesar.fuguet-tortolero@lip6.fr 31 * alexandre.joannou@lip6.fr 32 */ 33 33 34 34 #include "../include/vci_mem_cache.h" … … 58 58 namespace soclib { namespace caba { 59 59 60 const char *tgt_cmd_fsm_str[] =61 {62 "TGT_CMD_IDLE",63 "TGT_CMD_ERROR",64 "TGT_CMD_READ",65 "TGT_CMD_WRITE",66 "TGT_CMD_CAS",67 "TGT_CMD_CONFIG"68 };69 const char *tgt_rsp_fsm_str[] =70 {71 "TGT_RSP_CONFIG_IDLE",72 "TGT_RSP_TGT_CMD_IDLE",73 "TGT_RSP_READ_IDLE",74 "TGT_RSP_WRITE_IDLE",75 "TGT_RSP_CAS_IDLE",76 "TGT_RSP_XRAM_IDLE",77 "TGT_RSP_MULTI_ACK_IDLE",78 "TGT_RSP_CLEANUP_IDLE",79 "TGT_RSP_CONFIG",80 "TGT_RSP_TGT_CMD",81 "TGT_RSP_READ",82 "TGT_RSP_WRITE",83 "TGT_RSP_CAS",84 "TGT_RSP_XRAM",85 "TGT_RSP_MULTI_ACK",86 "TGT_RSP_CLEANUP"87 };88 const char *cc_receive_fsm_str[] =89 {90 "CC_RECEIVE_IDLE",91 "CC_RECEIVE_CLEANUP",92 "CC_RECEIVE_CLEANUP_EOP",93 "CC_RECEIVE_MULTI_ACK"94 };95 const char *cc_send_fsm_str[] =96 {97 "CC_SEND_CONFIG_IDLE",98 "CC_SEND_XRAM_RSP_IDLE",99 "CC_SEND_WRITE_IDLE",100 "CC_SEND_CAS_IDLE",101 "CC_SEND_CONFIG_INVAL_HEADER",102 "CC_SEND_CONFIG_INVAL_NLINE",103 "CC_SEND_CONFIG_BRDCAST_HEADER",104 "CC_SEND_CONFIG_BRDCAST_NLINE",105 "CC_SEND_XRAM_RSP_BRDCAST_HEADER",106 "CC_SEND_XRAM_RSP_BRDCAST_NLINE",107 "CC_SEND_XRAM_RSP_INVAL_HEADER",108 "CC_SEND_XRAM_RSP_INVAL_NLINE",109 "CC_SEND_WRITE_BRDCAST_HEADER",110 "CC_SEND_WRITE_BRDCAST_NLINE",111 "CC_SEND_WRITE_UPDT_HEADER",112 "CC_SEND_WRITE_UPDT_NLINE",113 "CC_SEND_WRITE_UPDT_DATA",114 "CC_SEND_CAS_BRDCAST_HEADER",115 "CC_SEND_CAS_BRDCAST_NLINE",116 "CC_SEND_CAS_UPDT_HEADER",117 "CC_SEND_CAS_UPDT_NLINE",118 "CC_SEND_CAS_UPDT_DATA",119 "CC_SEND_CAS_UPDT_DATA_HIGH"120 };121 const char *multi_ack_fsm_str[] =122 {123 "MULTI_ACK_IDLE",124 "MULTI_ACK_UPT_LOCK",125 "MULTI_ACK_UPT_CLEAR",126 "MULTI_ACK_WRITE_RSP"127 };128 const char *config_fsm_str[] =129 {130 "CONFIG_IDLE",131 "CONFIG_LOOP",132 "CONFIG_WAIT",133 "CONFIG_RSP",134 "CONFIG_DIR_REQ",135 "CONFIG_DIR_ACCESS",136 "CONFIG_IVT_LOCK",137 "CONFIG_BC_SEND",138 "CONFIG_INVAL_SEND",139 "CONFIG_HEAP_REQ",140 "CONFIG_HEAP_SCAN",141 "CONFIG_HEAP_LAST",142 "CONFIG_TRT_LOCK",143 "CONFIG_TRT_SET",144 "CONFIG_PUT_REQ"145 };146 const char *read_fsm_str[] =147 {148 "READ_IDLE",149 "READ_DIR_REQ",150 "READ_DIR_LOCK",151 "READ_DIR_HIT",152 "READ_HEAP_REQ",153 "READ_HEAP_LOCK",154 "READ_HEAP_WRITE",155 "READ_HEAP_ERASE",156 "READ_HEAP_LAST",157 "READ_RSP",158 "READ_TRT_LOCK",159 "READ_TRT_SET",160 "READ_TRT_REQ"161 };162 const char *write_fsm_str[] =163 {164 "WRITE_IDLE",165 "WRITE_NEXT",166 "WRITE_DIR_REQ",167 "WRITE_DIR_LOCK",168 "WRITE_DIR_HIT",169 "WRITE_UPT_LOCK",170 "WRITE_UPT_HEAP_LOCK",171 "WRITE_UPT_REQ",172 "WRITE_UPT_NEXT",173 "WRITE_UPT_DEC",174 "WRITE_RSP",175 "WRITE_MISS_TRT_LOCK",176 "WRITE_MISS_TRT_DATA",177 "WRITE_MISS_TRT_SET",178 "WRITE_MISS_XRAM_REQ",179 "WRITE_BC_DIR_READ",180 "WRITE_BC_TRT_LOCK",181 "WRITE_BC_IVT_LOCK",182 "WRITE_BC_DIR_INVAL",183 "WRITE_BC_CC_SEND",184 "WRITE_BC_XRAM_REQ",185 "WRITE_WAIT"186 };187 const char *ixr_rsp_fsm_str[] =188 {189 "IXR_RSP_IDLE",190 "IXR_RSP_TRT_ERASE",191 "IXR_RSP_TRT_READ"192 };193 const char *xram_rsp_fsm_str[] =194 {195 "XRAM_RSP_IDLE",196 "XRAM_RSP_TRT_COPY",197 "XRAM_RSP_TRT_DIRTY",198 
"XRAM_RSP_DIR_LOCK",199 "XRAM_RSP_DIR_UPDT",200 "XRAM_RSP_DIR_RSP",201 "XRAM_RSP_IVT_LOCK",202 "XRAM_RSP_INVAL_WAIT",203 "XRAM_RSP_INVAL",204 "XRAM_RSP_WRITE_DIRTY",205 "XRAM_RSP_HEAP_REQ",206 "XRAM_RSP_HEAP_ERASE",207 "XRAM_RSP_HEAP_LAST",208 "XRAM_RSP_ERROR_ERASE",209 "XRAM_RSP_ERROR_RSP"210 };211 const char *ixr_cmd_fsm_str[] =212 {213 "IXR_CMD_READ_IDLE",214 "IXR_CMD_WRITE_IDLE",215 "IXR_CMD_CAS_IDLE",216 "IXR_CMD_XRAM_IDLE",217 "IXR_CMD_CONFIG_IDLE",218 "IXR_CMD_READ_TRT",219 "IXR_CMD_WRITE_TRT",220 "IXR_CMD_CAS_TRT",221 "IXR_CMD_XRAM_TRT",222 "IXR_CMD_CONFIG_TRT",223 "IXR_CMD_READ_SEND",224 "IXR_CMD_WRITE_SEND",225 "IXR_CMD_CAS_SEND",226 "IXR_CMD_XRAM_SEND",227 "IXR_CMD_CONFIG_SEND"228 };229 const char *cas_fsm_str[] =230 {231 "CAS_IDLE",232 "CAS_DIR_REQ",233 "CAS_DIR_LOCK",234 "CAS_DIR_HIT_READ",235 "CAS_DIR_HIT_COMPARE",236 "CAS_DIR_HIT_WRITE",237 "CAS_UPT_LOCK",238 "CAS_UPT_HEAP_LOCK",239 "CAS_UPT_REQ",240 "CAS_UPT_NEXT",241 "CAS_BC_TRT_LOCK",242 "CAS_BC_IVT_LOCK",243 "CAS_BC_DIR_INVAL",244 "CAS_BC_CC_SEND",245 "CAS_BC_XRAM_REQ",246 "CAS_RSP_FAIL",247 "CAS_RSP_SUCCESS",248 "CAS_MISS_TRT_LOCK",249 "CAS_MISS_TRT_SET",250 "CAS_MISS_XRAM_REQ",251 "CAS_WAIT"252 };253 const char *cleanup_fsm_str[] =254 {255 "CLEANUP_IDLE",256 "CLEANUP_GET_NLINE",257 "CLEANUP_DIR_REQ",258 "CLEANUP_DIR_LOCK",259 "CLEANUP_DIR_WRITE",260 "CLEANUP_HEAP_REQ",261 "CLEANUP_HEAP_LOCK",262 "CLEANUP_HEAP_SEARCH",263 "CLEANUP_HEAP_CLEAN",264 "CLEANUP_HEAP_FREE",265 "CLEANUP_IVT_LOCK",266 "CLEANUP_IVT_DECREMENT",267 "CLEANUP_IVT_CLEAR",268 "CLEANUP_WRITE_RSP",269 "CLEANUP_SEND_CLACK"270 };271 const char *alloc_dir_fsm_str[] =272 {273 "ALLOC_DIR_RESET",274 "ALLOC_DIR_CONFIG",275 "ALLOC_DIR_READ",276 "ALLOC_DIR_WRITE",277 "ALLOC_DIR_CAS",278 "ALLOC_DIR_CLEANUP",279 "ALLOC_DIR_XRAM_RSP"280 };281 const char *alloc_trt_fsm_str[] =282 {283 "ALLOC_TRT_READ",284 "ALLOC_TRT_WRITE",285 "ALLOC_TRT_CAS",286 "ALLOC_TRT_XRAM_RSP",287 "ALLOC_TRT_IXR_RSP",288 "ALLOC_TRT_CONFIG",289 "ALLOC_TRT_IXR_CMD"290 };291 const char *alloc_upt_fsm_str[] =292 {293 "ALLOC_UPT_WRITE",294 "ALLOC_UPT_CAS",295 "ALLOC_UPT_MULTI_ACK"296 };297 const char *alloc_ivt_fsm_str[] =298 {299 "ALLOC_IVT_WRITE",300 "ALLOC_IVT_XRAM_RSP",301 "ALLOC_IVT_CLEANUP",302 "ALLOC_IVT_CAS",303 "ALLOC_IVT_CONFIG"304 };305 const char *alloc_heap_fsm_str[] =306 {307 "ALLOC_HEAP_RESET",308 "ALLOC_HEAP_READ",309 "ALLOC_HEAP_WRITE",310 "ALLOC_HEAP_CAS",311 "ALLOC_HEAP_CLEANUP",312 "ALLOC_HEAP_XRAM_RSP",313 "ALLOC_HEAP_CONFIG"314 };60 const char *tgt_cmd_fsm_str[] = 61 { 62 "TGT_CMD_IDLE", 63 "TGT_CMD_ERROR", 64 "TGT_CMD_READ", 65 "TGT_CMD_WRITE", 66 "TGT_CMD_CAS", 67 "TGT_CMD_CONFIG" 68 }; 69 const char *tgt_rsp_fsm_str[] = 70 { 71 "TGT_RSP_CONFIG_IDLE", 72 "TGT_RSP_TGT_CMD_IDLE", 73 "TGT_RSP_READ_IDLE", 74 "TGT_RSP_WRITE_IDLE", 75 "TGT_RSP_CAS_IDLE", 76 "TGT_RSP_XRAM_IDLE", 77 "TGT_RSP_MULTI_ACK_IDLE", 78 "TGT_RSP_CLEANUP_IDLE", 79 "TGT_RSP_CONFIG", 80 "TGT_RSP_TGT_CMD", 81 "TGT_RSP_READ", 82 "TGT_RSP_WRITE", 83 "TGT_RSP_CAS", 84 "TGT_RSP_XRAM", 85 "TGT_RSP_MULTI_ACK", 86 "TGT_RSP_CLEANUP" 87 }; 88 const char *cc_receive_fsm_str[] = 89 { 90 "CC_RECEIVE_IDLE", 91 "CC_RECEIVE_CLEANUP", 92 "CC_RECEIVE_CLEANUP_EOP", 93 "CC_RECEIVE_MULTI_ACK" 94 }; 95 const char *cc_send_fsm_str[] = 96 { 97 "CC_SEND_CONFIG_IDLE", 98 "CC_SEND_XRAM_RSP_IDLE", 99 "CC_SEND_WRITE_IDLE", 100 "CC_SEND_CAS_IDLE", 101 "CC_SEND_CONFIG_INVAL_HEADER", 102 "CC_SEND_CONFIG_INVAL_NLINE", 103 "CC_SEND_CONFIG_BRDCAST_HEADER", 104 "CC_SEND_CONFIG_BRDCAST_NLINE", 105 "CC_SEND_XRAM_RSP_BRDCAST_HEADER", 106 
"CC_SEND_XRAM_RSP_BRDCAST_NLINE", 107 "CC_SEND_XRAM_RSP_INVAL_HEADER", 108 "CC_SEND_XRAM_RSP_INVAL_NLINE", 109 "CC_SEND_WRITE_BRDCAST_HEADER", 110 "CC_SEND_WRITE_BRDCAST_NLINE", 111 "CC_SEND_WRITE_UPDT_HEADER", 112 "CC_SEND_WRITE_UPDT_NLINE", 113 "CC_SEND_WRITE_UPDT_DATA", 114 "CC_SEND_CAS_BRDCAST_HEADER", 115 "CC_SEND_CAS_BRDCAST_NLINE", 116 "CC_SEND_CAS_UPDT_HEADER", 117 "CC_SEND_CAS_UPDT_NLINE", 118 "CC_SEND_CAS_UPDT_DATA", 119 "CC_SEND_CAS_UPDT_DATA_HIGH" 120 }; 121 const char *multi_ack_fsm_str[] = 122 { 123 "MULTI_ACK_IDLE", 124 "MULTI_ACK_UPT_LOCK", 125 "MULTI_ACK_UPT_CLEAR", 126 "MULTI_ACK_WRITE_RSP" 127 }; 128 const char *config_fsm_str[] = 129 { 130 "CONFIG_IDLE", 131 "CONFIG_LOOP", 132 "CONFIG_WAIT", 133 "CONFIG_RSP", 134 "CONFIG_DIR_REQ", 135 "CONFIG_DIR_ACCESS", 136 "CONFIG_IVT_LOCK", 137 "CONFIG_BC_SEND", 138 "CONFIG_INVAL_SEND", 139 "CONFIG_HEAP_REQ", 140 "CONFIG_HEAP_SCAN", 141 "CONFIG_HEAP_LAST", 142 "CONFIG_TRT_LOCK", 143 "CONFIG_TRT_SET", 144 "CONFIG_PUT_REQ" 145 }; 146 const char *read_fsm_str[] = 147 { 148 "READ_IDLE", 149 "READ_DIR_REQ", 150 "READ_DIR_LOCK", 151 "READ_DIR_HIT", 152 "READ_HEAP_REQ", 153 "READ_HEAP_LOCK", 154 "READ_HEAP_WRITE", 155 "READ_HEAP_ERASE", 156 "READ_HEAP_LAST", 157 "READ_RSP", 158 "READ_TRT_LOCK", 159 "READ_TRT_SET", 160 "READ_TRT_REQ" 161 }; 162 const char *write_fsm_str[] = 163 { 164 "WRITE_IDLE", 165 "WRITE_NEXT", 166 "WRITE_DIR_REQ", 167 "WRITE_DIR_LOCK", 168 "WRITE_DIR_HIT", 169 "WRITE_UPT_LOCK", 170 "WRITE_UPT_HEAP_LOCK", 171 "WRITE_UPT_REQ", 172 "WRITE_UPT_NEXT", 173 "WRITE_UPT_DEC", 174 "WRITE_RSP", 175 "WRITE_MISS_TRT_LOCK", 176 "WRITE_MISS_TRT_DATA", 177 "WRITE_MISS_TRT_SET", 178 "WRITE_MISS_XRAM_REQ", 179 "WRITE_BC_DIR_READ", 180 "WRITE_BC_TRT_LOCK", 181 "WRITE_BC_IVT_LOCK", 182 "WRITE_BC_DIR_INVAL", 183 "WRITE_BC_CC_SEND", 184 "WRITE_BC_XRAM_REQ", 185 "WRITE_WAIT" 186 }; 187 const char *ixr_rsp_fsm_str[] = 188 { 189 "IXR_RSP_IDLE", 190 "IXR_RSP_TRT_ERASE", 191 "IXR_RSP_TRT_READ" 192 }; 193 const char *xram_rsp_fsm_str[] = 194 { 195 "XRAM_RSP_IDLE", 196 "XRAM_RSP_TRT_COPY", 197 "XRAM_RSP_TRT_DIRTY", 198 "XRAM_RSP_DIR_LOCK", 199 "XRAM_RSP_DIR_UPDT", 200 "XRAM_RSP_DIR_RSP", 201 "XRAM_RSP_IVT_LOCK", 202 "XRAM_RSP_INVAL_WAIT", 203 "XRAM_RSP_INVAL", 204 "XRAM_RSP_WRITE_DIRTY", 205 "XRAM_RSP_HEAP_REQ", 206 "XRAM_RSP_HEAP_ERASE", 207 "XRAM_RSP_HEAP_LAST", 208 "XRAM_RSP_ERROR_ERASE", 209 "XRAM_RSP_ERROR_RSP" 210 }; 211 const char *ixr_cmd_fsm_str[] = 212 { 213 "IXR_CMD_READ_IDLE", 214 "IXR_CMD_WRITE_IDLE", 215 "IXR_CMD_CAS_IDLE", 216 "IXR_CMD_XRAM_IDLE", 217 "IXR_CMD_CONFIG_IDLE", 218 "IXR_CMD_READ_TRT", 219 "IXR_CMD_WRITE_TRT", 220 "IXR_CMD_CAS_TRT", 221 "IXR_CMD_XRAM_TRT", 222 "IXR_CMD_CONFIG_TRT", 223 "IXR_CMD_READ_SEND", 224 "IXR_CMD_WRITE_SEND", 225 "IXR_CMD_CAS_SEND", 226 "IXR_CMD_XRAM_SEND", 227 "IXR_CMD_CONFIG_SEND" 228 }; 229 const char *cas_fsm_str[] = 230 { 231 "CAS_IDLE", 232 "CAS_DIR_REQ", 233 "CAS_DIR_LOCK", 234 "CAS_DIR_HIT_READ", 235 "CAS_DIR_HIT_COMPARE", 236 "CAS_DIR_HIT_WRITE", 237 "CAS_UPT_LOCK", 238 "CAS_UPT_HEAP_LOCK", 239 "CAS_UPT_REQ", 240 "CAS_UPT_NEXT", 241 "CAS_BC_TRT_LOCK", 242 "CAS_BC_IVT_LOCK", 243 "CAS_BC_DIR_INVAL", 244 "CAS_BC_CC_SEND", 245 "CAS_BC_XRAM_REQ", 246 "CAS_RSP_FAIL", 247 "CAS_RSP_SUCCESS", 248 "CAS_MISS_TRT_LOCK", 249 "CAS_MISS_TRT_SET", 250 "CAS_MISS_XRAM_REQ", 251 "CAS_WAIT" 252 }; 253 const char *cleanup_fsm_str[] = 254 { 255 "CLEANUP_IDLE", 256 "CLEANUP_GET_NLINE", 257 "CLEANUP_DIR_REQ", 258 "CLEANUP_DIR_LOCK", 259 "CLEANUP_DIR_WRITE", 260 "CLEANUP_HEAP_REQ", 261 "CLEANUP_HEAP_LOCK", 262 
"CLEANUP_HEAP_SEARCH", 263 "CLEANUP_HEAP_CLEAN", 264 "CLEANUP_HEAP_FREE", 265 "CLEANUP_IVT_LOCK", 266 "CLEANUP_IVT_DECREMENT", 267 "CLEANUP_IVT_CLEAR", 268 "CLEANUP_WRITE_RSP", 269 "CLEANUP_SEND_CLACK" 270 }; 271 const char *alloc_dir_fsm_str[] = 272 { 273 "ALLOC_DIR_RESET", 274 "ALLOC_DIR_CONFIG", 275 "ALLOC_DIR_READ", 276 "ALLOC_DIR_WRITE", 277 "ALLOC_DIR_CAS", 278 "ALLOC_DIR_CLEANUP", 279 "ALLOC_DIR_XRAM_RSP" 280 }; 281 const char *alloc_trt_fsm_str[] = 282 { 283 "ALLOC_TRT_READ", 284 "ALLOC_TRT_WRITE", 285 "ALLOC_TRT_CAS", 286 "ALLOC_TRT_XRAM_RSP", 287 "ALLOC_TRT_IXR_RSP", 288 "ALLOC_TRT_CONFIG", 289 "ALLOC_TRT_IXR_CMD" 290 }; 291 const char *alloc_upt_fsm_str[] = 292 { 293 "ALLOC_UPT_WRITE", 294 "ALLOC_UPT_CAS", 295 "ALLOC_UPT_MULTI_ACK" 296 }; 297 const char *alloc_ivt_fsm_str[] = 298 { 299 "ALLOC_IVT_WRITE", 300 "ALLOC_IVT_XRAM_RSP", 301 "ALLOC_IVT_CLEANUP", 302 "ALLOC_IVT_CAS", 303 "ALLOC_IVT_CONFIG" 304 }; 305 const char *alloc_heap_fsm_str[] = 306 { 307 "ALLOC_HEAP_RESET", 308 "ALLOC_HEAP_READ", 309 "ALLOC_HEAP_WRITE", 310 "ALLOC_HEAP_CAS", 311 "ALLOC_HEAP_CLEANUP", 312 "ALLOC_HEAP_XRAM_RSP", 313 "ALLOC_HEAP_CONFIG" 314 }; 315 315 316 316 #define tmpl(x) \ 317 template<typename vci_param_int, \ 318 typename vci_param_ext, \ 319 size_t dspin_in_width, \ 320 size_t dspin_out_width> x \ 321 VciMemCache<vci_param_int, vci_param_ext, dspin_in_width, dspin_out_width> 322 323 using namespace soclib::common; 324 325 //////////////////////////////// 326 // Constructor 327 //////////////////////////////// 328 329 tmpl(/**/) ::VciMemCache( 330 sc_module_name name, 331 const MappingTable &mtp, // mapping table for direct network 332 const MappingTable &mtx, // mapping table for external network 333 const IntTab &srcid_x, // global index on external network 334 const IntTab &tgtid_d, // global index on direct network 335 const size_t cc_global_id, // global index on cc network 336 const size_t nways, // number of ways per set 337 const size_t nsets, // number of associative sets 338 const size_t nwords, // number of words in cache line 339 const size_t max_copies, // max number of copies in heap 340 const size_t heap_size, // number of heap entries 341 const size_t trt_lines, // number of TRT entries 342 const size_t upt_lines, // number of UPT entries 343 const size_t ivt_lines, // number of IVT entries 344 const size_t debug_start_cycle, 345 const bool debug_ok) 346 347 : soclib::caba::BaseModule(name), 348 349 p_clk( "p_clk" ), 350 p_resetn( "p_resetn" ), 351 p_vci_tgt( "p_vci_tgt" ), 352 p_vci_ixr( "p_vci_ixr" ), 353 p_dspin_p2m( "p_dspin_p2m" ), 354 p_dspin_m2p( "p_dspin_m2p" ), 355 p_dspin_clack( "p_dspin_clack" ), 356 357 m_seglist( mtp.getSegmentList(tgtid_d) ), 358 m_nseg( 0 ), 359 m_srcid_x( mtx.indexForId(srcid_x) ), 360 m_initiators( 1 << vci_param_int::S ), 361 m_heap_size( heap_size ), 362 m_ways( nways ), 363 m_sets( nsets ), 364 m_words( nwords ), 365 m_cc_global_id( cc_global_id ), 366 m_debug_start_cycle( debug_start_cycle ), 367 m_debug_ok( debug_ok ), 368 m_trt_lines(trt_lines), 369 m_trt(this->name(), trt_lines, nwords), 370 m_upt_lines(upt_lines), 371 m_upt(upt_lines), 372 m_ivt(ivt_lines), 373 m_cache_directory(nways, nsets, nwords, vci_param_int::N), 374 m_cache_data(nways, nsets, nwords), 375 m_heap(m_heap_size), 376 m_max_copies( max_copies ), 377 m_llsc_table(), 317 template<typename vci_param_int, \ 318 typename vci_param_ext, \ 319 size_t dspin_in_width, \ 320 size_t dspin_out_width> x \ 321 VciMemCache<vci_param_int, vci_param_ext, dspin_in_width, 
dspin_out_width> 322 323 using namespace soclib::common; 324 325 //////////////////////////////// 326 // Constructor 327 //////////////////////////////// 328 329 tmpl(/**/)::VciMemCache( 330 sc_module_name name, 331 const MappingTable &mtp, // mapping table for direct network 332 const MappingTable &mtx, // mapping table for external network 333 const IntTab &srcid_x, // global index on external network 334 const IntTab &tgtid_d, // global index on direct network 335 const size_t cc_global_id, // global index on cc network 336 const size_t x_width, // number of x bits in platform 337 const size_t y_width, // number of x bits in platform 338 const size_t nways, // number of ways per set 339 const size_t nsets, // number of associative sets 340 const size_t nwords, // number of words in cache line 341 const size_t max_copies, // max number of copies in heap 342 const size_t heap_size, // number of heap entries 343 const size_t trt_lines, // number of TRT entries 344 const size_t upt_lines, // number of UPT entries 345 const size_t ivt_lines, // number of IVT entries 346 const size_t debug_start_cycle, 347 const bool debug_ok) 348 349 : soclib::caba::BaseModule(name), 350 351 p_clk( "p_clk" ), 352 p_resetn( "p_resetn" ), 353 p_vci_tgt( "p_vci_tgt" ), 354 p_vci_ixr( "p_vci_ixr" ), 355 p_dspin_p2m( "p_dspin_p2m" ), 356 p_dspin_m2p( "p_dspin_m2p" ), 357 p_dspin_clack( "p_dspin_clack" ), 358 359 m_seglist(mtp.getSegmentList(tgtid_d)), 360 m_nseg(0), 361 m_srcid_x( mtx.indexForId(srcid_x)), 362 m_initiators(1 << vci_param_int::S), 363 m_heap_size(heap_size), 364 m_ways(nways), 365 m_sets(nsets), 366 m_words(nwords), 367 m_cc_global_id(cc_global_id), 368 m_xwidth(x_width), 369 m_ywidth(y_width), 370 m_debug_start_cycle(debug_start_cycle), 371 m_debug_ok(debug_ok), 372 m_trt_lines(trt_lines), 373 m_trt(this->name(), trt_lines, nwords), 374 m_upt_lines(upt_lines), 375 m_upt(upt_lines), 376 m_ivt(ivt_lines), 377 m_cache_directory(nways, nsets, nwords, vci_param_int::N), 378 m_cache_data(nways, nsets, nwords), 379 m_heap(m_heap_size), 380 m_max_copies(max_copies), 381 m_llsc_table(), 378 382 379 383 #define L2 soclib::common::uint32_log2 380 m_x(L2(m_words), 2),381 m_y(L2(m_sets), L2(m_words) + 2),382 m_z(vci_param_int::N - L2(m_sets) - L2(m_words) - 2, L2(m_sets) + L2(m_words) + 2),383 m_nline(vci_param_int::N - L2(m_words) - 2, L2(m_words) + 2),384 m_x(L2(m_words), 2), 385 m_y(L2(m_sets), L2(m_words) + 2), 386 m_z(vci_param_int::N - L2(m_sets) - L2(m_words) - 2, L2(m_sets) + L2(m_words) + 2), 387 m_nline(vci_param_int::N - L2(m_words) - 2, L2(m_words) + 2), 384 388 #undef L2 385 389 386 // XMIN(5 bits) / XMAX(5 bits) / YMIN(5 bits) / YMAX(5 bits) 387 // 0b00000 / 0b11111 / 0b00000 / 0b11111 388 m_broadcast_boundaries(0x7C1F), 389 390 391 // FIFOs 392 m_cmd_read_addr_fifo("m_cmd_read_addr_fifo", 4), 393 m_cmd_read_length_fifo("m_cmd_read_length_fifo", 4), 394 m_cmd_read_srcid_fifo("m_cmd_read_srcid_fifo", 4), 395 m_cmd_read_trdid_fifo("m_cmd_read_trdid_fifo", 4), 396 m_cmd_read_pktid_fifo("m_cmd_read_pktid_fifo", 4), 397 398 m_cmd_write_addr_fifo("m_cmd_write_addr_fifo",8), 399 m_cmd_write_eop_fifo("m_cmd_write_eop_fifo",8), 400 m_cmd_write_srcid_fifo("m_cmd_write_srcid_fifo",8), 401 m_cmd_write_trdid_fifo("m_cmd_write_trdid_fifo",8), 402 m_cmd_write_pktid_fifo("m_cmd_write_pktid_fifo",8), 403 m_cmd_write_data_fifo("m_cmd_write_data_fifo",8), 404 m_cmd_write_be_fifo("m_cmd_write_be_fifo",8), 405 406 m_cmd_cas_addr_fifo("m_cmd_cas_addr_fifo",4), 407 m_cmd_cas_eop_fifo("m_cmd_cas_eop_fifo",4), 408 
m_cmd_cas_srcid_fifo("m_cmd_cas_srcid_fifo",4), 409 m_cmd_cas_trdid_fifo("m_cmd_cas_trdid_fifo",4), 410 m_cmd_cas_pktid_fifo("m_cmd_cas_pktid_fifo",4), 411 m_cmd_cas_wdata_fifo("m_cmd_cas_wdata_fifo",4), 412 413 m_cc_receive_to_cleanup_fifo("m_cc_receive_to_cleanup_fifo", 4), 414 m_cc_receive_to_multi_ack_fifo("m_cc_receive_to_multi_ack_fifo", 4), 415 416 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), 417 418 r_config_fsm( "r_config_fsm" ), 419 420 m_config_to_cc_send_inst_fifo( "m_config_to_cc_send_inst_fifo", 8 ), 421 m_config_to_cc_send_srcid_fifo( "m_config_to_cc_send_srcid_fifo", 8 ), 422 423 r_read_fsm( "r_read_fsm" ), 424 425 r_write_fsm( "r_write_fsm" ), 426 427 m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo",8), 428 m_write_to_cc_send_srcid_fifo("m_write_to_cc_send_srcid_fifo",8), 429 430 r_multi_ack_fsm("r_multi_ack_fsm"), 431 432 r_cleanup_fsm("r_cleanup_fsm"), 433 434 r_cas_fsm("r_cas_fsm"), 435 436 m_cas_to_cc_send_inst_fifo("m_cas_to_cc_send_inst_fifo",8), 437 m_cas_to_cc_send_srcid_fifo("m_cas_to_cc_send_srcid_fifo",8), 438 439 r_ixr_rsp_fsm("r_ixr_rsp_fsm"), 440 r_xram_rsp_fsm("r_xram_rsp_fsm"), 441 442 m_xram_rsp_to_cc_send_inst_fifo("m_xram_rsp_to_cc_send_inst_fifo",8), 443 m_xram_rsp_to_cc_send_srcid_fifo("m_xram_rsp_to_cc_send_srcid_fifo",8), 444 445 r_ixr_cmd_fsm("r_ixr_cmd_fsm"), 446 447 r_tgt_rsp_fsm("r_tgt_rsp_fsm"), 448 449 r_cc_send_fsm("r_cc_send_fsm"), 450 r_cc_receive_fsm("r_cc_receive_fsm"), 451 452 r_alloc_dir_fsm("r_alloc_dir_fsm"), 453 r_alloc_dir_reset_cpt("r_alloc_dir_reset_cpt"), 454 r_alloc_trt_fsm("r_alloc_trt_fsm"), 455 r_alloc_upt_fsm("r_alloc_upt_fsm"), 456 r_alloc_ivt_fsm("r_alloc_ivt_fsm"), 457 r_alloc_heap_fsm("r_alloc_heap_fsm"), 458 r_alloc_heap_reset_cpt("r_alloc_heap_reset_cpt") 459 { 460 std::cout << " - Building VciMemCache : " << name << std::endl; 461 462 assert(IS_POW_OF_2(nsets)); 463 assert(IS_POW_OF_2(nwords)); 464 assert(IS_POW_OF_2(nways)); 465 assert(nsets); 466 assert(nwords); 467 assert(nways); 468 469 // check Transaction table size 470 assert((uint32_log2(trt_lines) <= vci_param_ext::T) and 471 "MEMC ERROR : Need more bits for VCI TRDID field"); 472 473 // check internal and external data width 474 assert( (vci_param_int::B == 4 ) and 475 "MEMC ERROR : VCI internal data width must be 32 bits"); 476 477 assert( (vci_param_ext::B == 8) and 478 "MEMC ERROR : VCI external data width must be 64 bits"); 479 480 // Check coherence between internal & external addresses 481 assert( (vci_param_int::N == vci_param_ext::N) and 482 "MEMC ERROR : VCI internal & external addresses must have the same width"); 483 484 // Get the segments associated to the MemCache 485 std::list<soclib::common::Segment>::iterator seg; 486 size_t i = 0; 487 488 for(seg = m_seglist.begin(); seg != m_seglist.end() ; seg++) 390 // XMIN(5 bits) / XMAX(5 bits) / YMIN(5 bits) / YMAX(5 bits) 391 // 0b00000 / 0b11111 / 0b00000 / 0b11111 392 m_broadcast_boundaries(0x7C1F), 393 394 395 // FIFOs 396 m_cmd_read_addr_fifo("m_cmd_read_addr_fifo", 4), 397 m_cmd_read_length_fifo("m_cmd_read_length_fifo", 4), 398 m_cmd_read_srcid_fifo("m_cmd_read_srcid_fifo", 4), 399 m_cmd_read_trdid_fifo("m_cmd_read_trdid_fifo", 4), 400 m_cmd_read_pktid_fifo("m_cmd_read_pktid_fifo", 4), 401 402 m_cmd_write_addr_fifo("m_cmd_write_addr_fifo",8), 403 m_cmd_write_eop_fifo("m_cmd_write_eop_fifo",8), 404 m_cmd_write_srcid_fifo("m_cmd_write_srcid_fifo",8), 405 m_cmd_write_trdid_fifo("m_cmd_write_trdid_fifo",8), 406 m_cmd_write_pktid_fifo("m_cmd_write_pktid_fifo",8), 407 
m_cmd_write_data_fifo("m_cmd_write_data_fifo",8), 408 m_cmd_write_be_fifo("m_cmd_write_be_fifo",8), 409 410 m_cmd_cas_addr_fifo("m_cmd_cas_addr_fifo",4), 411 m_cmd_cas_eop_fifo("m_cmd_cas_eop_fifo",4), 412 m_cmd_cas_srcid_fifo("m_cmd_cas_srcid_fifo",4), 413 m_cmd_cas_trdid_fifo("m_cmd_cas_trdid_fifo",4), 414 m_cmd_cas_pktid_fifo("m_cmd_cas_pktid_fifo",4), 415 m_cmd_cas_wdata_fifo("m_cmd_cas_wdata_fifo",4), 416 417 m_cc_receive_to_cleanup_fifo("m_cc_receive_to_cleanup_fifo", 4), 418 m_cc_receive_to_multi_ack_fifo("m_cc_receive_to_multi_ack_fifo", 4), 419 420 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), 421 422 r_config_fsm( "r_config_fsm" ), 423 424 m_config_to_cc_send_inst_fifo( "m_config_to_cc_send_inst_fifo", 8 ), 425 m_config_to_cc_send_srcid_fifo( "m_config_to_cc_send_srcid_fifo", 8 ), 426 427 r_read_fsm( "r_read_fsm" ), 428 429 r_write_fsm( "r_write_fsm" ), 430 431 m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo",8), 432 m_write_to_cc_send_srcid_fifo("m_write_to_cc_send_srcid_fifo",8), 433 434 r_multi_ack_fsm("r_multi_ack_fsm"), 435 436 r_cleanup_fsm("r_cleanup_fsm"), 437 438 r_cas_fsm("r_cas_fsm"), 439 440 m_cas_to_cc_send_inst_fifo("m_cas_to_cc_send_inst_fifo",8), 441 m_cas_to_cc_send_srcid_fifo("m_cas_to_cc_send_srcid_fifo",8), 442 443 r_ixr_rsp_fsm("r_ixr_rsp_fsm"), 444 r_xram_rsp_fsm("r_xram_rsp_fsm"), 445 446 m_xram_rsp_to_cc_send_inst_fifo("m_xram_rsp_to_cc_send_inst_fifo",8), 447 m_xram_rsp_to_cc_send_srcid_fifo("m_xram_rsp_to_cc_send_srcid_fifo",8), 448 449 r_ixr_cmd_fsm("r_ixr_cmd_fsm"), 450 451 r_tgt_rsp_fsm("r_tgt_rsp_fsm"), 452 453 r_cc_send_fsm("r_cc_send_fsm"), 454 r_cc_receive_fsm("r_cc_receive_fsm"), 455 456 r_alloc_dir_fsm("r_alloc_dir_fsm"), 457 r_alloc_dir_reset_cpt("r_alloc_dir_reset_cpt"), 458 r_alloc_trt_fsm("r_alloc_trt_fsm"), 459 r_alloc_upt_fsm("r_alloc_upt_fsm"), 460 r_alloc_ivt_fsm("r_alloc_ivt_fsm"), 461 r_alloc_heap_fsm("r_alloc_heap_fsm"), 462 r_alloc_heap_reset_cpt("r_alloc_heap_reset_cpt") 463 { 464 std::cout << " - Building VciMemCache : " << name << std::endl; 465 466 assert(IS_POW_OF_2(nsets)); 467 assert(IS_POW_OF_2(nwords)); 468 assert(IS_POW_OF_2(nways)); 469 assert(nsets); 470 assert(nwords); 471 assert(nways); 472 473 // check Transaction table size 474 assert((uint32_log2(trt_lines) <= vci_param_ext::T) and 475 "MEMC ERROR : Need more bits for VCI TRDID field"); 476 477 // check internal and external data width 478 assert( (vci_param_int::B == 4 ) and 479 "MEMC ERROR : VCI internal data width must be 32 bits"); 480 481 assert( (vci_param_ext::B == 8) and 482 "MEMC ERROR : VCI external data width must be 64 bits"); 483 484 // Check coherence between internal & external addresses 485 assert( (vci_param_int::N == vci_param_ext::N) and 486 "MEMC ERROR : VCI internal & external addresses must have the same width"); 487 488 // Get the segments associated to the MemCache 489 std::list<soclib::common::Segment>::iterator seg; 490 size_t i = 0; 491 492 for (seg = m_seglist.begin(); seg != m_seglist.end(); seg++) 493 { 494 std::cout << " => segment " << seg->name() 495 << " / base = " << std::hex << seg->baseAddress() 496 << " / size = " << seg->size() << std::endl; 497 m_nseg++; 498 } 499 500 m_seg = new soclib::common::Segment*[m_nseg]; 501 502 for (seg = m_seglist.begin(); seg != m_seglist.end(); seg++) 503 { 504 if (seg->special() ) m_seg_config = i; 505 m_seg[i] = & (*seg); 506 i++; 507 } 508 509 // Allocation for IXR_RSP FSM 510 r_ixr_rsp_to_xram_rsp_rok = new sc_signal<bool>[m_trt_lines]; 511 512 // Allocation for XRAM_RSP FSM 513 
r_xram_rsp_victim_data = new sc_signal<data_t>[nwords]; 514 r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 515 516 // Allocation for READ FSM 517 r_read_data = new sc_signal<data_t>[nwords]; 518 r_read_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 519 520 // Allocation for WRITE FSM 521 r_write_data = new sc_signal<data_t>[nwords]; 522 r_write_be = new sc_signal<be_t>[nwords]; 523 r_write_to_cc_send_data = new sc_signal<data_t>[nwords]; 524 r_write_to_cc_send_be = new sc_signal<be_t>[nwords]; 525 526 // Allocation for CAS FSM 527 r_cas_data = new sc_signal<data_t>[nwords]; 528 r_cas_rdata = new sc_signal<data_t>[2]; 529 530 // Allocation for IXR_CMD FSM 531 r_ixr_cmd_wdata = new sc_signal<data_t>[nwords]; 532 533 // Allocation for debug 534 m_debug_previous_data = new data_t[nwords]; 535 m_debug_data = new data_t[nwords]; 536 537 SC_METHOD(transition); 538 dont_initialize(); 539 sensitive << p_clk.pos(); 540 541 SC_METHOD(genMoore); 542 dont_initialize(); 543 sensitive << p_clk.neg(); 544 } // end constructor 545 546 547 ///////////////////////////////////////////////////// 548 tmpl(void) ::cache_monitor(addr_t addr) 549 ///////////////////////////////////////////////////// 489 550 { 490 std::cout << " => segment " << seg->name() 491 << " / base = " << std::hex << seg->baseAddress() 492 << " / size = " << seg->size() << std::endl; 493 m_nseg++; 494 } 495 496 m_seg = new soclib::common::Segment*[m_nseg]; 497 498 for(seg = m_seglist.begin() ; seg != m_seglist.end() ; seg++) 499 { 500 if ( seg->special() ) m_seg_config = i; 501 m_seg[i] = & (*seg); 502 i++; 503 } 504 505 // Allocation for IXR_RSP FSM 506 r_ixr_rsp_to_xram_rsp_rok = new sc_signal<bool>[m_trt_lines]; 507 508 // Allocation for XRAM_RSP FSM 509 r_xram_rsp_victim_data = new sc_signal<data_t>[nwords]; 510 r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 511 512 // Allocation for READ FSM 513 r_read_data = new sc_signal<data_t>[nwords]; 514 r_read_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 515 516 // Allocation for WRITE FSM 517 r_write_data = new sc_signal<data_t>[nwords]; 518 r_write_be = new sc_signal<be_t>[nwords]; 519 r_write_to_cc_send_data = new sc_signal<data_t>[nwords]; 520 r_write_to_cc_send_be = new sc_signal<be_t>[nwords]; 521 522 // Allocation for CAS FSM 523 r_cas_data = new sc_signal<data_t>[nwords]; 524 r_cas_rdata = new sc_signal<data_t>[2]; 525 526 // Allocation for IXR_CMD FSM 527 r_ixr_cmd_wdata = new sc_signal<data_t>[nwords]; 528 529 // Allocation for debug 530 m_debug_previous_data = new data_t[nwords]; 531 m_debug_data = new data_t[nwords]; 532 533 SC_METHOD(transition); 534 dont_initialize(); 535 sensitive << p_clk.pos(); 536 537 SC_METHOD(genMoore); 538 dont_initialize(); 539 sensitive << p_clk.neg(); 540 } // end constructor 541 542 543 ///////////////////////////////////////////////////// 544 tmpl(void) ::cache_monitor(addr_t addr) 545 ///////////////////////////////////////////////////// 546 { 547 size_t way = 0; 548 size_t set = 0; 549 DirectoryEntry entry = m_cache_directory.read_neutral(addr, &way, &set ); 550 551 // read data and compute data_change 552 bool data_change = false; 553 if ( entry.valid ) 554 { 555 for ( size_t word = 0 ; word<m_words ; word++ ) 551 size_t way = 0; 552 size_t set = 0; 553 DirectoryEntry entry = m_cache_directory.read_neutral(addr, &way, &set); 554 555 // read data and compute data_change 556 bool data_change = false; 557 if (entry.valid) 556 558 { 557 m_debug_data[word] = m_cache_data.read(way, set, word); 558 if ( m_debug_previous_valid 
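The constructor now takes the mesh dimensions of the platform in addition to the previous parameters. A hypothetical instantiation from a platform top level, with placeholder template arguments, mapping tables, indices and sizes (only the two extra x_width / y_width arguments are new in r504):

    // Hypothetical instantiation (all names and sizes are placeholders).
    VciMemCache<vci_param_int, vci_param_ext, dspin_in_width, dspin_out_width>
    memc("memc",
         mt_int,               // mapping table, direct network
         mt_ext,               // mapping table, external (XRAM) network
         IntTab(cluster, 0),   // srcid on external network
         IntTab(cluster, 1),   // tgtid on direct network
         cc_global_id,         // global index on cc network
         4, 4,                 // x_width, y_width (new in r504)
         16, 256, 16,          // nways, nsets, nwords
         8,                    // max_copies
         1024,                 // heap_size
         4, 4, 4,              // trt_lines, upt_lines, ivt_lines
         0, false);            // debug_start_cycle, debug_ok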
and 559 (m_debug_data[word] != m_debug_previous_data[word]) ) 559 for (size_t word = 0; word<m_words; word++) 560 560 { 561 data_change = true; 561 m_debug_data[word] = m_cache_data.read(way, set, word); 562 if (m_debug_previous_valid and 563 (m_debug_data[word] != m_debug_previous_data[word])) 564 { 565 data_change = true; 566 } 562 567 } 563 568 } 569 570 // print values if any change 571 if ((entry.valid != m_debug_previous_valid) or 572 (entry.valid and (entry.count != m_debug_previous_count)) or 573 (entry.valid and (entry.dirty != m_debug_previous_dirty)) or data_change) 574 { 575 std::cout << "Monitor MEMC " << name() 576 << " at cycle " << std::dec << m_cpt_cycles 577 << " for address " << std::hex << addr 578 << " / VAL = " << std::dec << entry.valid 579 << " / WAY = " << way 580 << " / COUNT = " << entry.count 581 << " / DIRTY = " << entry.dirty 582 << " / DATA_CHANGE = " << data_change 583 << std::endl; 584 std::cout << std::hex << " /0:" << m_debug_data[0] 585 << "/1:" << m_debug_data[1] 586 << "/2:" << m_debug_data[2] 587 << "/3:" << m_debug_data[3] 588 << "/4:" << m_debug_data[4] 589 << "/5:" << m_debug_data[5] 590 << "/6:" << m_debug_data[6] 591 << "/7:" << m_debug_data[7] 592 << "/8:" << m_debug_data[8] 593 << "/9:" << m_debug_data[9] 594 << "/A:" << m_debug_data[10] 595 << "/B:" << m_debug_data[11] 596 << "/C:" << m_debug_data[12] 597 << "/D:" << m_debug_data[13] 598 << "/E:" << m_debug_data[14] 599 << "/F:" << m_debug_data[15] 600 << std::endl; 601 } 602 603 // register values 604 m_debug_previous_count = entry.count; 605 m_debug_previous_valid = entry.valid; 606 m_debug_previous_dirty = entry.dirty; 607 for (size_t word = 0; word < m_words; word++) 608 m_debug_previous_data[word] = m_debug_data[word]; 564 609 } 610 565 611 566 // print values if any change 567 if ( (entry.valid != m_debug_previous_valid) or 568 (entry.valid and (entry.count != m_debug_previous_count)) or 569 (entry.valid and (entry.dirty != m_debug_previous_dirty)) or data_change ) 612 ///////////////////////////////////////////////////// 613 tmpl(uint32_t)::req_distance(uint32_t req_srcid) 614 ///////////////////////////////////////////////////// 570 615 { 571 std::cout << "Monitor MEMC " << name() 572 << " at cycle " << std::dec << m_cpt_cycles 573 << " for address " << std::hex << addr 574 << " / VAL = " << std::dec << entry.valid 575 << " / WAY = " << way 576 << " / COUNT = " << entry.count 577 << " / DIRTY = " << entry.dirty 578 << " / DATA_CHANGE = " << data_change 579 << std::endl; 580 std::cout << std::hex << " /0:" << m_debug_data[0] 581 << "/1:" << m_debug_data[1] 582 << "/2:" << m_debug_data[2] 583 << "/3:" << m_debug_data[3] 584 << "/4:" << m_debug_data[4] 585 << "/5:" << m_debug_data[5] 586 << "/6:" << m_debug_data[6] 587 << "/7:" << m_debug_data[7] 588 << "/8:" << m_debug_data[8] 589 << "/9:" << m_debug_data[9] 590 << "/A:" << m_debug_data[10] 591 << "/B:" << m_debug_data[11] 592 << "/C:" << m_debug_data[12] 593 << "/D:" << m_debug_data[13] 594 << "/E:" << m_debug_data[14] 595 << "/F:" << m_debug_data[15] 596 << std::endl; 616 uint8_t self_x_srcid = m_cc_global_id >> (14 - m_xwidth); 617 uint8_t self_y_srcid = ((m_cc_global_id << m_xwidth) >> (14 - m_ywidth)); 618 619 uint8_t x_srcid = req_srcid >> (14 - m_xwidth); 620 uint8_t y_srcid = ((req_srcid << m_xwidth) >> (14 - m_ywidth)); 621 return abs(self_x_srcid - x_srcid) + abs(self_y_srcid - y_srcid); 597 622 } 598 623 599 // register values 600 m_debug_previous_count = entry.count; 601 m_debug_previous_valid = entry.valid; 602 
m_debug_previous_dirty = entry.dirty; 603 for( size_t word=0 ; word<m_words ; word++ ) 604 m_debug_previous_data[word] = m_debug_data[word]; 605 } 606 607 ////////////////////////////////////////////////// 608 tmpl(void) ::print_trace() 609 ////////////////////////////////////////////////// 610 { 611 std::cout << "MEMC " << name() << std::endl; 612 std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] 624 625 ///////////////////////////////////////////////////// 626 tmpl(bool)::is_local_req(uint32_t req_srcid) 627 ///////////////////////////////////////////////////// 628 { 629 return req_distance(req_srcid) == 0; 630 } 631 632 633 ////////////////////////////////////////////////// 634 tmpl(void)::print_trace() 635 ////////////////////////////////////////////////// 636 { 637 std::cout << "MEMC " << name() << std::endl; 638 std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] 613 639 << " | " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] 614 640 << " | " << read_fsm_str[r_read_fsm.read()] … … 617 643 << " | " << config_fsm_str[r_config_fsm.read()] 618 644 << " | " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl; 619 std::cout << " " << cc_send_fsm_str[r_cc_send_fsm.read()]645 std::cout << " " << cc_send_fsm_str[r_cc_send_fsm.read()] 620 646 << " | " << cc_receive_fsm_str[r_cc_receive_fsm.read()] 621 647 << " | " << multi_ack_fsm_str[r_multi_ack_fsm.read()] … … 623 649 << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] 624 650 << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl; 625 std::cout << " " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()]651 std::cout << " " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] 626 652 << " | " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] 627 653 << " | " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] 628 654 << " | " << alloc_ivt_fsm_str[r_alloc_ivt_fsm.read()] 629 655 << " | " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 630 } 631 632 ///////////////////////////////////////// 633 tmpl(void) ::print_stats() 634 ///////////////////////////////////////// 635 { 636 std::cout << "----------------------------------" << std::dec << std::endl; 637 std::cout 638 << "MEM_CACHE " << name() << " / Time = " << m_cpt_cycles << std::endl 639 << "- READ RATE = " << (double) m_cpt_read/m_cpt_cycles << std::endl 640 << "- READ TOTAL = " << m_cpt_read << std::endl 641 << "- READ MISS RATE = " << (double) m_cpt_read_miss/m_cpt_read << std::endl 642 << "- WRITE RATE = " << (double) m_cpt_write/m_cpt_cycles << std::endl 643 << "- WRITE TOTAL = " << m_cpt_write << std::endl 644 << "- WRITE MISS RATE = " << (double) m_cpt_write_miss/m_cpt_write << std::endl 645 << "- WRITE BURST LENGTH = " << (double) m_cpt_write_cells/m_cpt_write << std::endl 646 << "- WRITE BURST TOTAL = " << m_cpt_write_cells << std::endl 647 << "- REQUESTS TRT FULL = " << m_cpt_trt_full << std::endl 648 << "- READ TRT BLOKED HIT = " << m_cpt_trt_rb << std::endl 649 << "- UPDATE RATE = " << (double) m_cpt_update/m_cpt_cycles << std::endl 650 << "- UPDATE ARITY = " << (double) m_cpt_update_mult/m_cpt_update << std::endl 651 << "- INVAL MULTICAST RATE = " << (double)(m_cpt_inval-m_cpt_inval_brdcast) /m_cpt_cycles << std::endl 652 << "- INVAL MULTICAST ARITY= " << (double) m_cpt_inval_mult/ (m_cpt_inval-m_cpt_inval_brdcast) << std::endl 653 << "- INVAL BROADCAST RATE = " << (double) m_cpt_inval_brdcast/m_cpt_cycles << std::endl 654 << "- SAVE DIRTY RATE = " << (double) m_cpt_write_dirty/m_cpt_cycles << std::endl 655 << "- CLEANUP RATE = " << (double) m_cpt_cleanup/m_cpt_cycles << 
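req_distance() extracts cluster coordinates from the source id and returns the Manhattan distance between the requester and this memory cache; is_local_req() simply tests for a zero distance. A standalone sketch of the same computation, assuming a 14-bit srcid with X in the top x_width bits and Y in the following y_width bits (this layout is inferred from the shift amounts used above; function and variable names are illustrative):

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    // Standalone sketch of the distance used by the new cost counters.
    static uint32_t manhattan_distance(uint32_t srcid_a, uint32_t srcid_b,
                                       uint32_t x_width, uint32_t y_width)
    {
        uint32_t xa = (srcid_a >> (14 - x_width)) & ((1u << x_width) - 1);
        uint32_t ya = (srcid_a >> (14 - x_width - y_width)) & ((1u << y_width) - 1);
        uint32_t xb = (srcid_b >> (14 - x_width)) & ((1u << x_width) - 1);
        uint32_t yb = (srcid_b >> (14 - x_width - y_width)) & ((1u << y_width) - 1);
        return std::abs((int) xa - (int) xb) + std::abs((int) ya - (int) yb);
    }

    int main()
    {
        // Requester in cluster (1,2), memory cache in cluster (3,0),
        // 4 X bits and 4 Y bits: distance = |1-3| + |2-0| = 4.
        uint32_t req  = (1u << 10) | (2u << 6);
        uint32_t self = (3u << 10) | (0u << 6);
        std::cout << manhattan_distance(req, self, 4, 4) << std::endl;  // prints 4
        return 0;
    }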
std::endl 656 << "- LL RATE = " << (double) m_cpt_ll/m_cpt_cycles << std::endl 657 << "- SC RATE = " << (double) m_cpt_sc/m_cpt_cycles << std::endl 658 << "- CAS RATE = " << (double) m_cpt_cas/m_cpt_cycles << std::endl; 659 } 660 661 ///////////////////////////////// 662 tmpl(/**/) ::~VciMemCache() 663 ///////////////////////////////// 664 { 665 delete [] r_ixr_rsp_to_xram_rsp_rok; 666 667 delete [] r_xram_rsp_victim_data; 668 delete [] r_xram_rsp_to_tgt_rsp_data; 669 670 delete [] r_read_data; 671 delete [] r_read_to_tgt_rsp_data; 672 673 delete [] r_write_data; 674 delete [] r_write_be; 675 delete [] r_write_to_cc_send_data; 676 } 677 678 ////////////////////////////////// 679 tmpl(void) ::transition() 680 ////////////////////////////////// 681 { 682 using soclib::common::uint32_log2; 683 684 // RESET 685 if(! p_resetn.read()) 686 { 687 688 // Initializing FSMs 689 r_tgt_cmd_fsm = TGT_CMD_IDLE; 690 r_config_fsm = CONFIG_IDLE; 691 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 692 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 693 r_cc_receive_fsm = CC_RECEIVE_IDLE; 694 r_multi_ack_fsm = MULTI_ACK_IDLE; 695 r_read_fsm = READ_IDLE; 696 r_write_fsm = WRITE_IDLE; 697 r_cas_fsm = CAS_IDLE; 698 r_cleanup_fsm = CLEANUP_IDLE; 699 r_alloc_dir_fsm = ALLOC_DIR_RESET; 700 r_alloc_heap_fsm = ALLOC_HEAP_RESET; 701 r_alloc_trt_fsm = ALLOC_TRT_READ; 702 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 703 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 704 r_ixr_rsp_fsm = IXR_RSP_IDLE; 705 r_xram_rsp_fsm = XRAM_RSP_IDLE; 706 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 707 708 m_debug = false; 709 m_debug_previous_valid = false; 710 m_debug_previous_dirty = false; 711 m_debug_previous_count = 0; 712 713 // Initializing Tables 714 m_trt.init(); 715 m_upt.init(); 716 m_ivt.init(); 717 m_llsc_table.init(); 718 719 // initializing FIFOs and communication Buffers 720 721 m_cmd_read_addr_fifo.init(); 722 m_cmd_read_length_fifo.init(); 723 m_cmd_read_srcid_fifo.init(); 724 m_cmd_read_trdid_fifo.init(); 725 m_cmd_read_pktid_fifo.init(); 726 727 m_cmd_write_addr_fifo.init(); 728 m_cmd_write_eop_fifo.init(); 729 m_cmd_write_srcid_fifo.init(); 730 m_cmd_write_trdid_fifo.init(); 731 m_cmd_write_pktid_fifo.init(); 732 m_cmd_write_data_fifo.init(); 733 734 m_cmd_cas_addr_fifo.init() ; 735 m_cmd_cas_srcid_fifo.init() ; 736 m_cmd_cas_trdid_fifo.init() ; 737 m_cmd_cas_pktid_fifo.init() ; 738 m_cmd_cas_wdata_fifo.init() ; 739 m_cmd_cas_eop_fifo.init() ; 740 741 r_config_cmd = MEMC_CMD_NOP; 742 r_config_lock = false; 743 744 m_config_to_cc_send_inst_fifo.init(); 745 m_config_to_cc_send_srcid_fifo.init(); 746 747 r_tgt_cmd_to_tgt_rsp_req = false; 748 749 r_read_to_tgt_rsp_req = false; 750 r_read_to_ixr_cmd_req = false; 751 752 r_write_to_tgt_rsp_req = false; 753 r_write_to_ixr_cmd_req = false; 754 r_write_to_cc_send_multi_req = false; 755 r_write_to_cc_send_brdcast_req = false; 756 r_write_to_multi_ack_req = false; 757 758 m_write_to_cc_send_inst_fifo.init(); 759 m_write_to_cc_send_srcid_fifo.init(); 760 761 r_cleanup_to_tgt_rsp_req = false; 762 763 m_cc_receive_to_cleanup_fifo.init(); 764 765 r_multi_ack_to_tgt_rsp_req = false; 766 767 m_cc_receive_to_multi_ack_fifo.init(); 768 769 r_cas_to_tgt_rsp_req = false; 770 r_cas_cpt = 0 ; 771 r_cas_lfsr = -1 ; 772 r_cas_to_ixr_cmd_req = false; 773 r_cas_to_cc_send_multi_req = false; 774 r_cas_to_cc_send_brdcast_req = false; 775 776 m_cas_to_cc_send_inst_fifo.init(); 777 m_cas_to_cc_send_srcid_fifo.init(); 778 779 for(size_t i=0; i<m_trt_lines ; i++) 656 } 657 658 659 ///////////////////////////////////////// 660 
tmpl(void)::print_stats(bool activity_counters = true, bool stats = true) 661 ///////////////////////////////////////// 780 662 { 781 r_ixr_rsp_to_xram_rsp_rok[i] = false; 782 } 783 784 r_xram_rsp_to_tgt_rsp_req = false; 785 r_xram_rsp_to_cc_send_multi_req = false; 786 r_xram_rsp_to_cc_send_brdcast_req = false; 787 r_xram_rsp_to_ixr_cmd_req = false; 788 r_xram_rsp_trt_index = 0; 789 790 m_xram_rsp_to_cc_send_inst_fifo.init(); 791 m_xram_rsp_to_cc_send_srcid_fifo.init(); 792 793 r_alloc_dir_reset_cpt = 0; 794 r_alloc_heap_reset_cpt = 0; 795 796 r_tgt_rsp_key_sent = false; 797 798 // Activity counters 799 m_cpt_cycles = 0; 800 m_cpt_read = 0; 801 m_cpt_read_miss = 0; 802 m_cpt_write = 0; 803 m_cpt_write_miss = 0; 804 m_cpt_write_cells = 0; 805 m_cpt_write_dirty = 0; 806 m_cpt_update = 0; 807 m_cpt_update_mult = 0; 808 m_cpt_inval_brdcast = 0; 809 m_cpt_inval = 0; 810 m_cpt_inval_mult = 0; 811 m_cpt_cleanup = 0; 812 m_cpt_ll = 0; 813 m_cpt_sc = 0; 814 m_cpt_cas = 0; 815 m_cpt_trt_full = 0; 816 m_cpt_trt_rb = 0; 817 818 return; 819 } 820 821 bool cmd_read_fifo_put = false; 822 bool cmd_read_fifo_get = false; 823 824 bool cmd_write_fifo_put = false; 825 bool cmd_write_fifo_get = false; 826 827 bool cmd_cas_fifo_put = false; 828 bool cmd_cas_fifo_get = false; 829 830 bool cc_receive_to_cleanup_fifo_get = false; 831 bool cc_receive_to_cleanup_fifo_put = false; 832 833 bool cc_receive_to_multi_ack_fifo_get = false; 834 bool cc_receive_to_multi_ack_fifo_put = false; 835 836 bool write_to_cc_send_fifo_put = false; 837 bool write_to_cc_send_fifo_get = false; 838 bool write_to_cc_send_fifo_inst = false; 839 size_t write_to_cc_send_fifo_srcid = 0; 840 841 bool xram_rsp_to_cc_send_fifo_put = false; 842 bool xram_rsp_to_cc_send_fifo_get = false; 843 bool xram_rsp_to_cc_send_fifo_inst = false; 844 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 845 846 bool config_to_cc_send_fifo_put = false; 847 bool config_to_cc_send_fifo_get = false; 848 bool config_to_cc_send_fifo_inst = false; 849 size_t config_to_cc_send_fifo_srcid = 0; 850 851 bool cas_to_cc_send_fifo_put = false; 852 bool cas_to_cc_send_fifo_get = false; 853 bool cas_to_cc_send_fifo_inst = false; 854 size_t cas_to_cc_send_fifo_srcid = 0; 855 856 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 857 858 #if DEBUG_MEMC_GLOBAL 859 if(m_debug) 860 { 861 std::cout 862 << "---------------------------------------------" << std::dec << std::endl 863 << "MEM_CACHE " << name() 864 << " ; Time = " << m_cpt_cycles << std::endl 865 << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] << std::endl 866 << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] << std::endl 867 << " - CC_SEND FSM = " << cc_send_fsm_str[r_cc_send_fsm.read()] << std::endl 868 << " - CC_RECEIVE FSM = " << cc_receive_fsm_str[r_cc_receive_fsm.read()] << std::endl 869 << " - MULTI_ACK FSM = " << multi_ack_fsm_str[r_multi_ack_fsm.read()] << std::endl 870 << " - READ FSM = " << read_fsm_str[r_read_fsm.read()] << std::endl 871 << " - WRITE FSM = " << write_fsm_str[r_write_fsm.read()] << std::endl 872 << " - CAS FSM = " << cas_fsm_str[r_cas_fsm.read()] << std::endl 873 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl 874 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] << std::endl 875 << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] << std::endl 876 << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl 877 << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] << 
std::endl 878 << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] << std::endl 879 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] << std::endl 880 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 881 } 882 #endif 883 884 //////////////////////////////////////////////////////////////////////////////////// 885 // TGT_CMD FSM 886 //////////////////////////////////////////////////////////////////////////////////// 887 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors, 888 // and dispatch these commands to the proper FSM through dedicated FIFOs. 889 // 890 // There are 5 types of commands accepted in the XRAM segment: 891 // - READ : A READ request has a length of 1 VCI flit. It can be a single word 892 // or an entire cache line, depending on the PLEN value => READ FSM 893 // - WRITE : A WRITE request has a maximum length of 16 flits, and can only 894 // concern words in a same line => WRITE FSM 895 // - CAS : A CAS request has a length of 2 flits or 4 flits => CAS FSM 896 // - LL : An LL request has a length of 1 flit => READ FSM 897 // - SC : An SC request has a length of 2 flits. First flit contains the 898 // acces key, second flit the data to write => WRITE FSM. 899 // 900 // The READ/WRITE commands accepted in the configuration segment are targeting 901 // configuration or status registers. They must contain one single flit. 902 // - For almost all addressable registers, the response is returned immediately. 903 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 904 //////////////////////////////////////////////////////////////////////////////////// 905 906 //std::cout << std::endl << "tgt_cmd_fsm" << std::endl; 907 908 switch(r_tgt_cmd_fsm.read()) 909 { 910 ////////////////// 911 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG) 912 if(p_vci_tgt.cmdval) 913 { 914 915 #if DEBUG_MEMC_TGT_CMD 916 if(m_debug) 917 std::cout << " <MEMC " << name() 918 << " TGT_CMD_IDLE> Receive command from srcid " 919 << std::hex << p_vci_tgt.srcid.read() 920 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 921 #endif 922 // checking segmentation violation 923 addr_t address = p_vci_tgt.address.read(); 924 uint32_t plen = p_vci_tgt.plen.read(); 925 bool found = false; 926 bool config = false; 927 928 for(size_t seg_id = 0 ; (seg_id < m_nseg) and not found ; seg_id++) 929 { 930 if( m_seg[seg_id]->contains(address) and 931 m_seg[seg_id]->contains(address + plen - vci_param_int::B) ) 932 { 933 found = true; 934 if ( m_seg[seg_id]->special() ) config = true; 935 } 663 std::cout << "**********************************" << std::dec << std::endl; 664 std::cout << "*** MEM_CACHE " << name() << std::endl; 665 std::cout << "**********************************" << std::dec << std::endl; 666 if (activity_counters) { 667 std::cout << " ----------------------------------" << std::dec << std::endl; 668 std::cout << " --- Activity Counters ---" << std::dec << std::endl; 669 std::cout << " ----------------------------------" << std::dec << std::endl; 670 std::cout 671 << " - NUMBER OF CYCLES = " << m_cpt_cycles << std::endl 672 << std::endl 673 << " - LOCAL READ = " << m_cpt_read_local << std::endl 674 << " - REMOTE READ = " << m_cpt_read_remote << std::endl 675 << " - READ COST (FLITS * DIST) = " << m_cpt_read_cost << std::endl 676 << std::endl 677 << " - LOCAL WRITE = " << m_cpt_write_local << std::endl 678 << " - REMOTE WRITE = " << m_cpt_write_remote << 
std::endl 679 << " - WRITE FLITS LOCAL = " << m_cpt_write_flits_local << std::endl 680 << " - WRITE FLITS REMOTE = " << m_cpt_write_flits_remote << std::endl 681 << " - WRITE COST (FLITS * DIST) = " << m_cpt_write_cost << std::endl 682 << std::endl 683 << " - LOCAL LL = " << m_cpt_ll_local << std::endl 684 << " - REMOTE LL = " << m_cpt_ll_remote << std::endl 685 << " - LL COST (FLITS * DIST) = " << m_cpt_ll_cost << std::endl 686 << std::endl 687 << " - LOCAL SC = " << m_cpt_sc_local << std::endl 688 << " - REMOTE SC = " << m_cpt_sc_remote << std::endl 689 << " - SC COST (FLITS * DIST) = " << m_cpt_sc_cost << std::endl 690 << std::endl 691 << " - LOCAL CAS = " << m_cpt_cas_local << std::endl 692 << " - REMOTE CAS = " << m_cpt_cas_remote << std::endl 693 << " - CAS COST (FLITS * DIST) = " << m_cpt_cas_cost << std::endl 694 << std::endl 695 << " - REQUESTS TRIG. UPDATE = " << m_cpt_update << std::endl 696 << " - LOCAL UPDATE = " << m_cpt_update_local << std::endl 697 << " - REMOTE UPDATE = " << m_cpt_update_remote << std::endl 698 << " - UPDT COST (FLITS * DIST) = " << m_cpt_update_cost << std::endl 699 << std::endl 700 << " - REQUESTS TRIG. M_INV = " << m_cpt_m_inval << std::endl 701 << " - LOCAL M_INV = " << m_cpt_m_inval_local << std::endl 702 << " - REMOTE M_INV = " << m_cpt_m_inval_remote << std::endl 703 << " - M_INV COST (FLITS * DIST) = " << m_cpt_m_inval_cost << std::endl 704 << std::endl 705 << " - BROADCAT INVAL = " << m_cpt_br_inval << std::endl 706 << std::endl 707 << " - LOCAL CLEANUP = " << m_cpt_cleanup_local << std::endl 708 << " - REMOTE CLEANUP = " << m_cpt_cleanup_remote << std::endl 709 << " - CLNUP COST (FLITS * DIST) = " << m_cpt_cleanup_cost << std::endl 710 << std::endl 711 << std::endl 712 << " - READ MISS = " << m_cpt_read_miss << std::endl 713 << " - WRITE MISS = " << m_cpt_write_miss << std::endl 714 << " - WRITE DIRTY = " << m_cpt_write_dirty << std::endl 715 << " - RD BLOCKED BY HIT IN TRT = " << m_cpt_trt_rb << std::endl 716 << " - TRANS BLOCKED BY FULL TRT = " << m_cpt_trt_full << std::endl 717 << " - PUT = " << m_cpt_put << std::endl 718 << " - GET = " << m_cpt_get << std::endl; 936 719 } 937 720 938 if ( not found ) /////////// out of segment error 939 { 940 r_tgt_cmd_fsm = TGT_CMD_ERROR; 941 } 942 else if ( config ) /////////// configuration command 943 { 944 if ( not p_vci_tgt.eop.read() ) r_tgt_cmd_fsm = TGT_CMD_ERROR; 945 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 946 } 947 else //////////// memory access 948 { 949 if ( p_vci_tgt.cmd.read() == vci_param_int::CMD_READ ) 950 { 951 // check that the pktid is either : 952 // TYPE_READ_DATA_UNC 953 // TYPE_READ_DATA_MISS 954 // TYPE_READ_INS_UNC 955 // TYPE_READ_INS_MISS 956 // ==> bit2 must be zero with the TSAR encoding 957 // ==> mask = 0b0100 = 0x4 958 assert( ((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 959 "The type specified in the pktid field is incompatible with the READ CMD"); 960 r_tgt_cmd_fsm = TGT_CMD_READ; 961 } 962 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 963 { 964 // check that the pktid is TYPE_WRITE 965 // ==> TYPE_WRITE = X100 with the TSAR encoding 966 // ==> mask = 0b0111 = 0x7 967 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x4) and 968 "The type specified in the pktid field is incompatible with the WRITE CMD"); 969 r_tgt_cmd_fsm = TGT_CMD_WRITE; 970 } 971 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 972 { 973 // check that the pktid is TYPE_LL 974 // ==> TYPE_LL = X110 with the TSAR encoding 975 // ==> mask = 0b0111 = 0x7 976 assert(((p_vci_tgt.pktid.read() & 
0x7) == 0x6) and 977 "The type specified in the pktid field is incompatible with the LL CMD"); 978 r_tgt_cmd_fsm = TGT_CMD_READ; 979 } 980 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 981 { 982 // check that the pktid is either : 983 // TYPE_CAS 984 // TYPE_SC 985 // ==> TYPE_CAS = X101 with the TSAR encoding 986 // ==> TYPE_SC = X111 with the TSAR encoding 987 // ==> mask = 0b0101 = 0x5 988 assert(((p_vci_tgt.pktid.read() & 0x5) == 0x5) and 989 "The type specified in the pktid field is incompatible with the NOP CMD"); 990 991 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 992 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 993 } 994 else 995 { 996 r_tgt_cmd_fsm = TGT_CMD_ERROR; 997 } 721 if (stats) { 722 std::cout << " ----------------------------------" << std::dec << std::endl; 723 std::cout << " --- Calculated Stats ---" << std::dec << std::endl; 724 std::cout << " ----------------------------------" << std::dec << std::endl; 725 std::cout 726 << " - READ TOTAL = " << m_cpt_read_local + m_cpt_read_remote << std::endl 727 << " - READ RATE = " << (double) (m_cpt_read_local + m_cpt_read_remote) / m_cpt_cycles << std::endl 728 << " - LOCAL READ RATE = " << (double) m_cpt_read_local / m_cpt_cycles << std::endl 729 << " - REMOTE READ RATE = " << (double) m_cpt_read_remote / m_cpt_cycles << std::endl 730 << " - READ MISS RATE = " << (double) m_cpt_read_miss / (m_cpt_read_local + m_cpt_read_remote) << std::endl 731 << std::endl 732 << " - WRITE TOTAL = " << m_cpt_write_local + m_cpt_write_remote << std::endl 733 << " - WRITE RATE = " << (double) (m_cpt_write_local + m_cpt_write_remote) / m_cpt_cycles << std::endl 734 << " - LOCAL WRITE RATE = " << (double) m_cpt_write_local / m_cpt_cycles << std::endl 735 << " - REMOTE WRITE RATE = " << (double) m_cpt_write_remote / m_cpt_cycles << std::endl 736 << " - WRITE MISS RATE = " << (double) m_cpt_write_miss / (m_cpt_write_local + m_cpt_write_remote) << std::endl 737 << " - WRITE BURST TOTAL = " << m_cpt_write_flits_local + m_cpt_write_flits_remote << std::endl 738 << " - WRITE BURST AVERAGE = " << (double) (m_cpt_write_flits_local + m_cpt_write_flits_remote) / (m_cpt_write_local + m_cpt_write_remote) << std::endl 739 << " - LOCAL WRITE BURST AV. = " << (double) m_cpt_write_flits_local / (m_cpt_write_local + m_cpt_write_remote) << std::endl 740 << " - REMOTE WRITE BURST AV = " << (double) m_cpt_write_flits_remote / (m_cpt_write_local + m_cpt_write_remote) << std::endl 741 << std::endl 742 << " - UPDATE RATE = " << (double) m_cpt_update / m_cpt_cycles << std::endl 743 << " - AV. UPDATE PER UP REQ = " << (double) (m_cpt_update_local + m_cpt_update_remote) / m_cpt_update << std::endl 744 << " - AV. LOC UPDT PER UP REQ = " << (double) m_cpt_update_local / m_cpt_update << std::endl 745 << " - AV. REMOTE UPDT PER UP REQ = " << (double) m_cpt_update_remote / m_cpt_update << std::endl 746 << std::endl 747 << " - INVAL MULTICAST RATE = " << (double) m_cpt_m_inval / m_cpt_cycles << std::endl 748 << " - AVE. INVAL PER M_INV = " << (double) (m_cpt_m_inval_local + m_cpt_m_inval_remote) / m_cpt_m_inval << std::endl 749 << " - AV. LOC INV PER M_INV = " << (double) m_cpt_m_inval_local / m_cpt_m_inval << std::endl 750 << " - AV. 
REM INV PER M_INV = " << (double) m_cpt_m_inval_remote / m_cpt_m_inval << std::endl 751 << std::endl 752 << " - INVAL BROADCAST RATE = " << (double) m_cpt_br_inval / m_cpt_cycles << std::endl 753 << " - WRITE DIRTY RATE = " << (double) m_cpt_write_dirty / m_cpt_cycles << std::endl 754 << std::endl 755 << " - CLEANUP RATE = " << (double) (m_cpt_cleanup_local + m_cpt_cleanup_remote) / m_cpt_cycles << std::endl 756 << " - LOCAL CLEANUP RATE = " << (double) m_cpt_cleanup_local / m_cpt_cycles << std::endl 757 << " - REMOTE CLEANUP RATE = " << (double) m_cpt_cleanup_remote / m_cpt_cycles << std::endl 758 << " - LL RATE = " << (double) (m_cpt_ll_local + m_cpt_ll_remote) / m_cpt_cycles << std::endl 759 << " - LOCAL LL RATE = " << (double) m_cpt_ll_local / m_cpt_cycles << std::endl 760 << " - REMOTE LL RATE = " << (double) m_cpt_ll_remote / m_cpt_cycles << std::endl 761 << " - SC RATE = " << (double) (m_cpt_sc_local + m_cpt_sc_remote) / m_cpt_cycles << std::endl 762 << " - LOCAL SC RATE = " << (double) m_cpt_sc_local / m_cpt_cycles << std::endl 763 << " - REMOTE SC RATE = " << (double) m_cpt_sc_remote / m_cpt_cycles << std::endl 764 << " - CAS RATE = " << (double) (m_cpt_cas_local + m_cpt_cas_remote) / m_cpt_cycles << std::endl 765 << " - LOCAL CAS RATE = " << (double) m_cpt_cas_local / m_cpt_cycles << std::endl 766 << " - REMOTE CAS RATE = " << (double) m_cpt_cas_remote / m_cpt_cycles << std::endl; 998 767 } 999 768 } 1000 break; 1001 1002 /////////////////// 1003 case TGT_CMD_ERROR: // response error must be sent 1004 1005 // wait if pending request 1006 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1007 1008 // consume all the command packet flits before sending response error 1009 if ( p_vci_tgt.cmdval and p_vci_tgt.eop ) 769 770 771 ///////////////////////////////// 772 tmpl(/**/)::~VciMemCache() 773 ///////////////////////////////// 1010 774 { 1011 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1012 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1013 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1014 r_tgt_cmd_to_tgt_rsp_req = true; 1015 r_tgt_cmd_to_tgt_rsp_error = 1; 1016 r_tgt_cmd_fsm = TGT_CMD_IDLE; 775 delete [] r_ixr_rsp_to_xram_rsp_rok; 776 777 delete [] r_xram_rsp_victim_data; 778 delete [] r_xram_rsp_to_tgt_rsp_data; 779 780 delete [] r_read_data; 781 delete [] r_read_to_tgt_rsp_data; 782 783 delete [] r_write_data; 784 delete [] r_write_be; 785 delete [] r_write_to_cc_send_data; 786 print_stats(); 787 } 788 789 ////////////////////////////////// 790 tmpl(void) ::transition() 791 ////////////////////////////////// 792 { 793 using soclib::common::uint32_log2; 794 795 // RESET 796 if (! 
p_resetn.read()) 797 { 798 799 // Initializing FSMs 800 r_tgt_cmd_fsm = TGT_CMD_IDLE; 801 r_config_fsm = CONFIG_IDLE; 802 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 803 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 804 r_cc_receive_fsm = CC_RECEIVE_IDLE; 805 r_multi_ack_fsm = MULTI_ACK_IDLE; 806 r_read_fsm = READ_IDLE; 807 r_write_fsm = WRITE_IDLE; 808 r_cas_fsm = CAS_IDLE; 809 r_cleanup_fsm = CLEANUP_IDLE; 810 r_alloc_dir_fsm = ALLOC_DIR_RESET; 811 r_alloc_heap_fsm = ALLOC_HEAP_RESET; 812 r_alloc_trt_fsm = ALLOC_TRT_READ; 813 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 814 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 815 r_ixr_rsp_fsm = IXR_RSP_IDLE; 816 r_xram_rsp_fsm = XRAM_RSP_IDLE; 817 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 818 819 m_debug = false; 820 m_debug_previous_valid = false; 821 m_debug_previous_dirty = false; 822 m_debug_previous_count = 0; 823 824 // Initializing Tables 825 m_trt.init(); 826 m_upt.init(); 827 m_ivt.init(); 828 m_llsc_table.init(); 829 830 // initializing FIFOs and communication Buffers 831 832 m_cmd_read_addr_fifo.init(); 833 m_cmd_read_length_fifo.init(); 834 m_cmd_read_srcid_fifo.init(); 835 m_cmd_read_trdid_fifo.init(); 836 m_cmd_read_pktid_fifo.init(); 837 838 m_cmd_write_addr_fifo.init(); 839 m_cmd_write_eop_fifo.init(); 840 m_cmd_write_srcid_fifo.init(); 841 m_cmd_write_trdid_fifo.init(); 842 m_cmd_write_pktid_fifo.init(); 843 m_cmd_write_data_fifo.init(); 844 845 m_cmd_cas_addr_fifo.init() ; 846 m_cmd_cas_srcid_fifo.init() ; 847 m_cmd_cas_trdid_fifo.init() ; 848 m_cmd_cas_pktid_fifo.init() ; 849 m_cmd_cas_wdata_fifo.init() ; 850 m_cmd_cas_eop_fifo.init() ; 851 852 r_config_cmd = MEMC_CMD_NOP; 853 r_config_lock = false; 854 855 m_config_to_cc_send_inst_fifo.init(); 856 m_config_to_cc_send_srcid_fifo.init(); 857 858 r_tgt_cmd_to_tgt_rsp_req = false; 859 860 r_read_to_tgt_rsp_req = false; 861 r_read_to_ixr_cmd_req = false; 862 863 r_write_to_tgt_rsp_req = false; 864 r_write_to_ixr_cmd_req = false; 865 r_write_to_cc_send_multi_req = false; 866 r_write_to_cc_send_brdcast_req = false; 867 r_write_to_multi_ack_req = false; 868 869 m_write_to_cc_send_inst_fifo.init(); 870 m_write_to_cc_send_srcid_fifo.init(); 871 872 r_cleanup_to_tgt_rsp_req = false; 873 874 m_cc_receive_to_cleanup_fifo.init(); 875 876 r_multi_ack_to_tgt_rsp_req = false; 877 878 m_cc_receive_to_multi_ack_fifo.init(); 879 880 r_cas_to_tgt_rsp_req = false; 881 r_cas_cpt = 0 ; 882 r_cas_lfsr = -1 ; 883 r_cas_to_ixr_cmd_req = false; 884 r_cas_to_cc_send_multi_req = false; 885 r_cas_to_cc_send_brdcast_req = false; 886 887 m_cas_to_cc_send_inst_fifo.init(); 888 m_cas_to_cc_send_srcid_fifo.init(); 889 890 for (size_t i = 0; i < m_trt_lines ; i++) 891 { 892 r_ixr_rsp_to_xram_rsp_rok[i] = false; 893 } 894 895 r_xram_rsp_to_tgt_rsp_req = false; 896 r_xram_rsp_to_cc_send_multi_req = false; 897 r_xram_rsp_to_cc_send_brdcast_req = false; 898 r_xram_rsp_to_ixr_cmd_req = false; 899 r_xram_rsp_trt_index = 0; 900 901 m_xram_rsp_to_cc_send_inst_fifo.init(); 902 m_xram_rsp_to_cc_send_srcid_fifo.init(); 903 904 r_alloc_dir_reset_cpt = 0; 905 r_alloc_heap_reset_cpt = 0; 906 907 r_tgt_rsp_key_sent = false; 908 909 // Activity counters 910 m_cpt_cycles = 0; 911 m_cpt_read_local = 0; 912 m_cpt_read_remote = 0; 913 m_cpt_read_cost = 0; 914 m_cpt_write_local = 0; 915 m_cpt_write_remote = 0; 916 m_cpt_write_flits_local = 0; 917 m_cpt_write_flits_remote = 0; 918 m_cpt_write_cost = 0; 919 m_cpt_ll_local = 0; 920 m_cpt_ll_remote = 0; 921 m_cpt_ll_cost = 0; 922 m_cpt_sc_local = 0; 923 m_cpt_sc_remote = 0; 924 m_cpt_sc_cost = 0; 925 m_cpt_cas_local = 0; 
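// Illustration (standalone sketch, not part of vci_mem_cache): the local/remote counter
// pairs and the "cost" counters initialized around here are updated later through
// is_local_req() and req_distance(), whose definitions are not shown in this hunk.
// The snippet below only sketches the intent, under the assumption (hypothetical) that
// the VCI srcid carries (x,y) mesh coordinates and that cost is a Manhattan distance;
// the actual srcid encoding of the platform may differ.
#include <cstdint>
#include <cstdlib>

struct MeshPos { int x; int y; };

// hypothetical decoding of a srcid into mesh coordinates (4 bits per axis)
static MeshPos srcid_to_pos(uint32_t srcid)
{
    return MeshPos{ (int) ((srcid >> 4) & 0xF), (int) (srcid & 0xF) };
}

// "local" means the request comes from the same cluster as this memory cache
static bool sketch_is_local_req(uint32_t srcid, MeshPos memc)
{
    MeshPos p = srcid_to_pos(srcid);
    return (p.x == memc.x) and (p.y == memc.y);
}

// the *_cost counters accumulate (flits * distance); this returns the distance part
static uint32_t sketch_req_distance(uint32_t srcid, MeshPos memc)
{
    MeshPos p = srcid_to_pos(srcid);
    return std::abs(p.x - memc.x) + std::abs(p.y - memc.y);
}
// (end of illustration)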
926 m_cpt_cas_remote = 0; 927 m_cpt_cas_cost = 0; 928 m_cpt_update = 0; 929 m_cpt_update_local = 0; 930 m_cpt_update_remote = 0; 931 m_cpt_update_cost = 0; 932 m_cpt_m_inval = 0; 933 m_cpt_m_inval_local = 0; 934 m_cpt_m_inval_remote = 0; 935 m_cpt_m_inval_cost = 0; 936 m_cpt_br_inval = 0; 937 m_cpt_cleanup_local = 0; 938 m_cpt_cleanup_remote = 0; 939 m_cpt_cleanup_cost = 0; 940 941 m_cpt_read_miss = 0; 942 m_cpt_write_miss = 0; 943 m_cpt_write_dirty = 0; 944 m_cpt_trt_rb = 0; 945 m_cpt_trt_full = 0; 946 m_cpt_get = 0; 947 m_cpt_put = 0; 948 949 return; 950 } 951 952 bool cmd_read_fifo_put = false; 953 bool cmd_read_fifo_get = false; 954 955 bool cmd_write_fifo_put = false; 956 bool cmd_write_fifo_get = false; 957 958 bool cmd_cas_fifo_put = false; 959 bool cmd_cas_fifo_get = false; 960 961 bool cc_receive_to_cleanup_fifo_get = false; 962 bool cc_receive_to_cleanup_fifo_put = false; 963 964 bool cc_receive_to_multi_ack_fifo_get = false; 965 bool cc_receive_to_multi_ack_fifo_put = false; 966 967 bool write_to_cc_send_fifo_put = false; 968 bool write_to_cc_send_fifo_get = false; 969 bool write_to_cc_send_fifo_inst = false; 970 size_t write_to_cc_send_fifo_srcid = 0; 971 972 bool xram_rsp_to_cc_send_fifo_put = false; 973 bool xram_rsp_to_cc_send_fifo_get = false; 974 bool xram_rsp_to_cc_send_fifo_inst = false; 975 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 976 977 bool config_to_cc_send_fifo_put = false; 978 bool config_to_cc_send_fifo_get = false; 979 bool config_to_cc_send_fifo_inst = false; 980 size_t config_to_cc_send_fifo_srcid = 0; 981 982 bool cas_to_cc_send_fifo_put = false; 983 bool cas_to_cc_send_fifo_get = false; 984 bool cas_to_cc_send_fifo_inst = false; 985 size_t cas_to_cc_send_fifo_srcid = 0; 986 987 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 988 989 #if DEBUG_MEMC_GLOBAL 990 if (m_debug) 991 { 992 std::cout 993 << "---------------------------------------------" << std::dec << std::endl 994 << "MEM_CACHE " << name() 995 << " ; Time = " << m_cpt_cycles << std::endl 996 << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] << std::endl 997 << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] << std::endl 998 << " - CC_SEND FSM = " << cc_send_fsm_str[r_cc_send_fsm.read()] << std::endl 999 << " - CC_RECEIVE FSM = " << cc_receive_fsm_str[r_cc_receive_fsm.read()] << std::endl 1000 << " - MULTI_ACK FSM = " << multi_ack_fsm_str[r_multi_ack_fsm.read()] << std::endl 1001 << " - READ FSM = " << read_fsm_str[r_read_fsm.read()] << std::endl 1002 << " - WRITE FSM = " << write_fsm_str[r_write_fsm.read()] << std::endl 1003 << " - CAS FSM = " << cas_fsm_str[r_cas_fsm.read()] << std::endl 1004 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl 1005 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] << std::endl 1006 << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] << std::endl 1007 << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl 1008 << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] << std::endl 1009 << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] << std::endl 1010 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] << std::endl 1011 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 1012 } 1013 #endif 1014 1015 //////////////////////////////////////////////////////////////////////////////////// 1016 // TGT_CMD FSM 1017 
//////////////////////////////////////////////////////////////////////////////////// 1018 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors, 1019 // and dispatch these commands to the proper FSM through dedicated FIFOs. 1020 // 1021 // There are 5 types of commands accepted in the XRAM segment: 1022 // - READ : A READ request has a length of 1 VCI flit. It can be a single word 1023 // or an entire cache line, depending on the PLEN value => READ FSM 1024 // - WRITE : A WRITE request has a maximum length of 16 flits, and can only 1025 // concern words in a same line => WRITE FSM 1026 // - CAS : A CAS request has a length of 2 flits or 4 flits => CAS FSM 1027 // - LL : An LL request has a length of 1 flit => READ FSM 1028 // - SC : An SC request has a length of 2 flits. First flit contains the 1029 // acces key, second flit the data to write => WRITE FSM. 1030 // 1031 // The READ/WRITE commands accepted in the configuration segment are targeting 1032 // configuration or status registers. They must contain one single flit. 1033 // - For almost all addressable registers, the response is returned immediately. 1034 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 1035 //////////////////////////////////////////////////////////////////////////////////// 1036 1037 1038 switch (r_tgt_cmd_fsm.read()) 1039 { 1040 ////////////////// 1041 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG) 1042 if (p_vci_tgt.cmdval) 1043 { 1017 1044 1018 1045 #if DEBUG_MEMC_TGT_CMD 1019 if(m_debug) 1020 std::cout << " <MEMC " << name() 1021 << " TGT_CMD_ERROR> Segmentation violation:" 1022 << " address = " << std::hex << p_vci_tgt.address.read() 1023 << " / srcid = " << p_vci_tgt.srcid.read() 1024 << " / trdid = " << p_vci_tgt.trdid.read() 1025 << " / pktid = " << p_vci_tgt.pktid.read() 1026 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1027 #endif 1028 1029 } 1030 break; 1031 1032 //////////////////// 1033 case TGT_CMD_CONFIG: // execute config request and return response 1034 { 1035 addr_t seg_base = m_seg[m_seg_config]->baseAddress(); 1036 addr_t address = p_vci_tgt.address.read(); 1037 size_t cell = (address - seg_base)/vci_param_int::B; 1038 1039 bool need_rsp; 1040 size_t error; 1041 uint32_t rdata = 0; // default value 1042 uint32_t wdata = p_vci_tgt.wdata.read(); 1043 1044 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1045 and (cell == MEMC_LOCK) ) 1046 if (m_debug) 1047 std::cout << " <MEMC " << name() 1048 << " TGT_CMD_IDLE> Receive command from srcid " 1049 << std::hex << p_vci_tgt.srcid.read() 1050 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 1051 #endif 1052 // checking segmentation violation 1053 addr_t address = p_vci_tgt.address.read(); 1054 uint32_t plen = p_vci_tgt.plen.read(); 1055 bool found = false; 1056 bool config = false; 1057 1058 for (size_t seg_id = 0; (seg_id < m_nseg) && !found; seg_id++) 1059 { 1060 if (m_seg[seg_id]->contains(address) && 1061 m_seg[seg_id]->contains(address + plen - vci_param_int::B)) 1062 { 1063 found = true; 1064 if (m_seg[seg_id]->special()) config = true; 1065 } 1066 } 1067 1068 if (!found) /////////// out of segment error 1069 { 1070 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1071 } 1072 else if (config) /////////// configuration command 1073 { 1074 if (!p_vci_tgt.eop.read()) r_tgt_cmd_fsm = TGT_CMD_ERROR; 1075 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 1076 } 1077 else //////////// memory access 1078 { 1079 if (p_vci_tgt.cmd.read() == 
vci_param_int::CMD_READ) 1080 { 1081 // check that the pktid is either : 1082 // TYPE_READ_DATA_UNC 1083 // TYPE_READ_DATA_MISS 1084 // TYPE_READ_INS_UNC 1085 // TYPE_READ_INS_MISS 1086 // ==> bit2 must be zero with the TSAR encoding 1087 // ==> mask = 0b0100 = 0x4 1088 assert(((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 1089 "The type specified in the pktid field is incompatible with the READ CMD"); 1090 r_tgt_cmd_fsm = TGT_CMD_READ; 1091 } 1092 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1093 { 1094 // check that the pktid is TYPE_WRITE 1095 // ==> TYPE_WRITE = X100 with the TSAR encoding 1096 // ==> mask = 0b0111 = 0x7 1097 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x4) and 1098 "The type specified in the pktid field is incompatible with the WRITE CMD"); 1099 r_tgt_cmd_fsm = TGT_CMD_WRITE; 1100 } 1101 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1102 { 1103 // check that the pktid is TYPE_LL 1104 // ==> TYPE_LL = X110 with the TSAR encoding 1105 // ==> mask = 0b0111 = 0x7 1106 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x6) and 1107 "The type specified in the pktid field is incompatible with the LL CMD"); 1108 r_tgt_cmd_fsm = TGT_CMD_READ; 1109 } 1110 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1111 { 1112 // check that the pktid is either : 1113 // TYPE_CAS 1114 // TYPE_SC 1115 // ==> TYPE_CAS = X101 with the TSAR encoding 1116 // ==> TYPE_SC = X111 with the TSAR encoding 1117 // ==> mask = 0b0101 = 0x5 1118 assert(((p_vci_tgt.pktid.read() & 0x5) == 0x5) and 1119 "The type specified in the pktid field is incompatible with the NOP CMD"); 1120 1121 if ((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 1122 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 1123 } 1124 else 1125 { 1126 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1127 } 1128 } 1129 } 1130 break; 1131 1132 /////////////////// 1133 case TGT_CMD_ERROR: // response error must be sent 1134 1135 // wait if pending request 1136 if (r_tgt_cmd_to_tgt_rsp_req.read()) break; 1137 1138 // consume all the command packet flits before sending response error 1139 if (p_vci_tgt.cmdval and p_vci_tgt.eop) 1140 { 1141 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1142 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1143 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1144 r_tgt_cmd_to_tgt_rsp_req = true; 1145 r_tgt_cmd_to_tgt_rsp_error = 1; 1146 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1147 1148 #if DEBUG_MEMC_TGT_CMD 1149 if (m_debug) 1150 std::cout << " <MEMC " << name() 1151 << " TGT_CMD_ERROR> Segmentation violation:" 1152 << " address = " << std::hex << p_vci_tgt.address.read() 1153 << " / srcid = " << p_vci_tgt.srcid.read() 1154 << " / trdid = " << p_vci_tgt.trdid.read() 1155 << " / pktid = " << p_vci_tgt.pktid.read() 1156 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1157 #endif 1158 1159 } 1160 break; 1161 1162 //////////////////// 1163 case TGT_CMD_CONFIG: // execute config request and return response 1164 { 1165 addr_t seg_base = m_seg[m_seg_config]->baseAddress(); 1166 addr_t address = p_vci_tgt.address.read(); 1167 size_t cell = (address - seg_base) / vci_param_int::B; 1168 1169 bool need_rsp; 1170 size_t error; 1171 uint32_t rdata = 0; // default value 1172 uint32_t wdata = p_vci_tgt.wdata.read(); 1173 1174 if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1175 and (cell == MEMC_LOCK)) 1176 { 1177 rdata = (uint32_t) r_config_lock.read(); 1178 need_rsp = true; 1179 error = 0; 1180 r_config_lock = true; 1181 if (rdata == 0) 1182 { 1183 
r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1184 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1185 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1186 } 1187 } 1188 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1189 and (cell == MEMC_LOCK)) 1190 { 1191 need_rsp = true; 1192 error = 0; 1193 r_config_lock = false; 1194 } 1195 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1196 and (cell == MEMC_ADDR_LO)) 1197 { 1198 assert( ((wdata % (m_words * vci_param_int::B)) == 0) and 1199 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1200 1201 need_rsp = true; 1202 error = 0; 1203 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1204 ((addr_t)wdata); 1205 } 1206 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1207 and (cell == MEMC_ADDR_HI)) 1208 1209 { 1210 need_rsp = true; 1211 error = 0; 1212 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1213 (((addr_t) wdata) << 32); 1214 } 1215 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1216 and (cell == MEMC_BUF_LENGTH)) 1217 { 1218 need_rsp = true; 1219 error = 0; 1220 size_t lines = wdata / (m_words << 2); 1221 if (wdata % (m_words << 2)) lines++; 1222 r_config_cmd_lines = lines; 1223 r_config_rsp_lines = lines; 1224 } 1225 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1226 and (cell == MEMC_CMD_TYPE)) 1227 { 1228 need_rsp = false; 1229 error = 0; 1230 r_config_cmd = wdata; 1231 1232 // prepare delayed response from CONFIG FSM 1233 r_config_srcid = p_vci_tgt.srcid.read(); 1234 r_config_trdid = p_vci_tgt.trdid.read(); 1235 r_config_pktid = p_vci_tgt.pktid.read(); 1236 } 1237 else 1238 { 1239 need_rsp = true; 1240 error = 1; 1241 } 1242 1243 if (need_rsp) 1244 { 1245 // blocked if previous pending request to TGT_RSP FSM 1246 if (r_tgt_cmd_to_tgt_rsp_req.read()) break; 1247 1248 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1249 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1250 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1251 r_tgt_cmd_to_tgt_rsp_req = true; 1252 r_tgt_cmd_to_tgt_rsp_error = error; 1253 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1254 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1255 } 1256 else 1257 { 1258 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1259 } 1260 1261 #if DEBUG_MEMC_TGT_CMD 1262 if (m_debug) 1263 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1264 << " address = " << std::hex << p_vci_tgt.address.read() 1265 << " / wdata = " << p_vci_tgt.wdata.read() 1266 << " / need_rsp = " << need_rsp 1267 << " / error = " << error << std::endl; 1268 #endif 1269 break; 1270 } 1271 ////////////////// 1272 case TGT_CMD_READ: // Push a read request into read fifo 1273 1274 // check that the read does not cross a cache line limit. 
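// Illustration (standalone sketch, not part of vci_mem_cache): the TGT_CMD_READ check
// below rejects reads that cross a cache line. The constant 16 in that check corresponds
// to 16 words of 4 bytes (64-byte lines), and m_x[addr] is assumed to extract the word
// index inside the line; this sketch reproduces the same arithmetic.
#include <cstdint>
#include <cstddef>

static bool read_crosses_line(uint64_t addr, uint32_t plen)
{
    size_t first_word = (addr >> 2) & 0xF;   // word index inside a 16-word line
    size_t nb_words   = plen >> 2;           // plen is in bytes, words are 4 bytes
    return (first_word + nb_words) > 16;     // true => the burst spills into the next line
}
// Example: an 8-word (32-byte) burst starting at word 12 gives 12 + 8 = 20 > 16,
// so it crosses a line boundary and is rejected for plain READ commands.
// (end of illustration)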
1275 if (((m_x[(addr_t) p_vci_tgt.address.read()] + (p_vci_tgt.plen.read() >> 2)) > 16) and 1276 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1277 { 1278 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1279 << " illegal address/plen for VCI read command" << std::endl; 1280 exit(0); 1281 } 1282 // check single flit 1283 if (!p_vci_tgt.eop.read()) 1284 { 1285 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1286 << " read command packet must contain one single flit" << std::endl; 1287 exit(0); 1288 } 1289 // check plen for LL 1290 if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1291 (p_vci_tgt.plen.read() != 8)) 1292 { 1293 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1294 << " ll command packets must have a plen of 8" << std::endl; 1295 exit(0); 1296 } 1297 1298 if (p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok()) 1299 { 1300 1301 #if DEBUG_MEMC_TGT_CMD 1302 if (m_debug) 1303 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1304 << " address = " << std::hex << p_vci_tgt.address.read() 1305 << " / srcid = " << p_vci_tgt.srcid.read() 1306 << " / trdid = " << p_vci_tgt.trdid.read() 1307 << " / pktid = " << p_vci_tgt.pktid.read() 1308 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1309 #endif 1310 cmd_read_fifo_put = true; 1311 // <Activity counters> 1312 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) { 1313 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_ll_local++; 1314 else m_cpt_ll_remote++; 1315 m_cpt_ll_cost += req_distance(p_vci_tgt.srcid.read()); // LL on a single word 1316 } 1317 else { 1318 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_read_local++; 1319 else m_cpt_read_remote++; 1320 m_cpt_read_cost += m_words * req_distance(p_vci_tgt.srcid.read()); 1321 } 1322 // </Activity counters> 1323 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1324 } 1325 break; 1326 1327 /////////////////// 1328 case TGT_CMD_WRITE: 1329 if (p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1330 { 1331 1332 #if DEBUG_MEMC_TGT_CMD 1333 if (m_debug) 1334 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1335 << " address = " << std::hex << p_vci_tgt.address.read() 1336 << " / srcid = " << p_vci_tgt.srcid.read() 1337 << " / trdid = " << p_vci_tgt.trdid.read() 1338 << " / pktid = " << p_vci_tgt.pktid.read() 1339 << " / wdata = " << p_vci_tgt.wdata.read() 1340 << " / be = " << p_vci_tgt.be.read() 1341 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1342 #endif 1343 cmd_write_fifo_put = true; 1344 // <Activity counters> 1345 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) { 1346 m_cpt_sc_cost += req_distance(p_vci_tgt.srcid.read()); 1347 } 1348 else { 1349 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_write_flits_local++; 1350 else m_cpt_write_flits_remote++; 1351 m_cpt_write_cost += req_distance(p_vci_tgt.srcid.read()); 1352 } 1353 // </Activity counters> 1354 1355 if (p_vci_tgt.eop) { 1356 // <Activity counters> 1357 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) { 1358 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_sc_local++; 1359 else m_cpt_sc_remote++; 1360 1361 } 1362 else { 1363 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_write_local++; 1364 else m_cpt_write_remote++; 1365 } 1366 // </Activity counters> 1367 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1368 } 1369 } 1370 break; 1371 1372 ///////////////// 1373 case TGT_CMD_CAS: 1374 if ((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1375 { 1376 
std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" 1377 << "illegal format for CAS command " << std::endl; 1378 exit(0); 1379 } 1380 1381 if (p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1382 { 1383 1384 #if DEBUG_MEMC_TGT_CMD 1385 if (m_debug) 1386 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1387 << " address = " << std::hex << p_vci_tgt.address.read() 1388 << " srcid = " << p_vci_tgt.srcid.read() 1389 << " trdid = " << p_vci_tgt.trdid.read() 1390 << " pktid = " << p_vci_tgt.pktid.read() 1391 << " wdata = " << p_vci_tgt.wdata.read() 1392 << " be = " << p_vci_tgt.be.read() 1393 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1394 #endif 1395 cmd_cas_fifo_put = true; 1396 if (p_vci_tgt.eop) { 1397 // <Activity counters> 1398 if (is_local_req(p_vci_tgt.srcid.read())) m_cpt_cas_local++; 1399 else m_cpt_cas_remote++; 1400 m_cpt_cas_cost += req_distance(p_vci_tgt.srcid.read()); 1401 // </Activity counters> 1402 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1403 } 1404 } 1405 break; 1406 } // end switch tgt_cmd_fsm 1407 1408 ///////////////////////////////////////////////////////////////////////// 1409 // MULTI_ACK FSM 1410 ///////////////////////////////////////////////////////////////////////// 1411 // This FSM controls the response to the multicast update requests sent 1412 // by the memory cache to the L1 caches and update the UPT. 1413 // 1414 // - The FSM decrements the proper entry in UPT, 1415 // and clear the UPT entry when all responses have been received. 1416 // - If required, it sends a request to the TGT_RSP FSM to complete 1417 // a pending write transaction. 1418 // 1419 // All those multi-ack packets are one flit packet. 1420 // The index in the UPT is defined in the TRDID field. 
1421 //////////////////////////////////////////////////////////////////////// 1422 1423 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1424 1425 switch(r_multi_ack_fsm.read()) 1046 1426 { 1047 rdata = (uint32_t)r_config_lock.read(); 1048 need_rsp = true; 1049 error = 0; 1050 r_config_lock = true; 1051 if ( rdata == 0 ) 1052 { 1053 r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1054 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1055 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1056 } 1057 } 1058 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1059 and (cell == MEMC_LOCK) ) 1427 //////////////////// 1428 case MULTI_ACK_IDLE: 1429 { 1430 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1431 1432 // No CC_RECEIVE FSM request and no WRITE FSM request 1433 if (not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1434 break; 1435 1436 uint8_t updt_index; 1437 1438 // handling WRITE FSM request to decrement update table response 1439 // counter if no CC_RECEIVE FSM request 1440 if (not multi_ack_fifo_rok) 1441 { 1442 updt_index = r_write_to_multi_ack_upt_index.read(); 1443 r_write_to_multi_ack_req = false; 1444 } 1445 // Handling CC_RECEIVE FSM request 1446 else 1447 { 1448 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1449 updt_index = DspinDhccpParam::dspin_get(flit, 1450 DspinDhccpParam::MULTI_ACK_UPDT_INDEX); 1451 1452 cc_receive_to_multi_ack_fifo_get = true; 1453 } 1454 1455 assert((updt_index < m_upt.size()) and 1456 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1457 "index too large for UPT"); 1458 1459 r_multi_ack_upt_index = updt_index; 1460 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1461 1462 #if DEBUG_MEMC_MULTI_ACK 1463 if (m_debug) 1464 { 1465 if (multi_ack_fifo_rok) 1466 { 1467 std::cout << " <MEMC " << name() 1468 << " MULTI_ACK_IDLE> Response for UPT entry " 1469 << (size_t)updt_index << std::endl; 1470 } 1471 else 1472 { 1473 std::cout << " <MEMC " << name() 1474 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1475 << updt_index << std::endl; 1476 } 1477 } 1478 #endif 1479 break; 1480 } 1481 1482 //////////////////////// 1483 case MULTI_ACK_UPT_LOCK: 1484 { 1485 // get lock to the UPDATE table 1486 if (r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1487 1488 // decrement the number of expected responses 1489 size_t count = 0; 1490 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 1491 1492 if (not valid) 1493 { 1494 std::cout << "VCI_MEM_CACHE ERROR " << name() 1495 << " MULTI_ACK_UPT_LOCK state" << std::endl 1496 << "unsuccessful access to decrement the UPT" << std::endl; 1497 exit(0); 1498 } 1499 1500 if (count == 0) 1501 { 1502 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1503 } 1504 else 1505 { 1506 r_multi_ack_fsm = MULTI_ACK_IDLE; 1507 } 1508 1509 #if DEBUG_MEMC_MULTI_ACK 1510 if (m_debug) 1511 std::cout << " <MEMC " << name() 1512 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1513 << " entry = " << r_multi_ack_upt_index.read() 1514 << " / rsp_count = " << std::dec << count << std::endl; 1515 #endif 1516 break; 1517 } 1518 1519 ///////////////////////// 1520 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 1521 { 1522 if (r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1523 { 1524 std::cout << "VCI_MEM_CACHE ERROR " << name() 1525 << " MULTI_ACK_UPT_CLEAR state" 1526 << " bad UPT allocation" << std::endl; 1527 exit(0); 1528 } 1529 1530 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 1531 r_multi_ack_trdid = 
m_upt.trdid(r_multi_ack_upt_index.read()); 1532 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 1533 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1534 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1535 1536 // clear the UPT entry 1537 m_upt.clear(r_multi_ack_upt_index.read()); 1538 1539 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1540 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1541 1542 #if DEBUG_MEMC_MULTI_ACK 1543 if (m_debug) 1544 std::cout << " <MEMC " << name() 1545 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 1546 << std::dec << r_multi_ack_upt_index.read() << std::endl; 1547 #endif 1548 break; 1549 } 1550 ///////////////////////// 1551 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 1552 // Wait if pending request 1553 { 1554 if (r_multi_ack_to_tgt_rsp_req.read()) break; 1555 1556 r_multi_ack_to_tgt_rsp_req = true; 1557 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 1558 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 1559 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 1560 r_multi_ack_fsm = MULTI_ACK_IDLE; 1561 1562 #if DEBUG_MEMC_MULTI_ACK 1563 if (m_debug) 1564 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 1565 << " Request TGT_RSP FSM to send a response to srcid " 1566 << std::hex << r_multi_ack_srcid.read() << std::endl; 1567 #endif 1568 break; 1569 } 1570 } // end switch r_multi_ack_fsm 1571 1572 //////////////////////////////////////////////////////////////////////////////////// 1573 // CONFIG FSM 1574 //////////////////////////////////////////////////////////////////////////////////// 1575 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1576 // The target buffer can have any size, and there is one single command for 1577 // all cache lines covered by the target buffer. 1578 // 1579 // An INVAL or SYNC configuration operation is defined by the following registers: 1580 // - bool r_config_cmd : INVAL / SYNC / NOP 1581 // - uint64_t r_config_address : buffer base address 1582 // - uint32_t r_config_cmd_lines : number of lines to be handled 1583 // - uint32_t r_config_rsp_lines : number of lines not completed 1584 // 1585 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1586 // all cache lines covered by the buffer. The various lines of a given buffer 1587 // can be pipelined: the CONFIG FSM does not wait for the response to line (n) to send 1588 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1589 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1590 // 1591 // - INVAL request: 1592 // For each line, it accesses the DIR. 1593 // In case of miss, it does nothing, and a response request is sent to the TGT_RSP FSM. 1594 // In case of hit, with no copies in L1 caches, the line is invalidated and 1595 // a response request is sent to the TGT_RSP FSM. 1596 // If there are copies, a multi-inval or a broadcast-inval coherence transaction 1597 // is launched and registered in UPT. The multi-inval transaction completion 1598 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1599 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1600 // TODO : The target buffer address must be aligned on a cache line boundary. 1601 // This constraint could be relaxed, but it would require 2 PUT transactions 1602 // for the first and the last line... 
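// Illustration (standalone sketch, not part of vci_mem_cache): r_config_cmd_lines and
// r_config_rsp_lines are derived from the buffer length written to MEMC_BUF_LENGTH.
// The sketch below performs the same rounded-up division as the TGT_CMD_CONFIG code
// shown earlier, assuming 4-byte words (m_words << 2 bytes per line).
#include <cstdint>

static uint32_t lines_covered(uint32_t length_bytes, uint32_t words_per_line)
{
    uint32_t line_bytes = words_per_line << 2;             // bytes per cache line
    return (length_bytes + line_bytes - 1) / line_bytes;   // ceiling division
}
// Example: a 1000-byte buffer with 16-word (64-byte) lines covers 16 lines.
// The buffer base address itself must be line-aligned, as enforced by the assert on
// MEMC_ADDR_LO and noted in the TODO above.
// (end of illustration)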
1603 // 1604 // - SYNC request: 1605 // For each line, it access to the DIR. 1606 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1607 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1608 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1609 // when a PUT response is received. 1610 // The CONFIG SYNC response is sent only when the last PUT response is received. 1611 // 1612 // From the software point of view, a configuration request is a sequence 1613 // of 6 atomic accesses in an uncached segment. A dedicated lock is used 1614 // to handle only one configuration command at a given time: 1615 // - Read MEMC_LOCK : Get the lock 1616 // - Write MEMC_ADDR_LO : Set the buffer address LSB 1617 // - Write MEMC_ADDR_HI : Set the buffer address MSB 1618 // - Write MEMC_BUF_LENGTH : set buffer length (bytes) 1619 // - Write MEMC_CMD_TYPE : launch the actual operation 1620 // - WRITE MEMC_LOCK : release the lock 1621 //////////////////////////////////////////////////////////////////////////////////// 1622 1623 //std::cout << std::endl << "config_fsm" << std::endl; 1624 1625 switch( r_config_fsm.read()) 1060 1626 { 1061 need_rsp = true; 1062 error = 0; 1063 r_config_lock = false; 1064 } 1065 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1066 and (cell == MEMC_ADDR_LO) ) 1627 ///////////////// 1628 case CONFIG_IDLE: // waiting a config request 1629 { 1630 if (r_config_cmd.read() != MEMC_CMD_NOP ) 1631 { 1632 r_config_fsm = CONFIG_LOOP; 1633 1634 #if DEBUG_MEMC_CONFIG 1635 if (m_debug) 1636 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1637 << " / address = " << std::hex << r_config_address.read() 1638 << " / lines = " << std::dec << r_config_cmd_lines.read() 1639 << " / type = " << r_config_cmd.read() << std::endl; 1640 #endif 1641 } 1642 break; 1643 } 1644 ///////////////// 1645 case CONFIG_LOOP: // test if last line to be handled 1646 { 1647 if (r_config_cmd_lines.read() == 0 ) 1648 { 1649 r_config_cmd = MEMC_CMD_NOP; 1650 r_config_fsm = CONFIG_WAIT; 1651 } 1652 else 1653 { 1654 r_config_fsm = CONFIG_DIR_REQ; 1655 } 1656 1657 #if DEBUG_MEMC_CONFIG 1658 if (m_debug) 1659 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1660 << " / address = " << std::hex << r_config_address.read() 1661 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 1662 << " / command = " << r_config_cmd.read() << std::endl; 1663 #endif 1664 break; 1665 } 1666 ///////////////// 1667 case CONFIG_WAIT: // wait completion (last response) 1668 { 1669 if (r_config_rsp_lines.read() == 0 ) // last response received 1670 { 1671 r_config_fsm = CONFIG_RSP; 1672 } 1673 1674 #if DEBUG_MEMC_CONFIG 1675 if (m_debug) 1676 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1677 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1678 #endif 1679 break; 1680 } 1681 //////////////// 1682 case CONFIG_RSP: // request TGT_RSP FSM to return response 1683 { 1684 if (not r_config_to_tgt_rsp_req.read()) 1685 { 1686 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1687 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1688 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1689 r_config_to_tgt_rsp_error = false; 1690 r_config_to_tgt_rsp_req = true; 1691 r_config_fsm = CONFIG_IDLE; 1692 1693 #if DEBUG_MEMC_CONFIG 1694 if (m_debug) 1695 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1696 << " error = " << 
r_config_to_tgt_rsp_error.read() 1697 << " / rsrcid = " << std::hex << r_config_srcid.read() 1698 << " / rtrdid = " << std::hex << r_config_trdid.read() 1699 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1700 #endif 1701 } 1702 break; 1703 1704 } 1705 //////////////////// 1706 case CONFIG_DIR_REQ: // Request directory lock 1707 { 1708 if (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG ) 1709 { 1710 r_config_fsm = CONFIG_DIR_ACCESS; 1711 } 1712 1713 #if DEBUG_MEMC_CONFIG 1714 if (m_debug) 1715 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 1716 << " Request DIR access" << std::endl; 1717 #endif 1718 break; 1719 } 1720 /////////////////////// 1721 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1722 { 1723 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1724 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1725 1726 size_t way = 0; 1727 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 1728 1729 r_config_dir_way = way; 1730 r_config_dir_copy_inst = entry.owner.inst; 1731 r_config_dir_copy_srcid = entry.owner.srcid; 1732 r_config_dir_is_cnt = entry.is_cnt; 1733 r_config_dir_lock = entry.lock; 1734 r_config_dir_count = entry.count; 1735 r_config_dir_ptr = entry.ptr; 1736 1737 if (entry.valid and // hit & inval command 1738 (r_config_cmd.read() == MEMC_CMD_INVAL)) 1739 { 1740 r_config_fsm = CONFIG_IVT_LOCK; 1741 } 1742 else if (entry.valid and // hit & sync command 1743 entry.dirty and 1744 (r_config_cmd.read() == MEMC_CMD_SYNC)) 1745 { 1746 r_config_fsm = CONFIG_TRT_LOCK; 1747 } 1748 else // miss : return to LOOP 1749 { 1750 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1751 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1752 r_config_address = r_config_address.read() + (m_words<<2); 1753 r_config_fsm = CONFIG_LOOP; 1754 } 1755 1756 #if DEBUG_MEMC_CONFIG 1757 if (m_debug) 1758 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 1759 << " address = " << std::hex << r_config_address.read() 1760 << " / hit = " << std::dec << entry.valid 1761 << " / dirty = " << entry.dirty 1762 << " / count = " << entry.count 1763 << " / is_cnt = " << entry.is_cnt << std::endl; 1764 #endif 1765 break; 1766 } 1767 ///////////////////// 1768 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1769 // to a dirty cache line 1770 // keep DIR lock, and try to get TRT lock 1771 // return to LOOP state if TRT full 1772 // reset dirty bit in DIR and register a PUT 1773 // trabsaction in TRT if not full. 
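// Illustration (standalone sketch, not part of vci_mem_cache): the 6-access software
// sequence described above, assuming the configuration segment is mapped uncached and
// that register index i sits at byte offset 4*i (cell = (address - base) / B with B = 4).
// The SK_* register indices and command code below are placeholders for the sketch only;
// the real values come from the memory-cache headers and are not shown in this hunk.
#include <cstdint>

enum { SK_MEMC_LOCK = 0, SK_MEMC_ADDR_LO = 1, SK_MEMC_ADDR_HI = 2,
       SK_MEMC_BUF_LENGTH = 3, SK_MEMC_CMD_TYPE = 4, SK_MEMC_CMD_INVAL = 1 };

#define SK_MEMC_REG(base, idx) (*(volatile uint32_t *) ((base) + 4 * (idx)))

static void sk_memc_inval(uintptr_t cfg_base, uint64_t buf_paddr, uint32_t buf_bytes)
{
    while (SK_MEMC_REG(cfg_base, SK_MEMC_LOCK) != 0) ;                        // Read MEMC_LOCK until the lock is obtained
    SK_MEMC_REG(cfg_base, SK_MEMC_ADDR_LO)    = (uint32_t) buf_paddr;         // Write MEMC_ADDR_LO : buffer address LSB (line-aligned)
    SK_MEMC_REG(cfg_base, SK_MEMC_ADDR_HI)    = (uint32_t) (buf_paddr >> 32); // Write MEMC_ADDR_HI : buffer address MSB
    SK_MEMC_REG(cfg_base, SK_MEMC_BUF_LENGTH) = buf_bytes;                    // Write MEMC_BUF_LENGTH : length in bytes
    SK_MEMC_REG(cfg_base, SK_MEMC_CMD_TYPE)   = SK_MEMC_CMD_INVAL;            // Write MEMC_CMD_TYPE : launch the operation
    SK_MEMC_REG(cfg_base, SK_MEMC_LOCK)       = 0;                            // Write MEMC_LOCK : release the lock
}
// The response to the MEMC_CMD_TYPE write is delayed until the INVAL (or SYNC) has
// completed, so releasing the lock afterwards is safe.
// (end of illustration)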
1774 { 1775 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1776 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1777 1778 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1779 { 1780 size_t index = 0; 1781 bool wok = not m_trt.full(index); 1782 1783 if (not wok ) 1784 { 1785 r_config_fsm = CONFIG_LOOP; 1786 } 1787 else 1788 { 1789 size_t way = r_config_dir_way.read(); 1790 size_t set = m_y[r_config_address.read()]; 1791 1792 // reset dirty bit in DIR 1793 DirectoryEntry entry; 1794 entry.valid = true; 1795 entry.dirty = false; 1796 entry.tag = m_z[r_config_address.read()]; 1797 entry.is_cnt = r_config_dir_is_cnt.read(); 1798 entry.lock = r_config_dir_lock.read(); 1799 entry.ptr = r_config_dir_ptr.read(); 1800 entry.count = r_config_dir_count.read(); 1801 entry.owner.inst = r_config_dir_copy_inst.read(); 1802 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1803 m_cache_directory.write( set, way, entry ); 1804 1805 r_config_trt_index = index; 1806 r_config_fsm = CONFIG_TRT_SET; 1807 } 1808 1809 #if DEBUG_MEMC_CONFIG 1810 if (m_debug) 1811 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1812 << " wok = " << std::dec << wok 1813 << " index = " << index << std::endl; 1814 #endif 1815 } 1816 break; 1817 } 1818 //////////////////// 1819 case CONFIG_TRT_SET: // read data in cache 1820 // and post a PUT request in TRT 1821 { 1822 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1823 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1824 1825 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1826 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1827 1828 // read data into cache 1829 size_t way = r_config_dir_way.read(); 1830 size_t set = m_y[r_config_address.read()]; 1831 std::vector<data_t> data_vector; 1832 data_vector.clear(); 1833 for(size_t word=0; word<m_words; word++) 1834 { 1835 uint32_t data = m_cache_data.read( way, set, word ); 1836 data_vector.push_back( data ); 1837 } 1838 1839 // post the PUT request in TRT 1840 m_trt.set( r_config_trt_index.read(), 1841 false, // PUT transaction 1842 m_nline[r_config_address.read()], // line index 1843 0, // srcid: unused 1844 0, // trdid: unused 1845 0, // pktid: unused 1846 false, // not proc_read 1847 0, // read_length: unused 1848 0, // word_index: unused 1849 std::vector<be_t>(m_words,0xF), // byte-enable: unused 1850 data_vector, // data to be written 1851 0, // ll_key: unused 1852 true ); // requested by config FSM 1853 r_config_fsm = CONFIG_PUT_REQ; 1854 1855 #if DEBUG_MEMC_CONFIG 1856 if (m_debug) 1857 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1858 << " address = " << std::hex << r_config_address.read() 1859 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1860 #endif 1861 break; 1862 } 1863 //////////////////// 1864 case CONFIG_PUT_REQ: // post PUT request to IXR_CMD_FSM 1865 { 1866 if (not r_config_to_ixr_cmd_req.read()) 1867 { 1868 r_config_to_ixr_cmd_req = true; 1869 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1870 1871 // prepare next iteration 1872 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1873 r_config_address = r_config_address.read() + (m_words<<2); 1874 r_config_fsm = CONFIG_LOOP; 1875 1876 #if DEBUG_MEMC_CONFIG 1877 if (m_debug) 1878 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> post PUT request to IXR_CMD_FSM" 1879 << " / address = " << std::hex << r_config_address.read() << std::endl; 1880 #endif 1881 } 1882 break; 1883 } 1884 ///////////////////// 1885 case 
CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1886 // Keep DIR lock and Try to get IVT lock. 1887 // Return to LOOP state if IVT full. 1888 // Register inval in IVT, and invalidate the 1889 // directory if IVT not full. 1890 { 1891 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1892 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1893 1894 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1895 { 1896 size_t set = m_y[(addr_t)(r_config_address.read())]; 1897 size_t way = r_config_dir_way.read(); 1898 1899 if (r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 1900 { 1901 m_cache_directory.inval( way, set ); 1902 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1903 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1904 r_config_address = r_config_address.read() + (m_words<<2); 1905 r_config_fsm = CONFIG_LOOP; 1906 1907 #if DEBUG_MEMC_CONFIG 1908 if (m_debug) 1909 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1910 << " No copies in L1 : inval DIR entry" << std::endl; 1911 #endif 1912 } 1913 else // try to register inval in IVT 1914 { 1915 bool wok = false; 1916 size_t index = 0; 1917 bool broadcast = r_config_dir_is_cnt.read(); 1918 size_t srcid = r_config_srcid.read(); 1919 size_t trdid = r_config_trdid.read(); 1920 size_t pktid = r_config_pktid.read(); 1921 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 1922 size_t nb_copies = r_config_dir_count.read(); 1923 1924 wok = m_ivt.set(false, // it's an inval transaction 1925 broadcast, 1926 false, // no response required 1927 true, // acknowledge required 1928 srcid, 1929 trdid, 1930 pktid, 1931 nline, 1932 nb_copies, 1933 index); 1934 1935 if (wok ) // IVT success => inval DIR slot 1936 { 1937 m_cache_directory.inval( way, set ); 1938 r_config_ivt_index = index; 1939 if (broadcast ) r_config_fsm = CONFIG_BC_SEND; 1940 else r_config_fsm = CONFIG_INVAL_SEND; 1941 1942 #if DEBUG_MEMC_CONFIG 1943 if (m_debug) 1944 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1945 << " Inval DIR entry and register inval in IVT" 1946 << " / index = " << std::dec << index 1947 << " / broadcast = " << broadcast << std::endl; 1948 #endif 1949 } 1950 else // IVT full => release both DIR and IVT locks 1951 { 1952 r_config_fsm = CONFIG_LOOP; 1953 1954 #if DEBUG_MEMC_CONFIG 1955 if (m_debug) 1956 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1957 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1958 #endif 1959 } 1960 } 1961 } 1962 break; 1963 } 1964 //////////////////// 1965 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 1966 { 1967 if (not r_config_to_cc_send_multi_req.read() and 1968 not r_config_to_cc_send_brdcast_req.read()) 1969 { 1970 // post bc inval request 1971 r_config_to_cc_send_multi_req = false; 1972 r_config_to_cc_send_brdcast_req = true; 1973 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1974 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1975 1976 // prepare next iteration 1977 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1978 r_config_address = r_config_address.read() + (m_words << 2); 1979 r_config_fsm = CONFIG_LOOP; 1980 1981 #if DEBUG_MEMC_CONFIG 1982 if (m_debug) 1983 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 1984 << " Post a broadcast inval request to CC_SEND FSM" 1985 << " / address = " << r_config_address.read() <<std::endl; 1986 #endif 1987 } 1988 break; 1989 } 1990 /////////////////////// 1991 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND 
FSM 1992 { 1993 if (not r_config_to_cc_send_multi_req.read() and 1994 not r_config_to_cc_send_brdcast_req.read()) 1995 { 1996 // post multi inval request 1997 r_config_to_cc_send_multi_req = true; 1998 r_config_to_cc_send_brdcast_req = false; 1999 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2000 r_config_to_cc_send_nline = m_nline[(addr_t) (r_config_address.read())]; 2001 2002 // post data into FIFO 2003 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2004 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2005 config_to_cc_send_fifo_put = true; 2006 2007 if (r_config_dir_count.read() == 1 ) // one copy 2008 { 2009 // prepare next iteration 2010 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2011 r_config_address = r_config_address.read() + (m_words << 2); 2012 r_config_fsm = CONFIG_LOOP; 2013 } 2014 else // several copies 2015 { 2016 r_config_fsm = CONFIG_HEAP_REQ; 2017 } 2018 2019 #if DEBUG_MEMC_CONFIG 2020 if (m_debug) 2021 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2022 << " Post multi inval request to CC_SEND FSM" 2023 << " / address = " << std::hex << r_config_address.read() 2024 << " / copy = " << r_config_dir_copy_srcid.read() 2025 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2026 #endif 2027 } 2028 break; 2029 } 2030 ///////////////////// 2031 case CONFIG_HEAP_REQ: // Try to get access to Heap 2032 { 2033 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 2034 { 2035 r_config_fsm = CONFIG_HEAP_SCAN; 2036 r_config_heap_next = r_config_dir_ptr.read(); 2037 } 2038 2039 #if DEBUG_MEMC_CONFIG 2040 if (m_debug) 2041 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2042 << " Requesting HEAP lock" << std::endl; 2043 #endif 2044 break; 2045 } 2046 ////////////////////// 2047 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2048 { 2049 HeapEntry entry = m_heap.read( r_config_heap_next.read()); 2050 bool last_copy = (entry.next == r_config_heap_next.read()); 2051 2052 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2053 config_to_cc_send_fifo_inst = entry.owner.inst; 2054 // config_to_cc_send_fifo_last = last_copy; 2055 config_to_cc_send_fifo_put = true; 2056 2057 if (m_config_to_cc_send_inst_fifo.wok()) // inval request accepted 2058 { 2059 r_config_heap_next = entry.next; 2060 if (last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 2061 } 2062 2063 #if DEBUG_MEMC_CONFIG 2064 if (m_debug) 2065 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2066 << " Post multi inval request to CC_SEND FSM" 2067 << " / address = " << std::hex << r_config_address.read() 2068 << " / copy = " << entry.owner.srcid 2069 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2070 #endif 2071 break; 2072 } 2073 ////////////////////// 2074 case CONFIG_HEAP_LAST: // HEAP housekeeping 2075 { 2076 size_t free_pointer = m_heap.next_free_ptr(); 2077 HeapEntry last_entry; 2078 last_entry.owner.srcid = 0; 2079 last_entry.owner.inst = false; 2080 2081 if (m_heap.is_full()) 2082 { 2083 last_entry.next = r_config_dir_ptr.read(); 2084 m_heap.unset_full(); 2085 } 2086 else 2087 { 2088 last_entry.next = free_pointer; 2089 } 2090 2091 m_heap.write_free_ptr( r_config_dir_ptr.read()); 2092 m_heap.write( r_config_heap_next.read(), last_entry ); 2093 2094 // prepare next iteration 2095 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2096 r_config_address = r_config_address.read() + (m_words<<2); 2097 r_config_fsm = CONFIG_LOOP; 2098 2099 #if DEBUG_MEMC_CONFIG 2100 if (m_debug) 2101 std::cout << " <MEMC " << 
name() << " CONFIG_HEAP_LAST>" 2102 << " Heap housekeeping" << std::endl; 2103 #endif 2104 break; 2105 } 2106 } // end switch r_config_fsm 2107 2108 //////////////////////////////////////////////////////////////////////////////////// 2109 // READ FSM 2110 //////////////////////////////////////////////////////////////////////////////////// 2111 // The READ FSM controls the VCI read and ll requests. 2112 // It takes the lock protecting the cache directory to check the cache line status: 2113 // - In case of HIT 2114 // The fsm copies the data (one line, or one single word) 2115 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 2116 // The requesting initiator is registered in the cache directory. 2117 // If the number of copy is larger than 1, the new copy is registered 2118 // in the HEAP. 2119 // If the number of copy is larger than the threshold, the HEAP is cleared, 2120 // and the corresponding line switches to the counter mode. 2121 // - In case of MISS 2122 // The READ fsm takes the lock protecting the transaction tab. 2123 // If a read transaction to the XRAM for this line already exists, 2124 // or if the transaction tab is full, the fsm is stalled. 2125 // If a TRT entry is free, the READ request is registered in TRT, 2126 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 2127 // The READ FSM returns in the IDLE state as the read transaction will be 2128 // completed when the missing line will be received. 2129 //////////////////////////////////////////////////////////////////////////////////// 2130 2131 //std::cout << std::endl << "read_fsm" << std::endl; 2132 2133 switch(r_read_fsm.read()) 1067 2134 { 1068 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1069 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1070 1071 need_rsp = true; 1072 error = 0; 1073 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1074 ((addr_t)wdata); 1075 } 1076 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1077 and (cell == MEMC_ADDR_HI) ) 1078 2135 /////////////// 2136 case READ_IDLE: // waiting a read request 2137 { 2138 if (m_cmd_read_addr_fifo.rok()) 2139 { 2140 2141 #if DEBUG_MEMC_READ 2142 if (m_debug) 2143 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2144 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2145 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2146 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2147 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2148 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2149 #endif 2150 r_read_fsm = READ_DIR_REQ; 2151 } 2152 break; 2153 } 2154 ////////////////// 2155 case READ_DIR_REQ: // Get the lock to the directory 2156 { 2157 if (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2158 { 2159 r_read_fsm = READ_DIR_LOCK; 2160 } 2161 2162 #if DEBUG_MEMC_READ 2163 if (m_debug) 2164 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 2165 #endif 2166 break; 2167 } 2168 2169 /////////////////// 2170 case READ_DIR_LOCK: // check directory for hit / miss 2171 { 2172 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2173 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2174 2175 size_t way = 0; 2176 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2177 2178 // access the global table ONLY when we have an LL cmd 2179 if ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 2180 { 2181 r_read_ll_key = 
m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2182 } 2183 r_read_is_cnt = entry.is_cnt; 2184 r_read_dirty = entry.dirty; 2185 r_read_lock = entry.lock; 2186 r_read_tag = entry.tag; 2187 r_read_way = way; 2188 r_read_count = entry.count; 2189 r_read_copy = entry.owner.srcid; 2190 r_read_copy_inst = entry.owner.inst; 2191 r_read_ptr = entry.ptr; // pointer to the heap 2192 2193 // check if this is a cached read, this means pktid is either 2194 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2195 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2196 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2197 if (entry.valid) // hit 2198 { 2199 // test if we need to register a new copy in the heap 2200 if (entry.is_cnt or (entry.count == 0) or !cached_read) 2201 { 2202 r_read_fsm = READ_DIR_HIT; 2203 } 2204 else 2205 { 2206 r_read_fsm = READ_HEAP_REQ; 2207 } 2208 } 2209 else // miss 2210 { 2211 r_read_fsm = READ_TRT_LOCK; 2212 } 2213 2214 #if DEBUG_MEMC_READ 2215 if (m_debug) 2216 { 2217 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2218 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2219 << " / hit = " << std::dec << entry.valid 2220 << " / count = " <<std::dec << entry.count 2221 << " / is_cnt = " << entry.is_cnt; 2222 if ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2223 else std::cout << std::endl; 2224 } 2225 #endif 2226 break; 2227 } 2228 ////////////////// 2229 case READ_DIR_HIT: // read data in cache & update the directory 2230 // we enter this state in 3 cases: 2231 // - the read request is uncachable 2232 // - the cache line is in counter mode 2233 // - the cache line is valid but not replicated 2234 2235 { 2236 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2237 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2238 2239 // check if this is an instruction read, this means pktid is either 2240 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2241 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2242 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2243 // check if this is a cached read, this means pktid is either 2244 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2245 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2246 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2247 bool is_cnt = r_read_is_cnt.read(); 2248 2249 // read data in the cache 2250 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2251 size_t way = r_read_way.read(); 2252 2253 m_cache_data.read_line(way, set, r_read_data); 2254 2255 // update the cache directory 2256 DirectoryEntry entry; 2257 entry.valid = true; 2258 entry.is_cnt = is_cnt; 2259 entry.dirty = r_read_dirty.read(); 2260 entry.tag = r_read_tag.read(); 2261 entry.lock = r_read_lock.read(); 2262 entry.ptr = r_read_ptr.read(); 2263 2264 if (cached_read) // Cached read => we must update the copies 2265 { 2266 if (!is_cnt) // Not counter mode 2267 { 2268 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2269 entry.owner.inst = inst_read; 2270 entry.count = r_read_count.read() + 1; 2271 } 2272 else // Counter mode 2273 { 2274 entry.owner.srcid = 0; 2275 entry.owner.inst = false; 2276 entry.count = r_read_count.read() + 1; 2277 } 2278 } 2279 else // Uncached read 2280 { 2281 entry.owner.srcid = r_read_copy.read(); 2282 entry.owner.inst = r_read_copy_inst.read(); 2283 entry.count = r_read_count.read(); 2284 } 2285 2286 #if DEBUG_MEMC_READ 2287 if (m_debug) 2288 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory 
entry:" 2289 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2290 << " / set = " << std::dec << set 2291 << " / way = " << way 2292 << " / owner_id = " << std::hex << entry.owner.srcid 2293 << " / owner_ins = " << std::dec << entry.owner.inst 2294 << " / count = " << entry.count 2295 << " / is_cnt = " << entry.is_cnt << std::endl; 2296 #endif 2297 m_cache_directory.write(set, way, entry); 2298 r_read_fsm = READ_RSP; 2299 break; 2300 } 2301 /////////////////// 2302 case READ_HEAP_REQ: // Get the lock to the HEAP directory 2303 { 2304 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2305 { 2306 r_read_fsm = READ_HEAP_LOCK; 2307 } 2308 2309 #if DEBUG_MEMC_READ 2310 if (m_debug) 2311 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 2312 << " Requesting HEAP lock " << std::endl; 2313 #endif 2314 break; 2315 } 2316 2317 //////////////////// 2318 case READ_HEAP_LOCK: // read data in cache, update the directory 2319 // and prepare the HEAP update 2320 { 2321 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2322 { 2323 // enter counter mode when we reach the limit of copies or the heap is full 2324 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 2325 2326 // read data in the cache 2327 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2328 size_t way = r_read_way.read(); 2329 2330 m_cache_data.read_line(way, set, r_read_data); 2331 2332 // update the cache directory 2333 DirectoryEntry entry; 2334 entry.valid = true; 2335 entry.is_cnt = go_cnt; 2336 entry.dirty = r_read_dirty.read(); 2337 entry.tag = r_read_tag.read(); 2338 entry.lock = r_read_lock.read(); 2339 entry.count = r_read_count.read() + 1; 2340 2341 if (not go_cnt) // Not entering counter mode 2342 { 2343 entry.owner.srcid = r_read_copy.read(); 2344 entry.owner.inst = r_read_copy_inst.read(); 2345 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 2346 } 2347 else // Entering Counter mode 2348 { 2349 entry.owner.srcid = 0; 2350 entry.owner.inst = false; 2351 entry.ptr = 0; 2352 } 2353 2354 m_cache_directory.write(set, way, entry); 2355 2356 // prepare the heap update (add an entry, or clear the linked list) 2357 if (not go_cnt) // not switching to counter mode 2358 { 2359 // We test if the next free entry in the heap is the last 2360 HeapEntry heap_entry = m_heap.next_free_entry(); 2361 r_read_next_ptr = heap_entry.next; 2362 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 2363 2364 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 2365 } 2366 else // switching to counter mode 2367 { 2368 if (r_read_count.read() >1) // heap must be cleared 2369 { 2370 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2371 r_read_next_ptr = m_heap.next_free_ptr(); 2372 m_heap.write_free_ptr(r_read_ptr.read()); 2373 2374 if (next_entry.next == r_read_ptr.read()) // last entry 2375 { 2376 r_read_fsm = READ_HEAP_LAST; // erase the entry 2377 } 2378 else // not the last entry 2379 { 2380 r_read_ptr = next_entry.next; 2381 r_read_fsm = READ_HEAP_ERASE; // erase the list 2382 } 2383 } 2384 else // the heap is not used / nothing to do 2385 { 2386 r_read_fsm = READ_RSP; 2387 } 2388 } 2389 2390 #if DEBUG_MEMC_READ 2391 if (m_debug) 2392 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 2393 << " tag = " << std::hex << entry.tag 2394 << " set = " << std::dec << set 2395 << " way = " << way 2396 << " count = " << entry.count 2397 << " is_cnt = " << entry.is_cnt << std::endl; 2398 #endif 2399 } 2400 else 2401 { 2402 std::cout << "VCI_MEM_CACHE ERROR " << name() 
<< " READ_HEAP_LOCK" 2403 << "Bad HEAP allocation" << std::endl; 2404 exit(0); 2405 } 2406 break; 2407 } 2408 ///////////////////// 2409 case READ_HEAP_WRITE: // add an entry in the heap 2410 { 2411 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2412 { 2413 HeapEntry heap_entry; 2414 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2415 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2416 2417 if (r_read_count.read() == 1) // creation of a new linked list 2418 { 2419 heap_entry.next = m_heap.next_free_ptr(); 2420 } 2421 else // head insertion in existing list 2422 { 2423 heap_entry.next = r_read_ptr.read(); 2424 } 2425 m_heap.write_free_entry(heap_entry); 2426 m_heap.write_free_ptr(r_read_next_ptr.read()); 2427 if (r_read_last_free.read()) m_heap.set_full(); 2428 2429 r_read_fsm = READ_RSP; 2430 2431 #if DEBUG_MEMC_READ 2432 if (m_debug) 2433 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 2434 << " owner_id = " << std::hex << heap_entry.owner.srcid 2435 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 2436 #endif 2437 } 2438 else 2439 { 2440 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 2441 << "Bad HEAP allocation" << std::endl; 2442 exit(0); 2443 } 2444 break; 2445 } 2446 ///////////////////// 2447 case READ_HEAP_ERASE: 2448 { 2449 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2450 { 2451 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2452 if (next_entry.next == r_read_ptr.read()) 2453 { 2454 r_read_fsm = READ_HEAP_LAST; 2455 } 2456 else 2457 { 2458 r_read_ptr = next_entry.next; 2459 r_read_fsm = READ_HEAP_ERASE; 2460 } 2461 } 2462 else 2463 { 2464 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 2465 << "Bad HEAP allocation" << std::endl; 2466 exit(0); 2467 } 2468 break; 2469 } 2470 2471 //////////////////// 2472 case READ_HEAP_LAST: 2473 { 2474 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2475 { 2476 HeapEntry last_entry; 2477 last_entry.owner.srcid = 0; 2478 last_entry.owner.inst = false; 2479 2480 if (m_heap.is_full()) 2481 { 2482 last_entry.next = r_read_ptr.read(); 2483 m_heap.unset_full(); 2484 } 2485 else 2486 { 2487 last_entry.next = r_read_next_ptr.read(); 2488 } 2489 m_heap.write(r_read_ptr.read(),last_entry); 2490 r_read_fsm = READ_RSP; 2491 } 2492 else 2493 { 2494 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 2495 << "Bad HEAP allocation" << std::endl; 2496 exit(0); 2497 } 2498 break; 2499 } 2500 ////////////// 2501 case READ_RSP: // request the TGT_RSP FSM to return data 2502 { 2503 if (!r_read_to_tgt_rsp_req) 2504 { 2505 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2506 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2507 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2508 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2509 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2510 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 2511 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2512 cmd_read_fifo_get = true; 2513 r_read_to_tgt_rsp_req = true; 2514 r_read_fsm = READ_IDLE; 2515 2516 #if DEBUG_MEMC_READ 2517 if (m_debug) 2518 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 2519 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 2520 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 2521 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2522 #endif 
2523 } 2524 break; 2525 } 2526 /////////////////// 2527 case READ_TRT_LOCK: // read miss : check the Transaction Table 2528 { 2529 if (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2530 { 2531 size_t index = 0; 2532 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2533 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2534 bool hit_write = m_trt.hit_write(m_nline[addr]); 2535 bool wok = not m_trt.full(index); 2536 2537 if (hit_read or !wok or hit_write) // line already requested or no space 2538 { 2539 if (!wok) m_cpt_trt_full++; 2540 if (hit_read or hit_write) m_cpt_trt_rb++; 2541 r_read_fsm = READ_IDLE; 2542 } 2543 else // missing line is requested to the XRAM 2544 { 2545 m_cpt_read_miss++; 2546 r_read_trt_index = index; 2547 r_read_fsm = READ_TRT_SET; 2548 } 2549 2550 #if DEBUG_MEMC_READ 2551 if (m_debug) 2552 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 2553 << " hit_read = " << hit_read 2554 << " / hit_write = " << hit_write 2555 << " / full = " << !wok << std::endl; 2556 #endif 2557 } 2558 break; 2559 } 2560 ////////////////// 2561 case READ_TRT_SET: // register get transaction in TRT 2562 { 2563 if (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2564 { 2565 m_trt.set( r_read_trt_index.read(), 2566 true, // GET 2567 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2568 m_cmd_read_srcid_fifo.read(), 2569 m_cmd_read_trdid_fifo.read(), 2570 m_cmd_read_pktid_fifo.read(), 2571 true, // proc read 2572 m_cmd_read_length_fifo.read(), 2573 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2574 std::vector<be_t> (m_words,0), 2575 std::vector<data_t> (m_words,0), 2576 r_read_ll_key.read()); 2577 #if DEBUG_MEMC_READ 2578 if (m_debug) 2579 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2580 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2581 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2582 #endif 2583 r_read_fsm = READ_TRT_REQ; 2584 } 2585 break; 2586 } 2587 2588 ////////////////// 2589 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2590 { 2591 if (not r_read_to_ixr_cmd_req) 2592 { 2593 cmd_read_fifo_get = true; 2594 r_read_to_ixr_cmd_req = true; 2595 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2596 r_read_fsm = READ_IDLE; 2597 2598 #if DEBUG_MEMC_READ 2599 if (m_debug) 2600 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 2601 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2602 #endif 2603 } 2604 break; 2605 } 2606 } // end switch read_fsm 2607 2608 /////////////////////////////////////////////////////////////////////////////////// 2609 // WRITE FSM 2610 /////////////////////////////////////////////////////////////////////////////////// 2611 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 2612 // All addresses in a burst must be in the same cache line. 2613 // A complete write burst is consumed in the FIFO & copied to a local buffer. 2614 // Then the FSM takes the lock protecting the cache directory, to check 2615 // if the line is in the cache. 2616 // 2617 // - In case of HIT, the cache is updated. 2618 // If there is no other copy, an acknowledge response is immediately 2619 // returned to the writing processor. 
2620 // If the data is cached by other processors, a coherence transaction must 2621 // be launched (sc requests always require a coherence transaction): 2622 // It is a multicast update if the line is not in counter mode: the processor 2623 // takes the lock protecting the Update Table (UPT) to register this transaction. 2624 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2625 // a multi-update request to all owners of the line (but the writer), 2626 // through the CC_SEND FSM. In case of coherence transaction, the WRITE FSM 2627 // does not respond to the writing processor, as this response will be sent by 2628 // the MULTI_ACK FSM when all update responses have been received. 2629 // It is a broadcast invalidate if the line is in counter mode: The line 2630 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2631 // after registration in TRT. 2632 // 2633 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2634 // table (TRT). If a read transaction to the XRAM for this line already exists, 2635 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2636 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2637 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2638 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2639 ///////////////////////////////////////////////////////////////////////////////////// 2640 2641 //std::cout << std::endl << "write_fsm" << std::endl; 2642 2643 switch(r_write_fsm.read()) 1079 2644 { 1080 need_rsp = true; 1081 error = 0; 1082 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1083 (((addr_t)wdata)<<32); 1084 } 1085 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1086 and (cell == MEMC_BUF_LENGTH) ) 1087 { 1088 need_rsp = true; 1089 error = 0; 1090 size_t lines = wdata/(m_words<<2); 1091 if ( wdata%(m_words<<2) ) lines++; 1092 r_config_cmd_lines = lines; 1093 r_config_rsp_lines = lines; 1094 } 1095 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1096 and (cell == MEMC_CMD_TYPE) ) 1097 { 1098 need_rsp = false; 1099 error = 0; 1100 r_config_cmd = wdata; 1101 1102 // prepare delayed response from CONFIG FSM 1103 r_config_srcid = p_vci_tgt.srcid.read(); 1104 r_config_trdid = p_vci_tgt.trdid.read(); 1105 r_config_pktid = p_vci_tgt.pktid.read(); 1106 } 1107 else 1108 { 1109 need_rsp = true; 1110 error = 1; 1111 } 1112 1113 if ( need_rsp ) 1114 { 1115 // blocked if previous pending request to TGT_RSP FSM 1116 if ( r_tgt_cmd_to_tgt_rsp_req.read() ) break; 1117 1118 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1119 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1120 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1121 r_tgt_cmd_to_tgt_rsp_req = true; 1122 r_tgt_cmd_to_tgt_rsp_error = error; 1123 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1124 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1125 } 1126 else 1127 { 1128 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1129 } 1130 1131 #if DEBUG_MEMC_TGT_CMD 1132 if(m_debug) 1133 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1134 << " address = " << std::hex << p_vci_tgt.address.read() 1135 << " / wdata = " << p_vci_tgt.wdata.read() 1136 << " / need_rsp = " << need_rsp 1137 << " / error = " << error << std::endl; 1138 #endif 1139 break; 1140 } 1141 ////////////////// 1142 case TGT_CMD_READ: // Push a read request into read fifo 1143 1144 // check that the read 
does not cross a cache line limit. 1145 if ( ((m_x[(addr_t) p_vci_tgt.address.read()]+ (p_vci_tgt.plen.read() >>2)) > 16) and 1146 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1147 { 1148 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1149 << " illegal address/plen for VCI read command" << std::endl; 1150 exit(0); 1151 } 1152 // check single flit 1153 if(!p_vci_tgt.eop.read()) 1154 { 1155 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1156 << " read command packet must contain one single flit" << std::endl; 1157 exit(0); 1158 } 1159 // check plen for LL 1160 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1161 (p_vci_tgt.plen.read() != 8) ) 1162 { 1163 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1164 << " ll command packets must have a plen of 8" << std::endl; 1165 exit(0); 1166 } 1167 1168 if ( p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok() ) 1169 { 1170 1171 #if DEBUG_MEMC_TGT_CMD 1172 if(m_debug) 1173 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1174 << " address = " << std::hex << p_vci_tgt.address.read() 1175 << " / srcid = " << p_vci_tgt.srcid.read() 1176 << " / trdid = " << p_vci_tgt.trdid.read() 1177 << " / pktid = " << p_vci_tgt.pktid.read() 1178 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1179 #endif 1180 cmd_read_fifo_put = true; 1181 if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) m_cpt_ll++; 1182 else m_cpt_read++; 1183 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1184 } 1185 break; 1186 1187 /////////////////// 1188 case TGT_CMD_WRITE: 1189 if(p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1190 { 1191 1192 #if DEBUG_MEMC_TGT_CMD 1193 if(m_debug) 1194 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1195 << " address = " << std::hex << p_vci_tgt.address.read() 1196 << " / srcid = " << p_vci_tgt.srcid.read() 1197 << " / trdid = " << p_vci_tgt.trdid.read() 1198 << " / pktid = " << p_vci_tgt.pktid.read() 1199 << " / wdata = " << p_vci_tgt.wdata.read() 1200 << " / be = " << p_vci_tgt.be.read() 1201 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1202 #endif 1203 cmd_write_fifo_put = true; 1204 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1205 } 1206 break; 1207 1208 ///////////////// 1209 case TGT_CMD_CAS: 1210 if((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1211 { 1212 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" 1213 << "illegal format for CAS command " << std::endl; 1214 exit(0); 1215 } 1216 1217 if(p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1218 { 1219 1220 #if DEBUG_MEMC_TGT_CMD 1221 if(m_debug) 1222 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1223 << " address = " << std::hex << p_vci_tgt.address.read() 1224 << " srcid = " << p_vci_tgt.srcid.read() 1225 << " trdid = " << p_vci_tgt.trdid.read() 1226 << " pktid = " << p_vci_tgt.pktid.read() 1227 << " wdata = " << p_vci_tgt.wdata.read() 1228 << " be = " << p_vci_tgt.be.read() 1229 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1230 #endif 1231 cmd_cas_fifo_put = true; 1232 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1233 } 1234 break; 1235 } // end switch tgt_cmd_fsm 1236 1237 ///////////////////////////////////////////////////////////////////////// 1238 // MULTI_ACK FSM 1239 ///////////////////////////////////////////////////////////////////////// 1240 // This FSM controls the response to the multicast update 
requests sent 1241 // by the memory cache to the L1 caches and update the UPT. 1242 // 1243 // - The FSM decrements the proper entry in UPT, 1244 // and clear the UPT entry when all responses have been received. 1245 // - If required, it sends a request to the TGT_RSP FSM to complete 1246 // a pending write transaction. 1247 // 1248 // All those multi-ack packets are one flit packet. 1249 // The index in the UPT is defined in the TRDID field. 1250 //////////////////////////////////////////////////////////////////////// 1251 1252 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1253 1254 switch(r_multi_ack_fsm.read()) 1255 { 1256 //////////////////// 1257 case MULTI_ACK_IDLE: 1258 { 1259 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1260 1261 // No CC_RECEIVE FSM request and no WRITE FSM request 1262 if( not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1263 break; 1264 1265 uint8_t updt_index; 1266 1267 // handling WRITE FSM request to decrement update table response 1268 // counter if no CC_RECEIVE FSM request 1269 if(not multi_ack_fifo_rok) 1270 { 1271 updt_index = r_write_to_multi_ack_upt_index.read(); 1272 r_write_to_multi_ack_req = false; 1273 } 1274 // Handling CC_RECEIVE FSM request 1275 else 1276 { 1277 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1278 updt_index = DspinDhccpParam::dspin_get(flit, 1279 DspinDhccpParam::MULTI_ACK_UPDT_INDEX); 1280 1281 cc_receive_to_multi_ack_fifo_get = true; 1282 } 1283 1284 assert((updt_index < m_upt.size()) and 1285 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1286 "index too large for UPT"); 1287 1288 r_multi_ack_upt_index = updt_index; 1289 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1290 1291 #if DEBUG_MEMC_MULTI_ACK 1292 if(m_debug) 1293 { 1294 if (multi_ack_fifo_rok) 1295 { 1296 std::cout << " <MEMC " << name() 1297 << " MULTI_ACK_IDLE> Response for UPT entry " 1298 << (size_t)updt_index << std::endl; 1299 } 1300 else 1301 { 1302 std::cout << " <MEMC " << name() 1303 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1304 << updt_index << std::endl; 1305 } 1306 } 1307 #endif 1308 break; 1309 } 1310 1311 //////////////////////// 1312 case MULTI_ACK_UPT_LOCK: 1313 { 1314 // get lock to the UPDATE table 1315 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1316 1317 // decrement the number of expected responses 1318 size_t count = 0; 1319 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 1320 1321 if(not valid) 1322 { 1323 std::cout << "VCI_MEM_CACHE ERROR " << name() 1324 << " MULTI_ACK_UPT_LOCK state" << std::endl 1325 << "unsuccessful access to decrement the UPT" << std::endl; 1326 exit(0); 1327 } 1328 1329 if(count == 0) 1330 { 1331 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1332 } 1333 else 1334 { 1335 r_multi_ack_fsm = MULTI_ACK_IDLE; 1336 } 1337 1338 #if DEBUG_MEMC_MULTI_ACK 1339 if(m_debug) 1340 std::cout << " <MEMC " << name() 1341 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1342 << " entry = " << r_multi_ack_upt_index.read() 1343 << " / rsp_count = " << std::dec << count << std::endl; 1344 #endif 1345 break; 1346 } 1347 1348 ///////////////////////// 1349 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 1350 { 1351 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1352 { 1353 std::cout << "VCI_MEM_CACHE ERROR " << name() 1354 << " MULTI_ACK_UPT_CLEAR state" 1355 << " bad UPT allocation" << std::endl; 1356 exit(0); 1357 } 1358 1359 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 1360 
r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read());
1361 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read());
1362 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read());
1363 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read());
1364
1365 // clear the UPT entry
1366 m_upt.clear(r_multi_ack_upt_index.read());
1367
1368 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP;
1369 else r_multi_ack_fsm = MULTI_ACK_IDLE;
1370
1371 #if DEBUG_MEMC_MULTI_ACK
1372 if(m_debug)
1373 std::cout << " <MEMC " << name()
1374 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry "
1375 << std::dec << r_multi_ack_upt_index.read() << std::endl;
1376 #endif
1377 break;
1378 }
1379 /////////////////////////
1380 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM
1381 // Wait if pending request
1382 {
1383 if ( r_multi_ack_to_tgt_rsp_req.read() ) break;
1384
1385 r_multi_ack_to_tgt_rsp_req = true;
1386 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read();
1387 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read();
1388 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read();
1389 r_multi_ack_fsm = MULTI_ACK_IDLE;
1390
1391 #if DEBUG_MEMC_MULTI_ACK
1392 if(m_debug)
1393 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>"
1394 << " Request TGT_RSP FSM to send a response to srcid "
1395 << std::hex << r_multi_ack_srcid.read() << std::endl;
1396 #endif
1397 break;
1398 }
1399 } // end switch r_multi_ack_fsm
1400
1401 ////////////////////////////////////////////////////////////////////////////////////
1402 // CONFIG FSM
1403 ////////////////////////////////////////////////////////////////////////////////////
1404 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC).
1405 // The target buffer can have any size, and there is one single command for
1406 // all cache lines covered by the target buffer.
1407 //
1408 // An INVAL or SYNC configuration operation is defined by the following registers:
1409 // - bool r_config_cmd : INVAL / SYNC / NOP
1410 // - uint64_t r_config_address : buffer base address
1411 // - uint32_t r_config_cmd_lines : number of lines to be handled
1412 // - uint32_t r_config_rsp_lines : number of lines not completed
1413 //
1414 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling
1415 // all cache lines covered by the buffer. The various lines of a given buffer
1416 // can be pipelined: the CONFIG FSM does not wait for the response for line (n) to send
1417 // the command for line (n+1). It decrements the r_config_cmd_lines counter until
1418 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL).
1419 //
1420 // - INVAL request:
1421 // For each line, it accesses the DIR.
1422 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM.
1423 // In case of hit, with no copies in L1 caches, the line is invalidated and
1424 // a response is requested to TGT_RSP FSM.
1425 // If there are copies, a multi-inval or a broadcast-inval coherence transaction
1426 // is launched and registered in UPT. The multi-inval transaction completion
1427 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter.
1428 // The CONFIG INVAL response is sent only when the last line has been invalidated.
1429 // TODO : The target buffer address must be aligned on a cache line boundary.
1430 // This constraint can be relaxed, but it requires making 2 PUT transactions
1431 // for the first and the last line...
1432 //
1433 // - SYNC request:
1434 // For each line, it accesses the DIR.
1435 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM.
1436 // In case of hit, a PUT transaction is registered in TRT and a request is sent
1437 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter
1438 // when a PUT response is received.
1439 // The CONFIG SYNC response is sent only when the last PUT response is received.
1440 //
1441 // From the software point of view, a configuration request is a sequence
1442 // of 6 atomic accesses in an uncached segment. A dedicated lock is used
1443 // to handle only one configuration command at a given time:
1444 // - Read MEMC_LOCK : Get the lock
1445 // - Write MEMC_ADDR_LO : Set the buffer address LSB
1446 // - Write MEMC_ADDR_HI : Set the buffer address MSB
1447 // - Write MEMC_BUF_LENGTH : Set the buffer length (bytes)
1448 // - Write MEMC_CMD_TYPE : Launch the actual operation
1449 // - Write MEMC_LOCK : Release the lock
1450 ////////////////////////////////////////////////////////////////////////////////////
1451
1452 //std::cout << std::endl << "config_fsm" << std::endl;
1453
1454 switch( r_config_fsm.read() )
1455 {
1456 /////////////////
1457 case CONFIG_IDLE: // waiting for a config request
1458 {
1459 if ( r_config_cmd.read() != MEMC_CMD_NOP )
1460 {
1461 r_config_fsm = CONFIG_LOOP;
1462
1463 #if DEBUG_MEMC_CONFIG
1464 if(m_debug)
1465 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received"
1466 << " / address = " << std::hex << r_config_address.read()
1467 << " / lines = " << std::dec << r_config_cmd_lines.read()
1468 << " / type = " << r_config_cmd.read() << std::endl;
1469 #endif
1470 }
1471 break;
1472 }
1473 /////////////////
1474 case CONFIG_LOOP: // test if last line to be handled
1475 {
1476 if ( r_config_cmd_lines.read() == 0 )
1477 {
1478 r_config_cmd = MEMC_CMD_NOP;
1479 r_config_fsm = CONFIG_WAIT;
1480 }
1481 else
1482 {
1483 r_config_fsm = CONFIG_DIR_REQ;
1484 }
1485
1486 #if DEBUG_MEMC_CONFIG
1487 if(m_debug)
1488 std::cout << " <MEMC " << name() << " CONFIG_LOOP>"
1489 << " / address = " << std::hex << r_config_address.read()
1490 << " / lines not handled = " << std::dec << r_config_cmd_lines.read()
1491 << " / command = " << r_config_cmd.read() << std::endl;
1492 #endif
1493 break;
1494 }
1495 /////////////////
1496 case CONFIG_WAIT: // wait for completion (last response)
1497 {
1498 if ( r_config_rsp_lines.read() == 0 ) // last response received
1499 {
1500 r_config_fsm = CONFIG_RSP;
1501 }
1502
1503 #if DEBUG_MEMC_CONFIG
1504 if(m_debug)
1505 std::cout << " <MEMC " << name() << " CONFIG_WAIT>"
1506 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl;
1507 #endif
1508 break;
1509 }
1510 ////////////////
1511 case CONFIG_RSP: // request TGT_RSP FSM to return response
1512 {
1513 if ( not r_config_to_tgt_rsp_req.read() )
1514 {
1515 r_config_to_tgt_rsp_srcid = r_config_srcid.read();
1516 r_config_to_tgt_rsp_trdid = r_config_trdid.read();
1517 r_config_to_tgt_rsp_pktid = r_config_pktid.read();
1518 r_config_to_tgt_rsp_error = false;
1519 r_config_to_tgt_rsp_req = true;
1520 r_config_fsm = CONFIG_IDLE;
1521
1522 #if DEBUG_MEMC_CONFIG
1523 if(m_debug)
1524 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:"
1525 << " error = " << r_config_to_tgt_rsp_error.read()
1526 << " / rsrcid = " << std::hex << r_config_srcid.read()
1527 << " / rtrdid = " << std::hex << r_config_trdid.read()
1528 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl;
1529 #endif
1530 }
1531 break;
1532
1533 }
1534 ////////////////////
1535 case CONFIG_DIR_REQ: // Request directory lock
1536 {
1537 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG )
1538 {
1539 r_config_fsm = CONFIG_DIR_ACCESS;
1540 }
1541
1542 #if DEBUG_MEMC_CONFIG
1543 if(m_debug)
1544 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>"
1545 << " Request DIR access" << std::endl;
1546 #endif
1547 break;
1548 }
1549 ///////////////////////
1550 case CONFIG_DIR_ACCESS: // Access directory and decode config command
1551 {
1552 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and
1553 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation");
1554
1555 size_t way = 0;
1556 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way);
1557
1558 r_config_dir_way = way;
1559 r_config_dir_copy_inst = entry.owner.inst;
1560 r_config_dir_copy_srcid = entry.owner.srcid;
1561 r_config_dir_is_cnt = entry.is_cnt;
1562 r_config_dir_lock = entry.lock;
1563 r_config_dir_count = entry.count;
1564 r_config_dir_ptr = entry.ptr;
1565
1566 if ( entry.valid and // hit & inval command
1567 (r_config_cmd.read() == MEMC_CMD_INVAL) )
1568 {
1569 r_config_fsm = CONFIG_IVT_LOCK;
1570 }
1571 else if ( entry.valid and // hit & sync command
1572 entry.dirty and
1573 (r_config_cmd.read() == MEMC_CMD_SYNC) )
1574 {
1575 r_config_fsm = CONFIG_TRT_LOCK;
1576 }
1577 else // miss : return to LOOP
1578 {
1579 r_config_cmd_lines = r_config_cmd_lines.read() - 1;
1580 r_config_rsp_lines = r_config_rsp_lines.read() - 1;
1581 r_config_address = r_config_address.read() + (m_words<<2);
1582 r_config_fsm = CONFIG_LOOP;
1583 }
1584
1585 #if DEBUG_MEMC_CONFIG
1586 if(m_debug)
1587 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: "
1588 << " address = " << std::hex << r_config_address.read()
1589 << " / hit = " << std::dec << entry.valid
1590 << " / dirty = " << entry.dirty
1591 << " / count = " << entry.count
1592 << " / is_cnt = " << entry.is_cnt << std::endl;
1593 #endif
1594 break;
1595 }
1596 /////////////////////
1597 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command
1598 // to a dirty cache line
1599 // keep DIR lock, and try to get TRT lock
1600 // return to LOOP state if TRT full
1601 // reset dirty bit in DIR and register a PUT
1602 // transaction in TRT if not full.
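// --------------------------------------------------------------------------
// Aside (editor's illustration, not part of the changeset): the CONFIG FSM
// header above describes the 6-access software sequence (MEMC_LOCK,
// MEMC_ADDR_LO, MEMC_ADDR_HI, MEMC_BUF_LENGTH, MEMC_CMD_TYPE, MEMC_LOCK).
// The sketch below shows what a driver-side call could look like. The word
// offsets in memc_reg, the uncached base pointer, and the convention that
// reading MEMC_LOCK returns 0 once the lock is granted are assumptions made
// here for illustration only; they are not the actual TSAR/SoCLib driver API.
#include <cstddef>
#include <cstdint>

enum memc_reg : size_t // hypothetical word offsets in the uncached segment
{
    MEMC_LOCK = 0, MEMC_ADDR_LO = 1, MEMC_ADDR_HI = 2,
    MEMC_BUF_LENGTH = 3, MEMC_CMD_TYPE = 4
};

// Issue one configuration command (MEMC_CMD_INVAL or MEMC_CMD_SYNC) covering
// the buffer [buffer, buffer + length).
static void memc_config_cmd(volatile uint32_t* memc,   // uncached segment base
                            uint64_t buffer, uint32_t length, uint32_t cmd_type)
{
    while (memc[MEMC_LOCK] != 0) {}                   // 1. read MEMC_LOCK until granted
    memc[MEMC_ADDR_LO]    = (uint32_t) buffer;        // 2. buffer address LSB
    memc[MEMC_ADDR_HI]    = (uint32_t)(buffer >> 32); // 3. buffer address MSB
    memc[MEMC_BUF_LENGTH] = length;                   // 4. buffer length (bytes)
    memc[MEMC_CMD_TYPE]   = cmd_type;                 // 5. launch the operation; the VCI
                                                      //    response is delayed by the memory
                                                      //    cache until the last line is done
    memc[MEMC_LOCK] = 0;                              // 6. release the lock
}
// --------------------------------------------------------------------------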
1603 { 1604 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1605 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1606 1607 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1608 { 1609 size_t index = 0; 1610 bool wok = not m_trt.full(index); 1611 1612 if ( not wok ) 1613 { 1614 r_config_fsm = CONFIG_LOOP; 1615 } 1616 else 1617 { 1618 size_t way = r_config_dir_way.read(); 1619 size_t set = m_y[r_config_address.read()]; 1620 1621 // reset dirty bit in DIR 1622 DirectoryEntry entry; 1623 entry.valid = true; 1624 entry.dirty = false; 1625 entry.tag = m_z[r_config_address.read()]; 1626 entry.is_cnt = r_config_dir_is_cnt.read(); 1627 entry.lock = r_config_dir_lock.read(); 1628 entry.ptr = r_config_dir_ptr.read(); 1629 entry.count = r_config_dir_count.read(); 1630 entry.owner.inst = r_config_dir_copy_inst.read(); 1631 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1632 m_cache_directory.write( set, way, entry ); 1633 1634 r_config_trt_index = index; 1635 r_config_fsm = CONFIG_TRT_SET; 1636 } 1637 1638 #if DEBUG_MEMC_CONFIG 1639 if(m_debug) 1640 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1641 << " wok = " << std::dec << wok 1642 << " index = " << index << std::endl; 1643 #endif 1644 } 1645 break; 1646 } 1647 //////////////////// 1648 case CONFIG_TRT_SET: // read data in cache 1649 // and post a PUT request in TRT 1650 { 1651 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1652 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1653 1654 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1655 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1656 1657 // read data into cache 1658 size_t way = r_config_dir_way.read(); 1659 size_t set = m_y[r_config_address.read()]; 1660 std::vector<data_t> data_vector; 1661 data_vector.clear(); 1662 for(size_t word=0; word<m_words; word++) 1663 { 1664 uint32_t data = m_cache_data.read( way, set, word ); 1665 data_vector.push_back( data ); 1666 } 1667 1668 // post the PUT request in TRT 1669 m_trt.set( r_config_trt_index.read(), 1670 false, // PUT transaction 1671 m_nline[r_config_address.read()], // line index 1672 0, // srcid: unused 1673 0, // trdid: unused 1674 0, // pktid: unused 1675 false, // not proc_read 1676 0, // read_length: unused 1677 0, // word_index: unused 1678 std::vector<be_t>(m_words,0xF), // byte-enable: unused 1679 data_vector, // data to be written 1680 0, // ll_key: unused 1681 true ); // requested by config FSM 1682 r_config_fsm = CONFIG_PUT_REQ; 1683 1684 #if DEBUG_MEMC_CONFIG 1685 if(m_debug) 1686 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1687 << " address = " << std::hex << r_config_address.read() 1688 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1689 #endif 1690 break; 1691 } 1692 //////////////////// 1693 case CONFIG_PUT_REQ: // post PUT request to IXR_CMD_FSM 1694 { 1695 if ( not r_config_to_ixr_cmd_req.read() ) 1696 { 1697 r_config_to_ixr_cmd_req = true; 1698 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1699 1700 // prepare next iteration 1701 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1702 r_config_address = r_config_address.read() + (m_words<<2); 1703 r_config_fsm = CONFIG_LOOP; 1704 1705 #if DEBUG_MEMC_CONFIG 1706 if(m_debug) 1707 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> post PUT request to IXR_CMD_FSM" 1708 << " / address = " << std::hex << r_config_address.read() << std::endl; 1709 #endif 1710 } 1711 break; 1712 } 1713 ///////////////////// 1714 case 
CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1715 // Keep DIR lock and Try to get IVT lock. 1716 // Return to LOOP state if IVT full. 1717 // Register inval in IVT, and invalidate the 1718 // directory if IVT not full. 1719 { 1720 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1721 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1722 1723 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1724 { 1725 size_t set = m_y[(addr_t)(r_config_address.read())]; 1726 size_t way = r_config_dir_way.read(); 1727 1728 if ( r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 1729 { 1730 m_cache_directory.inval( way, set ); 1731 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1732 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1733 r_config_address = r_config_address.read() + (m_words<<2); 1734 r_config_fsm = CONFIG_LOOP; 1735 1736 #if DEBUG_MEMC_CONFIG 1737 if(m_debug) 1738 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1739 << " No copies in L1 : inval DIR entry" << std::endl; 1740 #endif 1741 } 1742 else // try to register inval in IVT 1743 { 1744 bool wok = false; 1745 size_t index = 0; 1746 bool broadcast = r_config_dir_is_cnt.read(); 1747 size_t srcid = r_config_srcid.read(); 1748 size_t trdid = r_config_trdid.read(); 1749 size_t pktid = r_config_pktid.read(); 1750 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 1751 size_t nb_copies = r_config_dir_count.read(); 1752 1753 wok = m_ivt.set(false, // it's an inval transaction 1754 broadcast, 1755 false, // no response required 1756 true, // acknowledge required 1757 srcid, 1758 trdid, 1759 pktid, 1760 nline, 1761 nb_copies, 1762 index); 1763 1764 if ( wok ) // IVT success => inval DIR slot 1765 { 1766 m_cache_directory.inval( way, set ); 1767 r_config_ivt_index = index; 1768 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1769 else r_config_fsm = CONFIG_INVAL_SEND; 1770 1771 #if DEBUG_MEMC_CONFIG 1772 if(m_debug) 1773 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1774 << " Inval DIR entry and register inval in IVT" 1775 << " / index = " << std::dec << index 1776 << " / broadcast = " << broadcast << std::endl; 1777 #endif 1778 } 1779 else // IVT full => release both DIR and IVT locks 1780 { 1781 r_config_fsm = CONFIG_LOOP; 1782 1783 #if DEBUG_MEMC_CONFIG 1784 if(m_debug) 1785 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1786 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1787 #endif 1788 } 1789 } 1790 } 1791 break; 1792 } 1793 //////////////////// 1794 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 1795 { 1796 if( not r_config_to_cc_send_multi_req.read() and 1797 not r_config_to_cc_send_brdcast_req.read() ) 1798 { 1799 // post bc inval request 1800 r_config_to_cc_send_multi_req = false; 1801 r_config_to_cc_send_brdcast_req = true; 1802 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1803 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1804 1805 // prepare next iteration 1806 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1807 r_config_address = r_config_address.read() + (m_words<<2); 1808 r_config_fsm = CONFIG_LOOP; 1809 1810 #if DEBUG_MEMC_CONFIG 1811 if(m_debug) 1812 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 1813 << " Post a broadcast inval request to CC_SEND FSM" 1814 << " / address = " << r_config_address.read() <<std::endl; 1815 #endif 1816 } 1817 break; 1818 } 1819 /////////////////////// 1820 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND 
FSM 1821 { 1822 if( not r_config_to_cc_send_multi_req.read() and 1823 not r_config_to_cc_send_brdcast_req.read() ) 1824 { 1825 // post multi inval request 1826 r_config_to_cc_send_multi_req = true; 1827 r_config_to_cc_send_brdcast_req = false; 1828 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1829 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1830 1831 // post data into FIFO 1832 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 1833 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 1834 config_to_cc_send_fifo_put = true; 1835 1836 if ( r_config_dir_count.read() == 1 ) // one copy 1837 { 1838 // prepare next iteration 1839 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1840 r_config_address = r_config_address.read() + (m_words<<2); 1841 r_config_fsm = CONFIG_LOOP; 1842 } 1843 else // several copies 1844 { 1845 r_config_fsm = CONFIG_HEAP_REQ; 1846 } 1847 1848 #if DEBUG_MEMC_CONFIG 1849 if(m_debug) 1850 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 1851 << " Post multi inval request to CC_SEND FSM" 1852 << " / address = " << std::hex << r_config_address.read() 1853 << " / copy = " << r_config_dir_copy_srcid.read() 1854 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 1855 #endif 1856 } 1857 break; 1858 } 1859 ///////////////////// 1860 case CONFIG_HEAP_REQ: // Try to get access to Heap 1861 { 1862 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 1863 { 1864 r_config_fsm = CONFIG_HEAP_SCAN; 1865 r_config_heap_next = r_config_dir_ptr.read(); 1866 } 1867 1868 #if DEBUG_MEMC_CONFIG 1869 if(m_debug) 1870 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 1871 << " Requesting HEAP lock" << std::endl; 1872 #endif 1873 break; 1874 } 1875 ////////////////////// 1876 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 1877 { 1878 HeapEntry entry = m_heap.read( r_config_heap_next.read() ); 1879 bool last_copy = (entry.next == r_config_heap_next.read()); 1880 1881 config_to_cc_send_fifo_srcid = entry.owner.srcid; 1882 config_to_cc_send_fifo_inst = entry.owner.inst; 1883 // config_to_cc_send_fifo_last = last_copy; 1884 config_to_cc_send_fifo_put = true; 1885 1886 if ( m_config_to_cc_send_inst_fifo.wok() ) // inval request accepted 1887 { 1888 r_config_heap_next = entry.next; 1889 if ( last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 1890 } 1891 1892 #if DEBUG_MEMC_CONFIG 1893 if(m_debug) 1894 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 1895 << " Post multi inval request to CC_SEND FSM" 1896 << " / address = " << std::hex << r_config_address.read() 1897 << " / copy = " << entry.owner.srcid 1898 << " / inst = " << std::dec << entry.owner.inst << std::endl; 1899 #endif 1900 break; 1901 } 1902 ////////////////////// 1903 case CONFIG_HEAP_LAST: // HEAP housekeeping 1904 { 1905 size_t free_pointer = m_heap.next_free_ptr(); 1906 HeapEntry last_entry; 1907 last_entry.owner.srcid = 0; 1908 last_entry.owner.inst = false; 1909 1910 if ( m_heap.is_full() ) 1911 { 1912 last_entry.next = r_config_dir_ptr.read(); 1913 m_heap.unset_full(); 1914 } 1915 else 1916 { 1917 last_entry.next = free_pointer; 1918 } 1919 1920 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 1921 m_heap.write( r_config_heap_next.read(), last_entry ); 1922 1923 // prepare next iteration 1924 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1925 r_config_address = r_config_address.read() + (m_words<<2); 1926 r_config_fsm = CONFIG_LOOP; 1927 1928 #if DEBUG_MEMC_CONFIG 1929 if(m_debug) 1930 std::cout << " <MEMC " << 
name() << " CONFIG_HEAP_LAST>" 1931 << " Heap housekeeping" << std::endl; 1932 #endif 1933 break; 1934 } 1935 } // end switch r_config_fsm 1936 1937 //////////////////////////////////////////////////////////////////////////////////// 1938 // READ FSM 1939 //////////////////////////////////////////////////////////////////////////////////// 1940 // The READ FSM controls the VCI read and ll requests. 1941 // It takes the lock protecting the cache directory to check the cache line status: 1942 // - In case of HIT 1943 // The fsm copies the data (one line, or one single word) 1944 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 1945 // The requesting initiator is registered in the cache directory. 1946 // If the number of copy is larger than 1, the new copy is registered 1947 // in the HEAP. 1948 // If the number of copy is larger than the threshold, the HEAP is cleared, 1949 // and the corresponding line switches to the counter mode. 1950 // - In case of MISS 1951 // The READ fsm takes the lock protecting the transaction tab. 1952 // If a read transaction to the XRAM for this line already exists, 1953 // or if the transaction tab is full, the fsm is stalled. 1954 // If a TRT entry is free, the READ request is registered in TRT, 1955 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 1956 // The READ FSM returns in the IDLE state as the read transaction will be 1957 // completed when the missing line will be received. 1958 //////////////////////////////////////////////////////////////////////////////////// 1959 1960 //std::cout << std::endl << "read_fsm" << std::endl; 1961 1962 switch(r_read_fsm.read()) 1963 { 1964 /////////////// 1965 case READ_IDLE: // waiting a read request 1966 { 1967 if(m_cmd_read_addr_fifo.rok()) 1968 { 1969 1970 #if DEBUG_MEMC_READ 1971 if(m_debug) 1972 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 1973 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 1974 << " / srcid = " << m_cmd_read_srcid_fifo.read() 1975 << " / trdid = " << m_cmd_read_trdid_fifo.read() 1976 << " / pktid = " << m_cmd_read_pktid_fifo.read() 1977 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1978 #endif 1979 r_read_fsm = READ_DIR_REQ; 1980 } 1981 break; 1982 } 1983 ////////////////// 1984 case READ_DIR_REQ: // Get the lock to the directory 1985 { 1986 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1987 { 1988 r_read_fsm = READ_DIR_LOCK; 1989 } 1990 1991 #if DEBUG_MEMC_READ 1992 if(m_debug) 1993 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 1994 #endif 1995 break; 1996 } 1997 1998 /////////////////// 1999 case READ_DIR_LOCK: // check directory for hit / miss 2000 { 2001 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2002 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2003 2004 size_t way = 0; 2005 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2006 2007 // access the global table ONLY when we have an LL cmd 2008 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 2009 { 2010 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2011 } 2012 r_read_is_cnt = entry.is_cnt; 2013 r_read_dirty = entry.dirty; 2014 r_read_lock = entry.lock; 2015 r_read_tag = entry.tag; 2016 r_read_way = way; 2017 r_read_count = entry.count; 2018 r_read_copy = entry.owner.srcid; 2019 r_read_copy_inst = entry.owner.inst; 2020 r_read_ptr = entry.ptr; // pointer to the heap 2021 2022 // check if this is a cached read, this 
means pktid is either 2023 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2024 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2025 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2026 if(entry.valid) // hit 2027 { 2028 // test if we need to register a new copy in the heap 2029 if(entry.is_cnt or (entry.count == 0) or !cached_read) 2030 { 2031 r_read_fsm = READ_DIR_HIT; 2032 } 2033 else 2034 { 2035 r_read_fsm = READ_HEAP_REQ; 2036 } 2037 } 2038 else // miss 2039 { 2040 r_read_fsm = READ_TRT_LOCK; 2041 } 2042 2043 #if DEBUG_MEMC_READ 2044 if(m_debug) 2045 { 2046 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2047 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2048 << " / hit = " << std::dec << entry.valid 2049 << " / count = " <<std::dec << entry.count 2050 << " / is_cnt = " << entry.is_cnt; 2051 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2052 else std::cout << std::endl; 2053 } 2054 #endif 2055 break; 2056 } 2057 ////////////////// 2058 case READ_DIR_HIT: // read data in cache & update the directory 2059 // we enter this state in 3 cases: 2060 // - the read request is uncachable 2061 // - the cache line is in counter mode 2062 // - the cache line is valid but not replicated 2063 2064 { 2065 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2066 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2067 2068 // check if this is an instruction read, this means pktid is either 2069 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2070 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2071 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2072 // check if this is a cached read, this means pktid is either 2073 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2074 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2075 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2076 bool is_cnt = r_read_is_cnt.read(); 2077 2078 // read data in the cache 2079 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2080 size_t way = r_read_way.read(); 2081 2082 m_cache_data.read_line(way, set, r_read_data); 2083 2084 // update the cache directory 2085 DirectoryEntry entry; 2086 entry.valid = true; 2087 entry.is_cnt = is_cnt; 2088 entry.dirty = r_read_dirty.read(); 2089 entry.tag = r_read_tag.read(); 2090 entry.lock = r_read_lock.read(); 2091 entry.ptr = r_read_ptr.read(); 2092 2093 if(cached_read) // Cached read => we must update the copies 2094 { 2095 if(!is_cnt) // Not counter mode 2096 { 2097 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2098 entry.owner.inst = inst_read; 2099 entry.count = r_read_count.read() + 1; 2100 } 2101 else // Counter mode 2102 { 2103 entry.owner.srcid = 0; 2104 entry.owner.inst = false; 2105 entry.count = r_read_count.read() + 1; 2106 } 2107 } 2108 else // Uncached read 2109 { 2110 entry.owner.srcid = r_read_copy.read(); 2111 entry.owner.inst = r_read_copy_inst.read(); 2112 entry.count = r_read_count.read(); 2113 } 2114 2115 #if DEBUG_MEMC_READ 2116 if(m_debug) 2117 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2118 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2119 << " / set = " << std::dec << set 2120 << " / way = " << way 2121 << " / owner_id = " << std::hex << entry.owner.srcid 2122 << " / owner_ins = " << std::dec << entry.owner.inst 2123 << " / count = " << entry.count 2124 << " / is_cnt = " << entry.is_cnt << std::endl; 2125 #endif 2126 m_cache_directory.write(set, way, entry); 2127 r_read_fsm = 
READ_RSP; 2128 break; 2129 } 2130 /////////////////// 2131 case READ_HEAP_REQ: // Get the lock to the HEAP directory 2132 { 2133 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2134 { 2135 r_read_fsm = READ_HEAP_LOCK; 2136 } 2137 2138 #if DEBUG_MEMC_READ 2139 if(m_debug) 2140 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 2141 << " Requesting HEAP lock " << std::endl; 2142 #endif 2143 break; 2144 } 2145 2146 //////////////////// 2147 case READ_HEAP_LOCK: // read data in cache, update the directory 2148 // and prepare the HEAP update 2149 { 2150 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2151 { 2152 // enter counter mode when we reach the limit of copies or the heap is full 2153 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 2154 2155 // read data in the cache 2156 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2157 size_t way = r_read_way.read(); 2158 2159 m_cache_data.read_line(way, set, r_read_data); 2160 2161 // update the cache directory 2162 DirectoryEntry entry; 2163 entry.valid = true; 2164 entry.is_cnt = go_cnt; 2165 entry.dirty = r_read_dirty.read(); 2166 entry.tag = r_read_tag.read(); 2167 entry.lock = r_read_lock.read(); 2168 entry.count = r_read_count.read() + 1; 2169 2170 if(not go_cnt) // Not entering counter mode 2171 { 2172 entry.owner.srcid = r_read_copy.read(); 2173 entry.owner.inst = r_read_copy_inst.read(); 2174 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 2175 } 2176 else // Entering Counter mode 2177 { 2178 entry.owner.srcid = 0; 2179 entry.owner.inst = false; 2180 entry.ptr = 0; 2181 } 2182 2183 m_cache_directory.write(set, way, entry); 2184 2185 // prepare the heap update (add an entry, or clear the linked list) 2186 if(not go_cnt) // not switching to counter mode 2187 { 2188 // We test if the next free entry in the heap is the last 2189 HeapEntry heap_entry = m_heap.next_free_entry(); 2190 r_read_next_ptr = heap_entry.next; 2191 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 2192 2193 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 2194 } 2195 else // switching to counter mode 2196 { 2197 if(r_read_count.read() >1) // heap must be cleared 2198 { 2199 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2200 r_read_next_ptr = m_heap.next_free_ptr(); 2201 m_heap.write_free_ptr(r_read_ptr.read()); 2202 2203 if(next_entry.next == r_read_ptr.read()) // last entry 2204 { 2205 r_read_fsm = READ_HEAP_LAST; // erase the entry 2206 } 2207 else // not the last entry 2208 { 2209 r_read_ptr = next_entry.next; 2210 r_read_fsm = READ_HEAP_ERASE; // erase the list 2211 } 2212 } 2213 else // the heap is not used / nothing to do 2214 { 2215 r_read_fsm = READ_RSP; 2216 } 2217 } 2218 2219 #if DEBUG_MEMC_READ 2220 if(m_debug) 2221 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 2222 << " tag = " << std::hex << entry.tag 2223 << " set = " << std::dec << set 2224 << " way = " << way 2225 << " count = " << entry.count 2226 << " is_cnt = " << entry.is_cnt << std::endl; 2227 #endif 2228 } 2229 else 2230 { 2231 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK" 2232 << "Bad HEAP allocation" << std::endl; 2233 exit(0); 2234 } 2235 break; 2236 } 2237 ///////////////////// 2238 case READ_HEAP_WRITE: // add an entry in the heap 2239 { 2240 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2241 { 2242 HeapEntry heap_entry; 2243 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2244 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2245 2246 
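// --------------------------------------------------------------------------
// Aside (editor's illustration, not part of the changeset): READ_HEAP_WRITE
// performs a head insertion in the heap-allocated sharing list of the line,
// taking the entry returned by m_heap.next_free_entry() and advancing the
// free pointer. The standalone sketch below models that insertion in plain
// C++; the struct, the container and the self-pointing "last entry" mark are
// illustrative assumptions, not the SoCLib HeapDirectory interface, and the
// full-heap / counter-mode handling of READ_HEAP_LOCK is omitted.
#include <cstddef>
#include <cstdint>
#include <vector>

struct CopyEntrySketch
{
    uint32_t srcid; // owner identifier (L1 cache)
    bool     inst;  // instruction-cache copy
    size_t   next;  // next copy in the list (the free list reuses this field)
};

// Head-insert a new copy and return the new list head (stored in entry.ptr).
static size_t heap_head_insert(std::vector<CopyEntrySketch>& heap,
                               size_t& free_head,      // next_free_ptr()
                               size_t list_head,       // current head (r_read_ptr)
                               bool list_exists,       // false when count == 1
                               uint32_t srcid, bool inst)
{
    size_t slot      = free_head;         // entry taken from the free list
    size_t next_free = heap[slot].next;   // following free entry
    heap[slot].srcid = srcid;
    heap[slot].inst  = inst;
    heap[slot].next  = list_exists ? list_head // head insertion in existing list
                                   : slot;     // first heap entry points to itself,
                                               // marking the end of the list
    free_head = next_free;                // write_free_ptr(...)
    return slot;
}
// --------------------------------------------------------------------------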
if(r_read_count.read() == 1) // creation of a new linked list 2247 { 2248 heap_entry.next = m_heap.next_free_ptr(); 2249 } 2250 else // head insertion in existing list 2251 { 2252 heap_entry.next = r_read_ptr.read(); 2253 } 2254 m_heap.write_free_entry(heap_entry); 2255 m_heap.write_free_ptr(r_read_next_ptr.read()); 2256 if(r_read_last_free.read()) m_heap.set_full(); 2257 2258 r_read_fsm = READ_RSP; 2259 2260 #if DEBUG_MEMC_READ 2261 if(m_debug) 2262 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 2263 << " owner_id = " << std::hex << heap_entry.owner.srcid 2264 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 2265 #endif 2266 } 2267 else 2268 { 2269 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 2270 << "Bad HEAP allocation" << std::endl; 2271 exit(0); 2272 } 2273 break; 2274 } 2275 ///////////////////// 2276 case READ_HEAP_ERASE: 2277 { 2278 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2279 { 2280 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2281 if(next_entry.next == r_read_ptr.read()) 2282 { 2283 r_read_fsm = READ_HEAP_LAST; 2284 } 2285 else 2286 { 2287 r_read_ptr = next_entry.next; 2288 r_read_fsm = READ_HEAP_ERASE; 2289 } 2290 } 2291 else 2292 { 2293 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 2294 << "Bad HEAP allocation" << std::endl; 2295 exit(0); 2296 } 2297 break; 2298 } 2299 2300 //////////////////// 2301 case READ_HEAP_LAST: 2302 { 2303 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2304 { 2305 HeapEntry last_entry; 2306 last_entry.owner.srcid = 0; 2307 last_entry.owner.inst = false; 2308 2309 if(m_heap.is_full()) 2310 { 2311 last_entry.next = r_read_ptr.read(); 2312 m_heap.unset_full(); 2313 } 2314 else 2315 { 2316 last_entry.next = r_read_next_ptr.read(); 2317 } 2318 m_heap.write(r_read_ptr.read(),last_entry); 2319 r_read_fsm = READ_RSP; 2320 } 2321 else 2322 { 2323 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 2324 << "Bad HEAP allocation" << std::endl; 2325 exit(0); 2326 } 2327 break; 2328 } 2329 ////////////// 2330 case READ_RSP: // request the TGT_RSP FSM to return data 2331 { 2332 if(!r_read_to_tgt_rsp_req) 2333 { 2334 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2335 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2336 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2337 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2338 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2339 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 2340 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2341 cmd_read_fifo_get = true; 2342 r_read_to_tgt_rsp_req = true; 2343 r_read_fsm = READ_IDLE; 2344 2345 #if DEBUG_MEMC_READ 2346 if(m_debug) 2347 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 2348 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 2349 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 2350 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2351 #endif 2352 } 2353 break; 2354 } 2355 /////////////////// 2356 case READ_TRT_LOCK: // read miss : check the Transaction Table 2357 { 2358 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2359 { 2360 size_t index = 0; 2361 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2362 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2363 bool hit_write = m_trt.hit_write(m_nline[addr]); 2364 bool wok = not m_trt.full(index); 2365 2366 if(hit_read 
or !wok or hit_write) // line already requested or no space 2367 { 2368 if(!wok) m_cpt_trt_full++; 2369 if(hit_read or hit_write) m_cpt_trt_rb++; 2370 r_read_fsm = READ_IDLE; 2371 } 2372 else // missing line is requested to the XRAM 2373 { 2374 m_cpt_read_miss++; 2375 r_read_trt_index = index; 2376 r_read_fsm = READ_TRT_SET; 2377 } 2378 2379 #if DEBUG_MEMC_READ 2380 if(m_debug) 2381 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 2382 << " hit_read = " << hit_read 2383 << " / hit_write = " << hit_write 2384 << " / full = " << !wok << std::endl; 2385 #endif 2386 } 2387 break; 2388 } 2389 ////////////////// 2390 case READ_TRT_SET: // register get transaction in TRT 2391 { 2392 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2393 { 2394 m_trt.set( r_read_trt_index.read(), 2395 true, // GET 2396 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2397 m_cmd_read_srcid_fifo.read(), 2398 m_cmd_read_trdid_fifo.read(), 2399 m_cmd_read_pktid_fifo.read(), 2400 true, // proc read 2401 m_cmd_read_length_fifo.read(), 2402 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2403 std::vector<be_t> (m_words,0), 2404 std::vector<data_t> (m_words,0), 2405 r_read_ll_key.read() ); 2406 #if DEBUG_MEMC_READ 2407 if(m_debug) 2408 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2409 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2410 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2411 #endif 2412 r_read_fsm = READ_TRT_REQ; 2413 } 2414 break; 2415 } 2416 2417 ////////////////// 2418 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2419 { 2420 if(not r_read_to_ixr_cmd_req) 2421 { 2422 cmd_read_fifo_get = true; 2423 r_read_to_ixr_cmd_req = true; 2424 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2425 r_read_fsm = READ_IDLE; 2426 2427 #if DEBUG_MEMC_READ 2428 if(m_debug) 2429 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 2430 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2431 #endif 2432 } 2433 break; 2434 } 2435 } // end switch read_fsm 2436 2437 /////////////////////////////////////////////////////////////////////////////////// 2438 // WRITE FSM 2439 /////////////////////////////////////////////////////////////////////////////////// 2440 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 2441 // All addresses in a burst must be in the same cache line. 2442 // A complete write burst is consumed in the FIFO & copied to a local buffer. 2443 // Then the FSM takes the lock protecting the cache directory, to check 2444 // if the line is in the cache. 2445 // 2446 // - In case of HIT, the cache is updated. 2447 // If there is no other copy, an acknowledge response is immediately 2448 // returned to the writing processor. 2449 // If the data is cached by other processors, a coherence transaction must 2450 // be launched (sc requests always require a coherence transaction): 2451 // It is a multicast update if the line is not in counter mode: the processor 2452 // takes the lock protecting the Update Table (UPT) to register this transaction. 2453 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2454 // a multi-update request to all owners of the line (but the writer), 2455 // through the CC_SEND FSM. In case of coherence transaction, the WRITE FSM 2456 // does not respond to the writing processor, as this response will be sent by 2457 // the MULTI_ACK FSM when all update responses have been received. 
2458 // It is a broadcast invalidate if the line is in counter mode: The line 2459 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2460 // after registration in TRT. 2461 // 2462 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2463 // table (TRT). If a read transaction to the XRAM for this line already exists, 2464 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2465 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2466 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2467 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2468 ///////////////////////////////////////////////////////////////////////////////////// 2469 2470 //std::cout << std::endl << "write_fsm" << std::endl; 2471 2472 switch(r_write_fsm.read()) 2473 { 2474 //////////////// 2475 case WRITE_IDLE: // copy first word of a write burst in local buffer 2476 { 2477 if(m_cmd_write_addr_fifo.rok()) 2478 { 2479 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2480 { 2481 m_cpt_sc++; 2482 } 2483 else 2484 { 2485 m_cpt_write++; 2486 m_cpt_write_cells++; 2487 } 2488 2489 // consume a word in the FIFO & write it in the local buffer 2490 cmd_write_fifo_get = true; 2491 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2492 2493 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2494 r_write_word_index = index; 2495 r_write_word_count = 1; 2496 r_write_data[index] = m_cmd_write_data_fifo.read(); 2497 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2498 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2499 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2500 r_write_pending_sc = false; 2501 2502 // initialize the be field for all words 2503 for(size_t word=0 ; word<m_words ; word++) 2504 { 2505 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2506 else r_write_be[word] = 0x0; 2507 } 2508 2509 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2510 { 2511 r_write_fsm = WRITE_DIR_REQ; 2512 } 2513 else 2514 { 2515 r_write_fsm = WRITE_NEXT; 2516 } 2645 //////////////// 2646 case WRITE_IDLE: // copy first word of a write burst in local buffer 2647 { 2648 if (m_cmd_write_addr_fifo.rok()) 2649 { 2650 // consume a word in the FIFO & write it in the local buffer 2651 cmd_write_fifo_get = true; 2652 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2653 2654 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2655 r_write_word_index = index; 2656 r_write_word_count = 1; 2657 r_write_data[index] = m_cmd_write_data_fifo.read(); 2658 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2659 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2660 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2661 r_write_pending_sc = false; 2662 2663 // initialize the be field for all words 2664 for(size_t word=0 ; word<m_words ; word++) 2665 { 2666 if (word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2667 else r_write_be[word] = 0x0; 2668 } 2669 2670 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2671 { 2672 r_write_fsm = WRITE_DIR_REQ; 2673 } 2674 else 2675 { 2676 r_write_fsm = WRITE_NEXT; 2677 } 2517 2678 2518 2679 #if DEBUG_MEMC_WRITE 2519 if(m_debug)2520 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request "2521 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read()2522 << " / address = " << std::hex << m_cmd_write_addr_fifo.read()2523 << " / data = " << 
m_cmd_write_data_fifo.read() << std::endl;2524 #endif 2525 }2526 break;2527 }2528 ////////////////2529 case WRITE_NEXT: // copy next word of a write burst in local buffer2530 {2531 if(m_cmd_write_addr_fifo.rok())2532 {2680 if (m_debug) 2681 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request " 2682 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 2683 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 2684 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 2685 #endif 2686 } 2687 break; 2688 } 2689 //////////////// 2690 case WRITE_NEXT: // copy next word of a write burst in local buffer 2691 { 2692 if (m_cmd_write_addr_fifo.rok()) 2693 { 2533 2694 2534 2695 #if DEBUG_MEMC_WRITE 2535 if(m_debug) 2536 std::cout << " <MEMC " << name() 2537 << " WRITE_NEXT> Write another word in local buffer" 2538 << std::endl; 2539 #endif 2540 m_cpt_write_cells++; 2541 2542 // check that the next word is in the same cache line 2543 assert( (m_nline[(addr_t)(r_write_address.read())] == 2544 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2545 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2546 2547 // consume a word in the FIFO & write it in the local buffer 2548 cmd_write_fifo_get = true; 2549 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2550 2551 r_write_be[index] = m_cmd_write_be_fifo.read(); 2552 r_write_data[index] = m_cmd_write_data_fifo.read(); 2553 r_write_word_count = r_write_word_count.read() + 1; 2554 2555 if(m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2556 } 2557 break; 2558 } 2559 /////////////////// 2560 case WRITE_DIR_REQ: // Get the lock to the directory 2561 // and access the llsc_global_table 2562 { 2563 if( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2564 { 2565 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2566 { 2567 // We enter here if it is a new SC command 2568 // If r_write_pending_sc is set the SC is not new and has already been tested 2569 2570 if(not m_cmd_write_addr_fifo.rok()) break; 2571 2572 assert( m_cmd_write_eop_fifo.read() and 2573 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2574 2575 size_t index = r_write_word_index.read(); 2576 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2577 r_write_data[index].read()); 2578 2579 // consume a word in the FIFO & write it in the local buffer 2580 cmd_write_fifo_get = true; 2581 r_write_data[index] = m_cmd_write_data_fifo.read(); 2582 r_write_sc_fail = not sc_success; 2583 r_write_pending_sc = true; 2584 2585 if(not sc_success) r_write_fsm = WRITE_RSP; 2586 else r_write_fsm = WRITE_DIR_LOCK; 2587 } 2588 else 2589 { 2590 // We enter here if it is a SW command or an already tested SC command 2591 2592 m_llsc_table.sw( m_nline[(addr_t)r_write_address.read()], 2593 r_write_word_index.read(), 2594 r_write_word_index.read() + r_write_word_count.read() ); 2595 2596 r_write_fsm = WRITE_DIR_LOCK; 2597 } 2696 if (m_debug) 2697 std::cout << " <MEMC " << name() 2698 << " WRITE_NEXT> Write another word in local buffer" 2699 << std::endl; 2700 #endif 2701 2702 // check that the next word is in the same cache line 2703 assert((m_nline[(addr_t)(r_write_address.read())] == 2704 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2705 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2706 2707 // consume a word in the FIFO & write it in the local buffer 2708 cmd_write_fifo_get = true; 2709 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2710 2711 
r_write_be[index] = m_cmd_write_be_fifo.read(); 2712 r_write_data[index] = m_cmd_write_data_fifo.read(); 2713 r_write_word_count = r_write_word_count.read() + 1; 2714 2715 if (m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2716 } 2717 break; 2718 } 2719 /////////////////// 2720 case WRITE_DIR_REQ: // Get the lock to the directory 2721 // and access the llsc_global_table 2722 { 2723 if (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2724 { 2725 if (((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2726 { 2727 // We enter here if it is a new SC command 2728 // If r_write_pending_sc is set the SC is not new and has already been tested 2729 2730 if (not m_cmd_write_addr_fifo.rok()) break; 2731 2732 assert( m_cmd_write_eop_fifo.read() and 2733 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2734 2735 size_t index = r_write_word_index.read(); 2736 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2737 r_write_data[index].read()); 2738 2739 // consume a word in the FIFO & write it in the local buffer 2740 cmd_write_fifo_get = true; 2741 r_write_data[index] = m_cmd_write_data_fifo.read(); 2742 r_write_sc_fail = not sc_success; 2743 r_write_pending_sc = true; 2744 2745 if (not sc_success) r_write_fsm = WRITE_RSP; 2746 else r_write_fsm = WRITE_DIR_LOCK; 2747 } 2748 else 2749 { 2750 // We enter here if it is a SW command or an already tested SC command 2751 2752 m_llsc_table.sw( m_nline[(addr_t)r_write_address.read()], 2753 r_write_word_index.read(), 2754 r_write_word_index.read() + r_write_word_count.read()); 2755 2756 r_write_fsm = WRITE_DIR_LOCK; 2757 } 2598 2758 2599 2759 #if DEBUG_MEMC_WRITE 2600 if(m_debug)2601 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock "2602 << std::endl;2603 #endif 2604 }2605 break;2606 }2607 ////////////////////2608 case WRITE_DIR_LOCK: // access directory to check hit/miss2609 {2610 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and2611 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation");2612 2613 size_t way = 0;2614 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way));2615 2616 if(entry.valid) // hit2617 {2618 // copy directory entry in local buffer in case of hit2619 r_write_is_cnt = entry.is_cnt;2620 r_write_lock = entry.lock;2621 r_write_tag = entry.tag;2622 r_write_copy = entry.owner.srcid;2623 r_write_copy_inst = entry.owner.inst;2624 r_write_count = entry.count;2625 r_write_ptr = entry.ptr;2626 r_write_way = way;2627 2628 if(entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ;2629 else r_write_fsm = WRITE_DIR_HIT;2630 }2631 else // miss2632 {2633 r_write_fsm = WRITE_MISS_TRT_LOCK;2634 }2760 if (m_debug) 2761 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock " 2762 << std::endl; 2763 #endif 2764 } 2765 break; 2766 } 2767 //////////////////// 2768 case WRITE_DIR_LOCK: // access directory to check hit/miss 2769 { 2770 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2771 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 2772 2773 size_t way = 0; 2774 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 2775 2776 if (entry.valid) // hit 2777 { 2778 // copy directory entry in local buffer in case of hit 2779 r_write_is_cnt = entry.is_cnt; 2780 r_write_lock = entry.lock; 2781 r_write_tag = entry.tag; 2782 r_write_copy = entry.owner.srcid; 2783 r_write_copy_inst = entry.owner.inst; 2784 r_write_count = entry.count; 2785 r_write_ptr = entry.ptr; 2786 r_write_way = way; 2787 
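// A line in counter mode (is_cnt) that still has registered copies cannot be
// updated by a multicast: the write is redirected to the broadcast-invalidate
// path (WRITE_BC_* states); otherwise the normal WRITE_DIR_HIT path is taken.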
2788 if (entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ; 2789 else r_write_fsm = WRITE_DIR_HIT; 2790 } 2791 else // miss 2792 { 2793 r_write_fsm = WRITE_MISS_TRT_LOCK; 2794 } 2635 2795 2636 2796 #if DEBUG_MEMC_WRITE 2637 if(m_debug)2638 {2639 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: "2640 << " address = " << std::hex << r_write_address.read()2641 << " / hit = " << std::dec << entry.valid2642 << " / count = " << entry.count2643 << " / is_cnt = " << entry.is_cnt ;2644 if((r_write_pktid.read() & 0x7) == TYPE_SC)2645 std::cout << " / SC access" << std::endl;2646 else2647 std::cout << " / SW access" << std::endl;2648 }2649 #endif 2650 break;2651 }2652 ///////////////////2653 case WRITE_DIR_HIT: // update the cache directory with Dirty bit2654 2655 {2656 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and2657 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation");2658 2659 DirectoryEntry entry;2660 entry.valid = true;2661 entry.dirty = true;2662 entry.tag = r_write_tag.read();2663 entry.is_cnt = r_write_is_cnt.read();2664 entry.lock = r_write_lock.read();2665 entry.owner.srcid = r_write_copy.read();2666 entry.owner.inst = r_write_copy_inst.read();2667 entry.count = r_write_count.read();2668 entry.ptr = r_write_ptr.read();2669 2670 size_t set = m_y[(addr_t)(r_write_address.read())];2671 size_t way = r_write_way.read();2672 2673 // update directory2674 m_cache_directory.write(set, way, entry);2675 2676 // owner is true when the the first registered copy is the writer itself2677 bool owner = ( (r_write_copy.read() == r_write_srcid.read())2678 and not r_write_copy_inst.read());2679 2680 // no_update is true when there is no need for coherence transaction2681 bool no_update = ( (r_write_count.read() == 0) or2682 (owner and (r_write_count.read() ==1) and2683 (r_write_pktid.read() != TYPE_SC)));2684 2685 // write data in the cache if no coherence transaction2686 if(no_update)2687 {2688 for(size_t word=0 ; word<m_words ; word++)2689 {2690 m_cache_data.write( way,2797 if (m_debug) 2798 { 2799 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: " 2800 << " address = " << std::hex << r_write_address.read() 2801 << " / hit = " << std::dec << entry.valid 2802 << " / count = " << entry.count 2803 << " / is_cnt = " << entry.is_cnt ; 2804 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 2805 std::cout << " / SC access" << std::endl; 2806 else 2807 std::cout << " / SW access" << std::endl; 2808 } 2809 #endif 2810 break; 2811 } 2812 /////////////////// 2813 case WRITE_DIR_HIT: // update the cache directory with Dirty bit 2814 // and update data cache 2815 { 2816 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2817 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 2818 2819 DirectoryEntry entry; 2820 entry.valid = true; 2821 entry.dirty = true; 2822 entry.tag = r_write_tag.read(); 2823 entry.is_cnt = r_write_is_cnt.read(); 2824 entry.lock = r_write_lock.read(); 2825 entry.owner.srcid = r_write_copy.read(); 2826 entry.owner.inst = r_write_copy_inst.read(); 2827 entry.count = r_write_count.read(); 2828 entry.ptr = r_write_ptr.read(); 2829 2830 size_t set = m_y[(addr_t)(r_write_address.read())]; 2831 size_t way = r_write_way.read(); 2832 2833 // update directory 2834 m_cache_directory.write(set, way, entry); 2835 2836 // owner is true when the the first registered copy is the writer itself 2837 bool owner = ( (r_write_copy.read() == r_write_srcid.read()) 2838 and not r_write_copy_inst.read()); 2839 2840 // no_update is true when 
there is no need for coherence transaction 2841 bool no_update = ( (r_write_count.read() == 0) or 2842 (owner and (r_write_count.read() ==1) and 2843 (r_write_pktid.read() != TYPE_SC))); 2844 2845 // write data in the cache if no coherence transaction 2846 if (no_update) 2847 { 2848 for(size_t word=0 ; word<m_words ; word++) 2849 { 2850 m_cache_data.write( way, 2691 2851 set, 2692 2852 word, 2693 2853 r_write_data[word].read(), 2694 2854 r_write_be[word].read()); 2695 }2696 }2697 2698 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC))2699 {2700 r_write_count = r_write_count.read() - 1;2701 }2702 2703 if(no_update) // Write transaction completed2704 {2705 r_write_fsm = WRITE_RSP;2706 }2707 else // coherence update required2708 {2709 if(!r_write_to_cc_send_multi_req.read() and2710 !r_write_to_cc_send_brdcast_req.read())2711 {2712 r_write_fsm = WRITE_UPT_LOCK;2713 }2714 else2715 {2716 r_write_fsm = WRITE_WAIT;2717 }2718 }2855 } 2856 } 2857 2858 if (owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2859 { 2860 r_write_count = r_write_count.read() - 1; 2861 } 2862 2863 if (no_update) // Write transaction completed 2864 { 2865 r_write_fsm = WRITE_RSP; 2866 } 2867 else // coherence update required 2868 { 2869 if (!r_write_to_cc_send_multi_req.read() and 2870 !r_write_to_cc_send_brdcast_req.read()) 2871 { 2872 r_write_fsm = WRITE_UPT_LOCK; 2873 } 2874 else 2875 { 2876 r_write_fsm = WRITE_WAIT; 2877 } 2878 } 2719 2879 2720 2880 #if DEBUG_MEMC_WRITE 2721 if(m_debug)2722 {2723 if(no_update)2724 {2725 std::cout << " <MEMC " << name()2726 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl;2727 }2728 else2729 {2730 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:"2731 << " is_cnt = " << r_write_is_cnt.read()2732 << " nb_copies = " << std::dec << r_write_count.read() << std::endl;2733 if(owner) std::cout << " ... but the first copy is the writer" << std::endl;2734 }2735 }2736 #endif 2737 break;2738 }2739 ////////////////////2740 case WRITE_UPT_LOCK: // Try to register the update request in UPT2741 {2742 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE)2743 {2744 bool wok = false;2745 size_t index = 0;2746 size_t srcid = r_write_srcid.read();2747 size_t trdid = r_write_trdid.read();2748 size_t pktid = r_write_pktid.read();2749 addr_t nline = m_nline[(addr_t)(r_write_address.read())];2750 size_t nb_copies = r_write_count.read();2751 size_t set = m_y[(addr_t)(r_write_address.read())];2752 size_t way = r_write_way.read();2753 2754 wok = m_upt.set( true, // it's an update transaction2755 false, // it's not a broadcast2756 true, // response required2757 false, // no acknowledge required2758 srcid,2759 trdid,2760 pktid,2761 nline,2762 nb_copies,2763 index);2764 2765 if(wok ) // write data in cache2766 {2767 for(size_t word=0 ; word<m_words ; word++)2768 {2769 m_cache_data.write( way,2881 if (m_debug) 2882 { 2883 if (no_update) 2884 { 2885 std::cout << " <MEMC " << name() 2886 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl; 2887 } 2888 else 2889 { 2890 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2891 << " is_cnt = " << r_write_is_cnt.read() 2892 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2893 if (owner) std::cout << " ... 
but the first copy is the writer" << std::endl; 2894 } 2895 } 2896 #endif 2897 break; 2898 } 2899 //////////////////// 2900 case WRITE_UPT_LOCK: // Try to register the update request in UPT 2901 { 2902 if (r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2903 { 2904 bool wok = false; 2905 size_t index = 0; 2906 size_t srcid = r_write_srcid.read(); 2907 size_t trdid = r_write_trdid.read(); 2908 size_t pktid = r_write_pktid.read(); 2909 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2910 size_t nb_copies = r_write_count.read(); 2911 size_t set = m_y[(addr_t)(r_write_address.read())]; 2912 size_t way = r_write_way.read(); 2913 2914 wok = m_upt.set( true, // it's an update transaction 2915 false, // it's not a broadcast 2916 true, // response required 2917 false, // no acknowledge required 2918 srcid, 2919 trdid, 2920 pktid, 2921 nline, 2922 nb_copies, 2923 index); 2924 2925 if (wok ) // write data in cache 2926 { 2927 for(size_t word=0 ; word<m_words ; word++) 2928 { 2929 m_cache_data.write( way, 2770 2930 set, 2771 2931 word, 2772 2932 r_write_data[word].read(), 2773 2933 r_write_be[word].read()); 2774 } 2934 } 2935 } 2936 2937 #if DEBUG_MEMC_WRITE 2938 if (m_debug and wok) 2939 { 2940 std::cout << " <MEMC " << name() 2941 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 2942 << " nb_copies = " << r_write_count.read() << std::endl; 2943 } 2944 #endif 2945 r_write_upt_index = index; 2946 // releases the lock protecting UPT and the DIR if no entry... 2947 if (wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 2948 else r_write_fsm = WRITE_WAIT; 2949 } 2950 break; 2951 } 2952 2953 ///////////////////////// 2954 case WRITE_UPT_HEAP_LOCK: // get access to heap 2955 { 2956 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 2957 { 2958 2959 #if DEBUG_MEMC_WRITE 2960 if (m_debug) 2961 std::cout << " <MEMC " << name() 2962 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 2963 #endif 2964 r_write_fsm = WRITE_UPT_REQ; 2965 } 2966 break; 2967 } 2968 2969 ////////////////// 2970 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 2971 // and write the first copy in the FIFO 2972 // send the request if only one copy 2973 { 2974 assert(not r_write_to_cc_send_multi_req.read() and 2975 not r_write_to_cc_send_brdcast_req.read() and 2976 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 2977 "transaction in WRITE_UPT_REQ state" 2978 ); 2979 2980 r_write_to_cc_send_brdcast_req = false; 2981 r_write_to_cc_send_trdid = r_write_upt_index.read(); 2982 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 2983 r_write_to_cc_send_index = r_write_word_index.read(); 2984 r_write_to_cc_send_count = r_write_word_count.read(); 2985 2986 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 2987 2988 size_t min = r_write_word_index.read(); 2989 size_t max = r_write_word_index.read() + r_write_word_count.read(); 2990 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 2991 2992 if ((r_write_copy.read() != r_write_srcid.read()) or 2993 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 2994 { 2995 // put the first srcid in the fifo 2996 write_to_cc_send_fifo_put = true; 2997 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 2998 write_to_cc_send_fifo_srcid = r_write_copy.read(); 2999 if (r_write_count.read() == 1) 3000 { 3001 r_write_fsm = WRITE_IDLE; 3002 r_write_to_cc_send_multi_req = true; 3003 } 3004 else 3005 { 3006 r_write_fsm = WRITE_UPT_NEXT; 3007 r_write_to_dec = false; 3008 3009 
} 3010 } 3011 else 3012 { 3013 r_write_fsm = WRITE_UPT_NEXT; 3014 r_write_to_dec = false; 3015 } 3016 3017 #if DEBUG_MEMC_WRITE 3018 if (m_debug) 3019 { 3020 std::cout 3021 << " <MEMC " << name() 3022 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3023 << " / srcid = " << std::dec << r_write_copy.read() 3024 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3025 3026 if (r_write_count.read() == 1) 3027 std::cout << " ... and this is the last" << std::endl; 3028 } 3029 #endif 3030 break; 3031 } 3032 3033 /////////////////// 3034 case WRITE_UPT_NEXT: 3035 { 3036 // continue the multi-update request to CC_SEND fsm 3037 // when there is copies in the heap. 3038 // if one copy in the heap is the writer itself 3039 // the corresponding SRCID should not be written in the fifo, 3040 // but the UPT counter must be decremented. 3041 // As this decrement is done in the WRITE_UPT_DEC state, 3042 // after the last copy has been found, the decrement request 3043 // must be registered in the r_write_to_dec flip-flop. 3044 3045 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3046 3047 bool dec_upt_counter; 3048 3049 // put the next srcid in the fifo 3050 if ((entry.owner.srcid != r_write_srcid.read()) or 3051 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 3052 { 3053 dec_upt_counter = false; 3054 write_to_cc_send_fifo_put = true; 3055 write_to_cc_send_fifo_inst = entry.owner.inst; 3056 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3057 3058 #if DEBUG_MEMC_WRITE 3059 if (m_debug) 3060 { 3061 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3062 << " / heap_index = " << std::dec << r_write_ptr.read() 3063 << " / srcid = " << std::dec << r_write_copy.read() 3064 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3065 if (entry.next == r_write_ptr.read()) 3066 std::cout << " ... and this is the last" << std::endl; 3067 } 3068 #endif 3069 } 3070 else // the UPT counter must be decremented 3071 { 3072 dec_upt_counter = true; 3073 3074 #if DEBUG_MEMC_WRITE 3075 if (m_debug) 3076 { 3077 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3078 << " / heap_index = " << std::dec << r_write_ptr.read() 3079 << " / srcid = " << std::dec << r_write_copy.read() 3080 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3081 if (entry.next == r_write_ptr.read()) 3082 std::cout << " ... and this is the last" << std::endl; 3083 } 3084 #endif 3085 } 3086 3087 // register the possible UPT decrement request 3088 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3089 3090 if (not m_write_to_cc_send_inst_fifo.wok()) 3091 { 3092 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3093 << "The write_to_cc_send_fifo should not be full" << std::endl 3094 << "as the depth should be larger than the max number of copies" << std::endl; 3095 exit(0); 3096 } 3097 3098 r_write_ptr = entry.next; 3099 3100 if (entry.next == r_write_ptr.read()) // last copy 3101 { 3102 r_write_to_cc_send_multi_req = true; 3103 if (r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3104 else r_write_fsm = WRITE_IDLE; 3105 } 3106 break; 3107 } 3108 3109 ////////////////// 3110 case WRITE_UPT_DEC: 3111 { 3112 // If the initial writer has a copy, it should not 3113 // receive an update request, but the counter in the 3114 // update table must be decremented by the MULTI_ACK FSM. 
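// (the decrement itself is performed by the MULTI_ACK FSM: the WRITE FSM only
// posts the request through the r_write_to_multi_ack_* registers below and
// returns to WRITE_IDLE without waiting for it)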
3115 3116 if (!r_write_to_multi_ack_req.read()) 3117 { 3118 r_write_to_multi_ack_req = true; 3119 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3120 r_write_fsm = WRITE_IDLE; 3121 } 3122 break; 3123 } 3124 3125 /////////////// 3126 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 3127 // In order to increase the Write requests throughput, 3128 // we don't wait to return in the IDLE state to consume 3129 // a new request in the write FIFO 3130 { 3131 if (!r_write_to_tgt_rsp_req.read()) 3132 { 3133 // post the request to TGT_RSP_FSM 3134 r_write_to_tgt_rsp_req = true; 3135 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3136 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3137 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3138 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3139 3140 // try to get a new write request from the FIFO 3141 if (m_cmd_write_addr_fifo.rok()) 3142 { 3143 // consume a word in the FIFO & write it in the local buffer 3144 cmd_write_fifo_get = true; 3145 size_t index = m_x[(addr_t) (m_cmd_write_addr_fifo.read())]; 3146 3147 r_write_address = (addr_t) (m_cmd_write_addr_fifo.read()); 3148 r_write_word_index = index; 3149 r_write_word_count = 1; 3150 r_write_data[index] = m_cmd_write_data_fifo.read(); 3151 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3152 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3153 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3154 r_write_pending_sc = false; 3155 3156 // initialize the be field for all words 3157 for(size_t word=0 ; word<m_words ; word++) 3158 { 3159 if (word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3160 else r_write_be[word] = 0x0; 3161 } 3162 3163 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 3164 { 3165 r_write_fsm = WRITE_DIR_REQ; 3166 } 3167 else 3168 { 3169 r_write_fsm = WRITE_NEXT; 3170 } 3171 } 3172 else 3173 { 3174 r_write_fsm = WRITE_IDLE; 3175 } 3176 3177 #if DEBUG_MEMC_WRITE 3178 if (m_debug) 3179 { 3180 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3181 << " : rsrcid = " << std::hex << r_write_srcid.read() << std::endl; 3182 if (m_cmd_write_addr_fifo.rok()) 3183 { 3184 std::cout << " New Write request: " 3185 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3186 << " / address = " << m_cmd_write_addr_fifo.read() 3187 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 3188 } 3189 } 3190 #endif 3191 } 3192 break; 3193 } 3194 3195 ///////////////////////// 3196 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 3197 { 3198 if (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3199 { 3200 3201 #if DEBUG_MEMC_WRITE 3202 if (m_debug) 3203 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 3204 #endif 3205 size_t hit_index = 0; 3206 size_t wok_index = 0; 3207 addr_t addr = (addr_t) r_write_address.read(); 3208 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3209 bool hit_write = m_trt.hit_write(m_nline[addr]); 3210 bool wok = not m_trt.full(wok_index); 3211 3212 if (hit_read) // register the modified data in TRT 3213 { 3214 r_write_trt_index = hit_index; 3215 r_write_fsm = WRITE_MISS_TRT_DATA; 3216 m_cpt_write_miss++; 3217 } 3218 else if (wok and !hit_write) // set a new entry in TRT 3219 { 3220 r_write_trt_index = wok_index; 3221 r_write_fsm = WRITE_MISS_TRT_SET; 3222 m_cpt_write_miss++; 3223 } 3224 else // wait an empty entry in TRT 3225 { 3226 r_write_fsm = WRITE_WAIT; 3227 m_cpt_trt_full++; 3228 } 3229 } 3230 
break; 3231 } 3232 3233 //////////////// 3234 case WRITE_WAIT: // release the locks protecting the shared ressources 3235 { 3236 3237 #if DEBUG_MEMC_WRITE 3238 if (m_debug) 3239 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 3240 #endif 3241 r_write_fsm = WRITE_DIR_REQ; 3242 break; 3243 } 3244 3245 //////////////////////// 3246 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 3247 { 3248 if (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3249 { 3250 std::vector<be_t> be_vector; 3251 std::vector<data_t> data_vector; 3252 be_vector.clear(); 3253 data_vector.clear(); 3254 for(size_t i=0; i<m_words; i++) 3255 { 3256 be_vector.push_back(r_write_be[i]); 3257 data_vector.push_back(r_write_data[i]); 3258 } 3259 m_trt.set(r_write_trt_index.read(), 3260 true, // read request to XRAM 3261 m_nline[(addr_t)(r_write_address.read())], 3262 r_write_srcid.read(), 3263 r_write_trdid.read(), 3264 r_write_pktid.read(), 3265 false, // not a processor read 3266 0, // not a single word 3267 0, // word index 3268 be_vector, 3269 data_vector); 3270 r_write_fsm = WRITE_MISS_XRAM_REQ; 3271 3272 #if DEBUG_MEMC_WRITE 3273 if (m_debug) 3274 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 3275 #endif 3276 } 3277 break; 3278 } 3279 3280 ///////////////////////// 3281 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 3282 { 3283 if (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3284 { 3285 std::vector<be_t> be_vector; 3286 std::vector<data_t> data_vector; 3287 be_vector.clear(); 3288 data_vector.clear(); 3289 for(size_t i=0; i<m_words; i++) 3290 { 3291 be_vector.push_back(r_write_be[i]); 3292 data_vector.push_back(r_write_data[i]); 3293 } 3294 m_trt.write_data_mask( r_write_trt_index.read(), 3295 be_vector, 3296 data_vector ); 3297 r_write_fsm = WRITE_RSP; 3298 3299 #if DEBUG_MEMC_WRITE 3300 if (m_debug) 3301 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 3302 #endif 3303 } 3304 break; 3305 } 3306 ///////////////////////// 3307 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3308 { 3309 if (not r_write_to_ixr_cmd_req.read()) 3310 { 3311 r_write_to_ixr_cmd_req = true; 3312 r_write_to_ixr_cmd_put = false; 3313 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3314 r_write_fsm = WRITE_RSP; 3315 3316 #if DEBUG_MEMC_WRITE 3317 if (m_debug) 3318 std::cout << " <MEMC " << name() << " WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 3319 #endif 3320 } 3321 break; 3322 } 3323 /////////////////////// 3324 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3325 // the cache line must be erased in mem-cache, and written 3326 // into XRAM. 
we read the cache and complete the buffer 3327 { 3328 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3329 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3330 3331 // update local buffer 3332 size_t set = m_y[(addr_t)(r_write_address.read())]; 3333 size_t way = r_write_way.read(); 3334 for(size_t word=0 ; word<m_words ; word++) 3335 { 3336 data_t mask = 0; 3337 if (r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3338 if (r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3339 if (r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3340 if (r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3341 3342 // complete only if mask is not null (for energy consumption) 3343 r_write_data[word] = (r_write_data[word].read() & mask) | 3344 (m_cache_data.read(way, set, word) & ~mask); 3345 } // end for 3346 3347 r_write_fsm = WRITE_BC_TRT_LOCK; 3348 3349 #if DEBUG_MEMC_WRITE 3350 if (m_debug) 3351 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3352 << " Read the cache to complete local buffer" << std::endl; 3353 #endif 3354 break; 3355 } 3356 /////////////////////// 3357 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3358 { 3359 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3360 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3361 3362 if (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3363 { 3364 size_t wok_index = 0; 3365 bool wok = not m_trt.full(wok_index); 3366 if (wok ) 3367 { 3368 r_write_trt_index = wok_index; 3369 r_write_fsm = WRITE_BC_IVT_LOCK; 3370 } 3371 else // wait an empty slot in TRT 3372 { 3373 r_write_fsm = WRITE_WAIT; 3374 } 3375 3376 #if DEBUG_MEMC_WRITE 3377 if (m_debug) 3378 std::cout << " <MEMC " << name() << " WRITE_BC_TRT_LOCK> Check TRT" 3379 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3380 #endif 3381 } 3382 break; 3383 } 3384 ////////////////////// 3385 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3386 { 3387 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3388 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3389 3390 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3391 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3392 3393 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3394 { 3395 bool wok = false; 3396 size_t index = 0; 3397 size_t srcid = r_write_srcid.read(); 3398 size_t trdid = r_write_trdid.read(); 3399 size_t pktid = r_write_pktid.read(); 3400 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3401 size_t nb_copies = r_write_count.read(); 3402 3403 wok = m_ivt.set(false, // it's an inval transaction 3404 true, // it's a broadcast 3405 true, // response required 3406 false, // no acknowledge required 3407 srcid, 3408 trdid, 3409 pktid, 3410 nline, 3411 nb_copies, 3412 index); 3413 #if DEBUG_MEMC_WRITE 3414 if (m_debug and wok ) 3415 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3416 << " / nb_copies = " << r_write_count.read() << std::endl; 3417 #endif 3418 r_write_upt_index = index; 3419 3420 if (wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3421 else r_write_fsm = WRITE_WAIT; 3422 } 3423 break; 3424 } 3425 //////////////////////// 3426 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3427 // and invalidate the line in directory 3428 { 3429 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3430 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3431 3432 assert( (r_alloc_trt_fsm.read() == 
ALLOC_TRT_WRITE) and 3433 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3434 3435 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3436 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3437 3438 // register PUT request in TRT 3439 std::vector<data_t> data_vector; 3440 data_vector.clear(); 3441 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3442 m_trt.set( r_write_trt_index.read(), 3443 false, // PUT request 3444 m_nline[(addr_t)(r_write_address.read())], 3445 0, // unused 3446 0, // unused 3447 0, // unused 3448 false, // not a processor read 3449 0, // unused 3450 0, // unused 3451 std::vector<be_t> (m_words,0), 3452 data_vector ); 3453 3454 // invalidate directory entry 3455 DirectoryEntry entry; 3456 entry.valid = false; 3457 entry.dirty = false; 3458 entry.tag = 0; 3459 entry.is_cnt = false; 3460 entry.lock = false; 3461 entry.owner.srcid = 0; 3462 entry.owner.inst = false; 3463 entry.ptr = 0; 3464 entry.count = 0; 3465 size_t set = m_y[(addr_t)(r_write_address.read())]; 3466 size_t way = r_write_way.read(); 3467 3468 m_cache_directory.write(set, way, entry); 3469 3470 #if DEBUG_MEMC_WRITE 3471 if (m_debug) 3472 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3473 << " address = " << r_write_address.read() << std::endl; 3474 #endif 3475 r_write_fsm = WRITE_BC_CC_SEND; 3476 break; 3477 } 3478 3479 ////////////////////// 3480 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3481 { 3482 if (!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3483 { 3484 r_write_to_cc_send_multi_req = false; 3485 r_write_to_cc_send_brdcast_req = true; 3486 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3487 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3488 r_write_to_cc_send_index = 0; 3489 r_write_to_cc_send_count = 0; 3490 3491 for(size_t i=0; i<m_words ; i++) // Ã quoi sert ce for? (AG) 3492 { 3493 r_write_to_cc_send_be[i]=0; 3494 r_write_to_cc_send_data[i] = 0; 3495 } 3496 r_write_fsm = WRITE_BC_XRAM_REQ; 3497 3498 #if DEBUG_MEMC_WRITE 3499 if (m_debug) 3500 std::cout << " <MEMC " << name() 3501 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3502 #endif 3503 } 3504 break; 3505 } 3506 3507 /////////////////////// 3508 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3509 { 3510 if (not r_write_to_ixr_cmd_req.read()) 3511 { 3512 r_write_to_ixr_cmd_req = true; 3513 r_write_to_ixr_cmd_put = true; 3514 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3515 r_write_fsm = WRITE_IDLE; 3516 3517 #if DEBUG_MEMC_WRITE 3518 if (m_debug) 3519 std::cout << " <MEMC " << name() 3520 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3521 #endif 3522 } 3523 break; 3524 } 3525 } // end switch r_write_fsm 3526 3527 /////////////////////////////////////////////////////////////////////// 3528 // IXR_CMD FSM 3529 /////////////////////////////////////////////////////////////////////// 3530 // The IXR_CMD fsm controls the command packets to the XRAM : 3531 // It handles requests from 5 FSMs with a round-robin priority: 3532 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3533 // 3534 // - It sends a single flit VCI read to the XRAM in case of 3535 // GET request posted by the READ, WRITE or CAS FSMs. 3536 // - It sends a multi-flit VCI write in case of PUT request posted by 3537 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 
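// (note on the arbitration: the priority listed above is rotating, as
// implemented by the IXR_CMD_*_IDLE states below: after a client has been
// served, it becomes the lowest-priority client for the next allocation)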
3538 // 3539 // For each client, there is three steps: 3540 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3541 // - IXR_CMD_*_TRT : access to TRT for address and data 3542 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3543 // 3544 // The address and data to be written (for a PUT) are stored in TRT. 3545 // The trdid field contains always the TRT entry index. 3546 //////////////////////////////////////////////////////////////////////// 3547 3548 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3549 3550 switch(r_ixr_cmd_fsm.read()) 3551 { 3552 /////////////////////// 3553 case IXR_CMD_READ_IDLE: 3554 { 3555 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3556 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3557 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3558 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3559 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3560 break; 3561 } 3562 //////////////////////// 3563 case IXR_CMD_WRITE_IDLE: 3564 { 3565 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3566 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3567 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3568 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3569 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3570 break; 3571 } 3572 ////////////////////// 3573 case IXR_CMD_CAS_IDLE: 3574 { 3575 if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3576 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3577 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3578 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3579 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3580 break; 3581 } 3582 /////////////////////// 3583 case IXR_CMD_XRAM_IDLE: 3584 { 3585 if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3586 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3587 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3588 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3589 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3590 break; 3591 } 3592 ///////////////////////// 3593 case IXR_CMD_CONFIG_IDLE: 3594 { 3595 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3596 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3597 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3598 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3599 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3600 break; 3601 } 3602 3603 ////////////////////// 3604 case IXR_CMD_READ_TRT: // access TRT for a GET 3605 { 3606 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3607 { 3608 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read()); 3609 r_ixr_cmd_address = entry.nline * (m_words<<2); 3610 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3611 r_ixr_cmd_get = true; 3612 r_ixr_cmd_word = 0; 3613 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3614 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3615 3616 #if DEBUG_MEMC_IXR_CMD 3617 if (m_debug) 3618 std::cout << " <MEMC " << 
name() << " IXR_CMD_READ_TRT> TRT access" 3619 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3620 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3621 #endif 3622 } 3623 break; 3624 } 3625 /////////////////////// 3626 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3627 { 3628 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3629 { 3630 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read()); 3631 r_ixr_cmd_address = entry.nline * (m_words<<2); 3632 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3633 r_ixr_cmd_get = entry.xram_read; 3634 r_ixr_cmd_word = 0; 3635 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3636 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3637 3638 #if DEBUG_MEMC_IXR_CMD 3639 if (m_debug) 3640 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3641 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3642 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3643 #endif 3644 } 3645 break; 3646 } 3647 ///////////////////// 3648 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3649 { 3650 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3651 { 3652 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read()); 3653 r_ixr_cmd_address = entry.nline * (m_words<<2); 3654 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3655 r_ixr_cmd_get = entry.xram_read; 3656 r_ixr_cmd_word = 0; 3657 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3658 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3659 3660 #if DEBUG_MEMC_IXR_CMD 3661 if (m_debug) 3662 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3663 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3664 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3665 #endif 3666 } 3667 break; 3668 } 3669 ////////////////////// 3670 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3671 { 3672 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3673 { 3674 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read()); 3675 r_ixr_cmd_address = entry.nline * (m_words<<2); 3676 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3677 r_ixr_cmd_get = false; 3678 r_ixr_cmd_word = 0; 3679 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3680 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3681 3682 #if DEBUG_MEMC_IXR_CMD 3683 if (m_debug) 3684 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3685 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3686 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3687 #endif 3688 } 3689 break; 3690 } 3691 //////////////////////// 3692 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3693 { 3694 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3695 { 3696 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read()); 3697 r_ixr_cmd_address = entry.nline * (m_words<<2); 3698 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3699 r_ixr_cmd_get = false; 3700 r_ixr_cmd_word = 0; 3701 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3702 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3703 3704 #if DEBUG_MEMC_IXR_CMD 3705 if (m_debug) 3706 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3707 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3708 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3709 #endif 3710 } 3711 break; 3712 } 3713 
3714 /////////////////////// 3715 case IXR_CMD_READ_SEND: // send a get from READ FSM 3716 { 3717 if (p_vci_ixr.cmdack) 3718 { 3719 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3720 r_read_to_ixr_cmd_req = false; 3721 3722 #if DEBUG_MEMC_IXR_CMD 3723 if (m_debug) 3724 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3725 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3726 #endif 3727 } 3728 break; 3729 } 3730 //////////////////////// 3731 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3732 { 3733 if (p_vci_ixr.cmdack) 3734 { 3735 if (r_write_to_ixr_cmd_put.read()) // PUT 3736 { 3737 if (r_ixr_cmd_word.read() == (m_words - 2)) 3738 { 3739 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3740 r_write_to_ixr_cmd_req = false; 3741 } 3742 else 3743 { 3744 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3745 } 3746 3747 #if DEBUG_MEMC_IXR_CMD 3748 if (m_debug) 3749 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3750 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3751 #endif 3752 } 3753 else // GET 3754 { 3755 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3756 r_write_to_ixr_cmd_req = false; 3757 3758 #if DEBUG_MEMC_IXR_CMD 3759 if (m_debug) 3760 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3761 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3762 #endif 3763 } 3764 } 3765 break; 3766 } 3767 ////////////////////// 3768 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3769 { 3770 if (p_vci_ixr.cmdack) 3771 { 3772 if (r_cas_to_ixr_cmd_put.read()) // PUT 3773 { 3774 if (r_ixr_cmd_word.read() == (m_words - 2)) 3775 { 3776 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3777 r_cas_to_ixr_cmd_req = false; 3778 } 3779 else 3780 { 3781 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3782 } 3783 3784 #if DEBUG_MEMC_IXR_CMD 3785 if (m_debug) 3786 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3787 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3788 #endif 3789 } 3790 else // GET 3791 { 3792 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3793 r_cas_to_ixr_cmd_req = false; 3794 3795 #if DEBUG_MEMC_IXR_CMD 3796 if (m_debug) 3797 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3798 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3799 #endif 3800 } 3801 } 3802 break; 3803 } 3804 /////////////////////// 3805 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3806 { 3807 if (p_vci_ixr.cmdack.read()) 3808 { 3809 if (r_ixr_cmd_word.read() == (m_words - 2)) 3810 { 3811 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3812 r_xram_rsp_to_ixr_cmd_req = false; 3813 } 3814 else 3815 { 3816 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3817 } 3818 3819 #if DEBUG_MEMC_IXR_CMD 3820 if (m_debug) 3821 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3822 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3823 #endif 3824 } 3825 break; 3826 } 3827 ///////////////////////// 3828 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 3829 { 3830 if (p_vci_ixr.cmdack.read()) 3831 { 3832 if (r_ixr_cmd_word.read() == (m_words - 2)) 3833 { 3834 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 3835 r_config_to_ixr_cmd_req = false; 3836 } 3837 else 3838 { 3839 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3840 } 3841 3842 #if DEBUG_MEMC_IXR_CMD 3843 
if (m_debug) 3844 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 3845 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3846 #endif 3847 } 3848 break; 3849 } 3850 } // end switch r_ixr_cmd_fsm 3851 3852 //////////////////////////////////////////////////////////////////////////// 3853 // IXR_RSP FSM 3854 //////////////////////////////////////////////////////////////////////////// 3855 // The IXR_RSP FSM receives the response packets from the XRAM, 3856 // for both PUT transaction, and GET transaction. 3857 // 3858 // - A response to a PUT request is a single-cell VCI packet. 3859 // The TRT index is contained in the RTRDID field. 3860 // The FSM takes the lock protecting the TRT, and the corresponding 3861 // entry is erased. If an acknowledge was required (in case of software SYNC) 3862 // the r_config_rsp_lines counter is decremented. 3863 // 3864 // - A response to a GET request is a multi-cell VCI packet. 3865 // The TRT index is contained in the RTRDID field. 3866 // The N cells contain the N words of the cache line in the RDATA field. 3867 // The FSM takes the lock protecting the TRT to store the line in the TRT 3868 // (taking into account the write requests already stored in the TRT). 3869 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 3870 // signal is set to inform the XRAM_RSP FSM. 3871 /////////////////////////////////////////////////////////////////////////////// 3872 3873 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 3874 3875 switch(r_ixr_rsp_fsm.read()) 3876 { 3877 ////////////////// 3878 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3879 { 3880 if (p_vci_ixr.rspval.read()) 3881 { 3882 r_ixr_rsp_cpt = 0; 3883 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3884 3885 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 3886 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 3887 3888 if (p_vci_ixr.reop.read()) // PUT 3889 { 3890 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3891 3892 #if DEBUG_MEMC_IXR_RSP 3893 if (m_debug) 3894 std::cout << " <MEMC " << name() 3895 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 3896 #endif 3897 } 3898 else // GET 3899 { 3900 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 3901 3902 #if DEBUG_MEMC_IXR_RSP 3903 if (m_debug) 3904 std::cout << " <MEMC " << name() 3905 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 3906 #endif 3907 } 3908 } 3909 break; 3910 } 3911 //////////////////////// 3912 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3913 // decrease the line counter if config request 3914 { 3915 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3916 { 3917 size_t index = r_ixr_rsp_trt_index.read(); 3918 if (m_trt.is_config(index)) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 3919 m_trt.erase(index); 3920 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3921 3922 #if DEBUG_MEMC_IXR_RSP 3923 if (m_debug) 3924 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 3925 << r_ixr_rsp_trt_index.read() << std::endl; 3926 #endif 3927 } 3928 break; 3929 } 3930 ////////////////////// 3931 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 3932 { 3933 if ((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 3934 { 3935 size_t index = r_ixr_rsp_trt_index.read(); 3936 size_t word = r_ixr_rsp_cpt.read(); 3937 bool eop = p_vci_ixr.reop.read(); 3938 wide_data_t data = p_vci_ixr.rdata.read(); 3939 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 3940 3941 
assert(((eop == (word == (m_words-2))) or error) and 3942 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 3943 3944 m_trt.write_rsp( index, 3945 word, 3946 data ); 3947 3948 r_ixr_rsp_cpt = word + 2; 3949 3950 if (eop ) 3951 { 3952 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()] = true; 3953 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3954 } 3955 3956 #if DEBUG_MEMC_IXR_RSP 3957 if (m_debug) 3958 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 3959 << " index = " << std::dec << index 3960 << " / word = " << word 3961 << " / data = " << std::hex << data << std::endl; 3962 #endif 3963 } 3964 break; 3965 } 3966 } // end swich r_ixr_rsp_fsm 3967 3968 //////////////////////////////////////////////////////////////////////////// 3969 // XRAM_RSP FSM 3970 //////////////////////////////////////////////////////////////////////////// 3971 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 3972 // The cache line has been written in the TRT by the IXR_CMD_FSM. 3973 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 3974 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 3975 // of entries in the TRT, that are handled with a round-robin priority... 3976 // 3977 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 3978 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 3979 // It selects a cache slot and save the victim line in another local buffer 3980 // r_xram_rsp_victim_***. 3981 // It writes the line extracted from TRT in the cache. 3982 // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP 3983 // FSM to return the cache line to the registered processor. 3984 // If there is no empty slot, a victim line is evicted, and 3985 // invalidate requests are sent to the L1 caches containing copies. 3986 // If this line is dirty, the XRAM_RSP FSM send a request to the IXR_CMD 3987 // FSM to save the victim line to the XRAM, and register the write transaction 3988 // in the TRT (using the entry previously used by the read transaction). 
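// Summarised, with the names used below: once the TRT entry is copied in
// r_xram_rsp_trt_buf and a victim slot (way, set) is selected,
// - victim.valid and victim.count != 0 : an inval request is registered in
//   the IVT and sent to the L1 caches holding a copy;
// - victim dirty : a PUT of the victim is registered in the same TRT entry
//   and posted to the IXR_CMD FSM;
// - proc_read in the TRT entry : the line is returned to the requesting
//   processor through the TGT_RSP FSM.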
3989 /////////////////////////////////////////////////////////////////////////////// 3990 3991 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 3992 3993 switch(r_xram_rsp_fsm.read()) 3994 { 3995 /////////////////// 3996 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 3997 { 3998 size_t old = r_xram_rsp_trt_index.read(); 3999 size_t lines = m_trt_lines; 4000 for(size_t i=0 ; i<lines ; i++) 4001 { 4002 size_t index = (i+old+1) %lines; 4003 if (r_ixr_rsp_to_xram_rsp_rok[index]) 4004 { 4005 r_xram_rsp_trt_index = index; 4006 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4007 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4008 4009 #if DEBUG_MEMC_XRAM_RSP 4010 if (m_debug) 4011 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 4012 << " Available cache line in TRT:" 4013 << " index = " << std::dec << index << std::endl; 4014 #endif 4015 break; 4016 } 4017 } 4018 break; 4019 } 4020 /////////////////////// 4021 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 4022 // Copy the TRT entry in a local buffer 4023 { 4024 if ((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4025 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 4026 { 4027 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4028 size_t index = r_xram_rsp_trt_index.read(); 4029 r_xram_rsp_trt_buf.copy( m_trt.read(index)); 4030 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4031 4032 #if DEBUG_MEMC_XRAM_RSP 4033 if (m_debug) 4034 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 4035 << " Get access to DIR and TRT" << std::endl; 4036 #endif 4037 } 4038 break; 4039 } 4040 /////////////////////// 4041 case XRAM_RSP_TRT_COPY: // Select a victim cache line 4042 // and copy it in a local buffer 4043 { 4044 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4045 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 4046 4047 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4048 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 4049 4050 // selects & extracts a victim line from cache 4051 size_t way = 0; 4052 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 4053 4054 DirectoryEntry victim(m_cache_directory.select(set, way)); 4055 4056 bool inval = (victim.count and victim.valid) ; 4057 4058 // copy the victim line in a local buffer (both data dir) 4059 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 4060 4061 r_xram_rsp_victim_copy = victim.owner.srcid; 4062 r_xram_rsp_victim_copy_inst = victim.owner.inst; 4063 r_xram_rsp_victim_count = victim.count; 4064 r_xram_rsp_victim_ptr = victim.ptr; 4065 r_xram_rsp_victim_way = way; 4066 r_xram_rsp_victim_set = set; 4067 r_xram_rsp_victim_nline = (addr_t)victim.tag*m_sets + set; 4068 r_xram_rsp_victim_is_cnt = victim.is_cnt; 4069 r_xram_rsp_victim_inval = inval ; 4070 r_xram_rsp_victim_dirty = victim.dirty; 4071 4072 if (not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 4073 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4074 4075 #if DEBUG_MEMC_XRAM_RSP 4076 if (m_debug) 4077 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 4078 << " Select a victim slot: " 4079 << " way = " << std::dec << way 4080 << " / set = " << set 4081 << " / inval_required = " << inval << std::endl; 4082 #endif 4083 break; 4084 } 4085 /////////////////////// 4086 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 4087 // to check a possible pending inval 4088 { 4089 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4090 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: 
Bad DIR allocation"); 4091 4092 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4093 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 4094 4095 if (r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 4096 { 4097 size_t index = 0; 4098 if (m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 4099 { 4100 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4101 4102 #if DEBUG_MEMC_XRAM_RSP 4103 if (m_debug) 4104 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4105 << " Get acces to IVT, but line invalidation registered" 4106 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4107 << " / index = " << std::dec << index << std::endl; 4108 #endif 4109 4110 } 4111 else if (m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 4112 { 4113 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4114 4115 #if DEBUG_MEMC_XRAM_RSP 4116 if (m_debug) 4117 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4118 << " Get acces to IVT, but inval required and IVT full" << std::endl; 4119 #endif 4120 } 4121 else 4122 { 4123 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 4124 4125 #if DEBUG_MEMC_XRAM_RSP 4126 if (m_debug) 4127 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4128 << " Get acces to IVT / no pending inval request" << std::endl; 4129 #endif 4130 } 4131 } 4132 break; 4133 } 4134 ///////////////////////// 4135 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 4136 { 4137 4138 #if DEBUG_MEMC_XRAM_RSP 4139 if (m_debug) 4140 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 4141 << " Release all locks and retry" << std::endl; 4142 #endif 4143 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4144 break; 4145 } 4146 /////////////////////// 4147 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4148 // erases the TRT entry if victim not dirty, 4149 // and set inval request in IVT if required 4150 { 4151 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4152 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4153 4154 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4155 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4156 4157 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4158 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4159 4160 // check if this is an instruction read, this means pktid is either 4161 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4162 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4163 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4164 4165 // check if this is a cached read, this means pktid is either 4166 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4167 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4168 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4169 4170 bool dirty = false; 4171 4172 // update cache data 4173 size_t set = r_xram_rsp_victim_set.read(); 4174 size_t way = r_xram_rsp_victim_way.read(); 4175 4176 for(size_t word=0; word<m_words ; word++) 4177 { 4178 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4179 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4180 } 4181 4182 // update cache directory 4183 DirectoryEntry entry; 4184 entry.valid = true; 4185 entry.is_cnt = false; 4186 entry.lock = false; 4187 entry.dirty = dirty; 4188 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4189 entry.ptr = 0; 4190 if (cached_read) 4191 { 4192 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4193 entry.owner.inst = 
inst_read; 4194 entry.count = 1; 4195 } 4196 else 4197 { 4198 entry.owner.srcid = 0; 4199 entry.owner.inst = 0; 4200 entry.count = 0; 4201 } 4202 m_cache_directory.write(set, way, entry); 4203 4204 // register invalid request in IVT for victim line if required 4205 if (r_xram_rsp_victim_inval.read()) 4206 { 4207 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4208 size_t index = 0; 4209 size_t count_copies = r_xram_rsp_victim_count.read(); 4210 4211 bool wok = m_ivt.set(false, // it's an inval transaction 4212 broadcast, // set broadcast bit 4213 false, // no response required 4214 false, // no acknowledge required 4215 0, // srcid 4216 0, // trdid 4217 0, // pktid 4218 r_xram_rsp_victim_nline.read(), 4219 count_copies, 4220 index); 4221 4222 r_xram_rsp_ivt_index = index; 4223 4224 assert( wok and 4225 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4226 } 4227 4228 #if DEBUG_MEMC_XRAM_RSP 4229 if (m_debug) 4230 { 4231 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 4232 << " Cache update: " 4233 << " way = " << std::dec << way 4234 << " / set = " << set 4235 << " / owner_id = " << std::hex << entry.owner.srcid 4236 << " / owner_ins = " << std::dec << entry.owner.inst 4237 << " / count = " << entry.count 4238 << " / is_cnt = " << entry.is_cnt << std::endl; 4239 if (r_xram_rsp_victim_inval.read()) 4240 std::cout << " Invalidation request for address " 4241 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 4242 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4243 } 4244 #endif 4245 4246 // If the victim is not dirty, we don't need to reuse the TRT entry for 4247 // another PUT transaction, and we can erase the TRT entry 4248 if (not r_xram_rsp_victim_dirty.read()) 4249 { 4250 m_trt.erase(r_xram_rsp_trt_index.read()); 4251 } 4252 4253 // Next state 4254 if (r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4255 else if (r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4256 else if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4257 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4258 break; 4259 } 4260 //////////////////////// 4261 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 4262 { 4263 if (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4264 { 4265 std::vector<data_t> data_vector; 4266 data_vector.clear(); 4267 for(size_t i=0; i<m_words; i++) 4268 { 4269 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4270 } 4271 m_trt.set( r_xram_rsp_trt_index.read(), 4272 false, // PUT 4273 r_xram_rsp_victim_nline.read(), // line index 4274 0, // unused 4275 0, // unused 4276 0, // unused 4277 false, // not proc_read 4278 0, // unused 4279 0, // unused 4280 std::vector<be_t>(m_words,0xF), 4281 data_vector); 4282 4283 #if DEBUG_MEMC_XRAM_RSP 4284 if (m_debug) 4285 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4286 << " Set TRT entry for the put transaction" 4287 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4288 #endif 4289 if (r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4290 else if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4291 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4292 } 4293 break; 4294 } 4295 ////////////////////// 4296 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4297 { 4298 if (not r_xram_rsp_to_tgt_rsp_req.read()) 4299 { 4300 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4301 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4302 
r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4303 for(size_t i=0; i < m_words; i++) 4304 { 4305 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4306 } 4307 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4308 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4309 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4310 r_xram_rsp_to_tgt_rsp_rerror = false; 4311 r_xram_rsp_to_tgt_rsp_req = true; 4312 4313 if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4314 else if (r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4315 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4316 4317 #if DEBUG_MEMC_XRAM_RSP 4318 if (m_debug) 4319 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 4320 << " Request the TGT_RSP FSM to return data:" 4321 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 4322 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4323 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4324 #endif 4325 } 4326 break; 4327 } 4328 //////////////////// 4329 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 4330 { 4331 if (!r_xram_rsp_to_cc_send_multi_req.read() and 4332 !r_xram_rsp_to_cc_send_brdcast_req.read()) 4333 { 4334 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 4335 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 4336 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4337 4338 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 4339 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4340 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4341 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4342 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4343 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4344 xram_rsp_to_cc_send_fifo_put = multi_req; 4345 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4346 4347 if (r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4348 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4349 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4350 4351 #if DEBUG_MEMC_XRAM_RSP 4352 if (m_debug) 4353 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4354 << " Send an inval request to CC_SEND FSM" 4355 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4356 #endif 4357 } 4358 break; 4359 } 4360 ////////////////////////// 4361 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4362 { 4363 if (not r_xram_rsp_to_ixr_cmd_req.read()) 4364 { 4365 r_xram_rsp_to_ixr_cmd_req = true; 4366 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4367 4368 m_cpt_write_dirty++; 4369 4370 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4371 r_xram_rsp_victim_inval.read(); 4372 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4373 4374 if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4375 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4376 4377 #if DEBUG_MEMC_XRAM_RSP 4378 if (m_debug) 4379 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4380 << " Send the put request to IXR_CMD FSM" 4381 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4382 #endif 4383 } 4384 break; 4385 } 4386 ///////////////////////// 4387 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4388 { 4389 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4390 { 4391 r_xram_rsp_fsm = 
XRAM_RSP_HEAP_ERASE; 4392 } 4393 4394 #if DEBUG_MEMC_XRAM_RSP 4395 if (m_debug) 4396 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 4397 << " Requesting HEAP lock" << std::endl; 4398 #endif 4399 break; 4400 } 4401 ///////////////////////// 4402 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 4403 { 4404 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4405 { 4406 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 4407 4408 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4409 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4410 xram_rsp_to_cc_send_fifo_put = true; 4411 if (m_xram_rsp_to_cc_send_inst_fifo.wok()) 4412 { 4413 r_xram_rsp_next_ptr = entry.next; 4414 if (entry.next == r_xram_rsp_next_ptr.read()) // last copy 4415 { 4416 r_xram_rsp_to_cc_send_multi_req = true; 4417 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 4418 } 4419 else 4420 { 4421 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4422 } 4423 } 4424 else 4425 { 4426 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4427 } 4428 4429 #if DEBUG_MEMC_XRAM_RSP 4430 if (m_debug) 4431 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 4432 << " Erase copy:" 4433 << " srcid = " << std::hex << entry.owner.srcid 4434 << " / inst = " << std::dec << entry.owner.inst << std::endl; 4435 #endif 4436 } 4437 break; 4438 } 4439 ///////////////////////// 4440 case XRAM_RSP_HEAP_LAST: // last copy 4441 { 4442 if (r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 4443 { 4444 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 4445 << " bad HEAP allocation" << std::endl; 4446 exit(0); 4447 } 4448 size_t free_pointer = m_heap.next_free_ptr(); 4449 4450 HeapEntry last_entry; 4451 last_entry.owner.srcid = 0; 4452 last_entry.owner.inst = false; 4453 if (m_heap.is_full()) 4454 { 4455 last_entry.next = r_xram_rsp_next_ptr.read(); 4456 m_heap.unset_full(); 4457 } 4458 else 4459 { 4460 last_entry.next = free_pointer; 4461 } 4462 4463 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 4464 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 4465 4466 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4467 4468 #if DEBUG_MEMC_XRAM_RSP 4469 if (m_debug) 4470 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 4471 << " Heap housekeeping" << std::endl; 4472 #endif 4473 break; 4474 } 4475 ////////////////////////// 4476 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 4477 { 4478 m_trt.erase(r_xram_rsp_trt_index.read()); 4479 4480 // Next state 4481 if (r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 4482 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4483 4484 #if DEBUG_MEMC_XRAM_RSP 4485 if (m_debug) 4486 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 4487 << " Error reported by XRAM / erase the TRT entry" << std::endl; 4488 #endif 4489 break; 4490 } 4491 //////////////////////// 4492 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 4493 { 4494 if (!r_xram_rsp_to_tgt_rsp_req.read()) 4495 { 4496 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4497 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4498 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4499 for(size_t i=0; i < m_words; i++) 4500 { 4501 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4502 } 4503 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4504 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4505 r_xram_rsp_to_tgt_rsp_rerror = true; 4506 r_xram_rsp_to_tgt_rsp_req = true; 4507 4508 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4509 4510 #if 
DEBUG_MEMC_XRAM_RSP 4511 if (m_debug) 4512 std::cout << " <MEMC " << name() 4513 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 4514 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 4515 #endif 4516 } 4517 break; 4518 } 4519 } // end swich r_xram_rsp_fsm 4520 4521 //////////////////////////////////////////////////////////////////////////////////// 4522 // CLEANUP FSM 4523 //////////////////////////////////////////////////////////////////////////////////// 4524 // The CLEANUP FSM handles the cleanup request from L1 caches. 4525 // It accesses the cache directory and the heap to update the list of copies. 4526 //////////////////////////////////////////////////////////////////////////////////// 4527 4528 switch(r_cleanup_fsm.read()) 4529 { 4530 ////////////////// 4531 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4532 { 4533 if (not m_cc_receive_to_cleanup_fifo.rok()) break; 4534 4535 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4536 4537 uint32_t srcid = DspinDhccpParam::dspin_get(flit, 4538 DspinDhccpParam::CLEANUP_SRCID); 4539 4540 uint8_t type = DspinDhccpParam::dspin_get(flit, 4541 DspinDhccpParam::P2M_TYPE); 4542 4543 r_cleanup_way_index = DspinDhccpParam::dspin_get(flit, 4544 DspinDhccpParam::CLEANUP_WAY_INDEX); 4545 4546 r_cleanup_nline = DspinDhccpParam::dspin_get(flit, 4547 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4548 4549 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4550 r_cleanup_srcid = srcid; 4551 4552 assert((srcid < m_initiators) and 4553 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 4554 4555 // <Activity Counters> 4556 if (is_local_req(srcid)) { 4557 m_cpt_cleanup_local++; 4558 } 4559 else { 4560 m_cpt_cleanup_remote++; 4561 m_cpt_cleanup_cost += req_distance(srcid); 4562 } 4563 // </Activity Counters> 4564 cc_receive_to_cleanup_fifo_get = true; 4565 r_cleanup_fsm = CLEANUP_GET_NLINE; 4566 4567 #if DEBUG_MEMC_CLEANUP 4568 if (m_debug) 4569 std::cout << " <MEMC " << name() 4570 << " CLEANUP_IDLE> Cleanup request:" << std::hex 4571 << " owner_id = " << srcid 4572 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) << std::endl; 4573 #endif 4574 break; 4575 } 4576 /////////////////////// 4577 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4578 { 4579 if (not m_cc_receive_to_cleanup_fifo.rok()) break; 4580 4581 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4582 4583 addr_t nline = r_cleanup_nline.read() | 4584 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4585 4586 cc_receive_to_cleanup_fifo_get = true; 4587 r_cleanup_nline = nline; 4588 r_cleanup_fsm = CLEANUP_DIR_REQ; 4589 4590 #if DEBUG_MEMC_CLEANUP 4591 if (m_debug) 4592 std::cout << " <MEMC " << name() 4593 << " CLEANUP_GET_NLINE> Cleanup request:" 4594 << " address = " << std::hex << nline * m_words * 4 << std::endl; 4595 #endif 4596 break; 4597 } 4598 ///////////////////// 4599 case CLEANUP_DIR_REQ: // Get the lock to the directory 4600 { 4601 if (r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4602 4603 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4604 4605 #if DEBUG_MEMC_CLEANUP 4606 if (m_debug) 4607 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 4608 #endif 4609 break; 4610 } 4611 ////////////////////// 4612 case CLEANUP_DIR_LOCK: // test directory status 4613 { 4614 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4615 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4616 4617 // Read the 
directory 4618 size_t way = 0; 4619 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 4620 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 4621 r_cleanup_is_cnt = entry.is_cnt; 4622 r_cleanup_dirty = entry.dirty; 4623 r_cleanup_tag = entry.tag; 4624 r_cleanup_lock = entry.lock; 4625 r_cleanup_way = way; 4626 r_cleanup_count = entry.count; 4627 r_cleanup_ptr = entry.ptr; 4628 r_cleanup_copy = entry.owner.srcid; 4629 r_cleanup_copy_inst = entry.owner.inst; 4630 4631 if (entry.valid) // hit : the copy must be cleared 4632 { 4633 assert( (entry.count > 0) and 4634 "MEMC ERROR in CLEANUP_DIR_LOCK state, CLEANUP on valid entry with no copies"); 4635 4636 if ((entry.count == 1) or (entry.is_cnt)) // no access to the heap 4637 { 4638 r_cleanup_fsm = CLEANUP_DIR_WRITE; 4639 } 4640 else // access to the heap 4641 { 4642 r_cleanup_fsm = CLEANUP_HEAP_REQ; 4643 } 4644 } 4645 else // miss : check IVT for a pending inval 4646 { 4647 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4648 } 4649 4650 #if DEBUG_MEMC_CLEANUP 4651 if (m_debug) 4652 std::cout << " <MEMC " << name() 4653 << " CLEANUP_DIR_LOCK> Test directory status: " 4654 << std::hex << " address = " << cleanup_address 4655 << " / hit = " << entry.valid 4656 << " / dir_id = " << entry.owner.srcid 4657 << " / dir_ins = " << entry.owner.inst 4658 << " / search_id = " << r_cleanup_srcid.read() 4659 << " / search_ins = " << r_cleanup_inst.read() 4660 << " / count = " << entry.count 4661 << " / is_cnt = " << entry.is_cnt << std::endl; 4662 #endif 4663 break; 4664 } 4665 /////////////////////// 4666 case CLEANUP_DIR_WRITE: // Update the directory entry without heap access 4667 { 4668 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4669 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4670 4671 size_t way = r_cleanup_way.read(); 4672 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 4673 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4674 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4675 bool match = match_srcid and match_inst; 4676 4677 assert( (r_cleanup_is_cnt.read() or match) and 4678 "MEMC ERROR in CLEANUP_DIR_LOCK: illegal CLEANUP on valid entry"); 4679 4680 // update the cache directory (for the copies) 4681 DirectoryEntry entry; 4682 entry.valid = true; 4683 entry.is_cnt = r_cleanup_is_cnt.read(); 4684 entry.dirty = r_cleanup_dirty.read(); 4685 entry.tag = r_cleanup_tag.read(); 4686 entry.lock = r_cleanup_lock.read(); 4687 entry.ptr = r_cleanup_ptr.read(); 4688 entry.count = r_cleanup_count.read() - 1; 4689 entry.owner.srcid = 0; 4690 entry.owner.inst = 0; 4691 4692 m_cache_directory.write(set, way, entry); 4693 4694 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4695 4696 #if DEBUG_MEMC_CLEANUP 4697 if (m_debug) 4698 std::cout << " <MEMC " << name() 4699 << " CLEANUP_DIR_WRITE> Update directory:" 4700 << std::hex << " address = " << r_cleanup_nline.read() * m_words * 4 4701 << " / dir_id = " << entry.owner.srcid 4702 << " / dir_ins = " << entry.owner.inst 4703 << " / count = " << entry.count 4704 << " / is_cnt = " << entry.is_cnt << std::endl; 4705 #endif 4706 4707 break; 4708 } 4709 ////////////////////// 4710 case CLEANUP_HEAP_REQ: // get the lock to the HEAP directory 4711 { 4712 if (r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 4713 4714 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 4715 4716 #if DEBUG_MEMC_CLEANUP 4717 if (m_debug) 4718 std::cout << " <MEMC " << name() 4719 << " CLEANUP_HEAP_REQ> HEAP lock acquired " << std::endl; 4720 #endif 4721 break; 
4722 } 4723 ////////////////////// 4724 case CLEANUP_HEAP_LOCK: // two cases are handled in this state : 4725 // 1. the matching copy is directly in the directory 4726 // 2. the matching copy is the first copy in the heap 4727 { 4728 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4729 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4730 4731 size_t way = r_cleanup_way.read(); 4732 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 4733 4734 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 4735 bool last = (heap_entry.next == r_cleanup_ptr.read()); 4736 4737 // match_dir computation 4738 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4739 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4740 bool match_dir = match_dir_srcid and match_dir_inst; 4741 4742 // match_heap computation 4743 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4744 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4745 bool match_heap = match_heap_srcid and match_heap_inst; 4746 4747 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 4748 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4749 r_cleanup_prev_inst = heap_entry.owner.inst; 4750 4751 assert( (not last or match_dir or match_heap) and 4752 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 4753 4754 assert( (not match_dir or not match_heap) and 4755 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 4756 4757 DirectoryEntry dir_entry; 4758 dir_entry.valid = true; 4759 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 4760 dir_entry.dirty = r_cleanup_dirty.read(); 4761 dir_entry.tag = r_cleanup_tag.read(); 4762 dir_entry.lock = r_cleanup_lock.read(); 4763 dir_entry.count = r_cleanup_count.read()-1; 4764 4765 // the matching copy is registered in the directory and 4766 // it must be replaced by the first copy registered in 4767 // the heap. 
The corresponding entry must be freed 4768 if (match_dir) 4769 { 4770 dir_entry.ptr = heap_entry.next; 4771 dir_entry.owner.srcid = heap_entry.owner.srcid; 4772 dir_entry.owner.inst = heap_entry.owner.inst; 4773 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4774 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4775 } 4776 4777 // the matching copy is the first copy in the heap 4778 // It must be freed and the copy registered in directory 4779 // must point to the next copy in heap 4780 else if (match_heap) 4781 { 4782 dir_entry.ptr = heap_entry.next; 4783 dir_entry.owner.srcid = r_cleanup_copy.read(); 4784 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4785 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4786 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4787 } 4788 4789 // The matching copy is in the heap, but is not the first copy 4790 // The directory entry must be modified to decrement count 4791 else 4792 { 4793 dir_entry.ptr = r_cleanup_ptr.read(); 4794 dir_entry.owner.srcid = r_cleanup_copy.read(); 4795 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4796 r_cleanup_next_ptr = heap_entry.next; 4797 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4798 } 4799 4800 m_cache_directory.write(set,way,dir_entry); 4801 4802 #if DEBUG_MEMC_CLEANUP 4803 if (m_debug) 4804 std::cout << " <MEMC " << name() 4805 << " CLEANUP_HEAP_LOCK> Checks matching:" 4806 << " address = " << r_cleanup_nline.read() * m_words * 4 4807 << " / dir_id = " << r_cleanup_copy.read() 4808 << " / dir_ins = " << r_cleanup_copy_inst.read() 4809 << " / heap_id = " << heap_entry.owner.srcid 4810 << " / heap_ins = " << heap_entry.owner.inst 4811 << " / search_id = " << r_cleanup_srcid.read() 4812 << " / search_ins = " << r_cleanup_inst.read() << std::endl; 4813 #endif 4814 break; 4815 } 4816 //////////////////////// 4817 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 4818 // is in the heap, but not the first in linked list 4819 { 4820 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4821 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4822 4823 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 4824 4825 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 4826 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4827 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4828 bool match_heap = match_heap_srcid and match_heap_inst; 4829 4830 assert( (not last or match_heap) and 4831 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 4832 4833 // the matching copy must be removed 4834 if (match_heap) 4835 { 4836 // re-use ressources 4837 r_cleanup_ptr = heap_entry.next; 4838 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 4839 } 4840 // test the next in the linked list 4841 else 4842 { 4843 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 4844 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4845 r_cleanup_prev_inst = heap_entry.owner.inst; 4846 r_cleanup_next_ptr = heap_entry.next; 4847 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4848 } 4849 4850 #if DEBUG_MEMC_CLEANUP 4851 if (m_debug) 4852 { 4853 if (not match_heap) 4854 { 4855 std::cout 4856 << " <MEMC " << name() 4857 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 4858 << std::endl; 4859 } 4860 else 4861 { 4862 std::cout 4863 << " <MEMC " << name() 4864 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 4865 << std::endl; 4866 } 4867 std::cout 4868 << " address = " << r_cleanup_nline.read() * m_words * 4 4869 << " / heap_id = " << heap_entry.owner.srcid 4870 << " / heap_ins = " << 
heap_entry.owner.inst 4871 << " / search_id = " << r_cleanup_srcid.read() 4872 << " / search_ins = " << r_cleanup_inst.read() 4873 << " / last = " << last 4874 << std::endl; 4875 } 4876 #endif 4877 break; 4878 } 4879 //////////////////////// 4880 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 4881 { 4882 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4883 "MEMC ERROR in CLEANUP_HEAP_CLEAN state: bad HEAP allocation"); 4884 4885 HeapEntry heap_entry; 4886 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 4887 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 4888 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 4889 4890 if (last) // this is the last entry of the list of copies 4891 { 4892 heap_entry.next = r_cleanup_prev_ptr.read(); 4893 } 4894 else // this is not the last entry 4895 { 4896 heap_entry.next = r_cleanup_ptr.read(); 4897 } 4898 4899 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 4900 4901 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4902 4903 #if DEBUG_MEMC_CLEANUP 4904 if (m_debug) 4905 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_CLEAN>" 4906 << " Remove the copy in the linked list" << std::endl; 4907 #endif 4908 break; 4909 } 4910 /////////////////////// 4911 case CLEANUP_HEAP_FREE: // The heap entry pointed to by r_cleanup_next_ptr is freed 4912 // and becomes the head of the list of free entries 4913 { 4914 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4915 "MEMC ERROR in CLEANUP_HEAP_FREE state: bad HEAP allocation"); 4916 4917 HeapEntry heap_entry; 4918 heap_entry.owner.srcid = 0; 4919 heap_entry.owner.inst = false; 4920 4921 if (m_heap.is_full()) 4922 { 4923 heap_entry.next = r_cleanup_next_ptr.read(); 4924 } 4925 else 4926 { 4927 heap_entry.next = m_heap.next_free_ptr(); 4928 } 4929 4930 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 4931 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 4932 m_heap.unset_full(); 4933 4934 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4935 4936 #if DEBUG_MEMC_CLEANUP 4937 if (m_debug) 4938 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 4939 << " Update the list of free entries" << std::endl; 4940 #endif 4941 break; 4942 } 4943 ////////////////////// 4944 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 4945 // invalidate transaction matching the cleanup 4946 { 4947 if (r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 4948 4949 size_t index = 0; 4950 bool match_inval; 4951 4952 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 4953 4954 if (not match_inval ) // no pending inval in IVT 4955 { 4956 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4957 4958 #if DEBUG_MEMC_CLEANUP 4959 if (m_debug) 4960 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 4961 << " Unexpected cleanup with no corresponding IVT entry:" 4962 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 4963 #endif 4964 } 4965 else // pending inval in IVT 4966 { 4967 r_cleanup_write_srcid = m_ivt.srcid(index); 4968 r_cleanup_write_trdid = m_ivt.trdid(index); 4969 r_cleanup_write_pktid = m_ivt.pktid(index); 4970 r_cleanup_need_rsp = m_ivt.need_rsp(index); 4971 r_cleanup_need_ack = m_ivt.need_ack(index); 4972 r_cleanup_index = index; 4973 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 4974 4975 #if DEBUG_MEMC_CLEANUP 4976 if (m_debug) 4977 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 4978 << " Cleanup matching pending invalidate transaction on IVT:" 4979 << " address = " << std::hex << (r_cleanup_nline.read()*m_words*4) 4980 << " / 
ivt_entry = " << index << std::endl; 4981 #endif 4982 } 4983 break; 4984 } 4985 /////////////////////////// 4986 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 4987 // and test if last 4988 { 4989 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 4990 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 4991 4992 size_t count = 0; 4993 m_ivt.decrement(r_cleanup_index.read(), count); 4994 4995 if (count == 0) r_cleanup_fsm = CLEANUP_IVT_CLEAR; 4996 else r_cleanup_fsm = CLEANUP_SEND_CLACK ; 4997 4998 #if DEBUG_MEMC_CLEANUP 4999 if (m_debug) 5000 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 5001 << " Decrement response counter in IVT:" 5002 << " IVT_index = " << r_cleanup_index.read() 5003 << " / rsp_count = " << count << std::endl; 5004 #endif 5005 break; 5006 } 5007 /////////////////////// 5008 case CLEANUP_IVT_CLEAR: // Clear IVT entry 5009 // Acknowledge CONFIG FSM if required 5010 { 5011 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5012 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 5013 5014 m_ivt.clear(r_cleanup_index.read()); 5015 5016 if (r_cleanup_need_ack.read()) 5017 { 5018 assert( (r_config_rsp_lines.read() > 0) and 5019 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 5020 5021 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 5022 } 5023 5024 if (r_cleanup_need_rsp.read()) r_cleanup_fsm = CLEANUP_WRITE_RSP; 5025 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 5026 5027 #if DEBUG_MEMC_CLEANUP 5028 if (m_debug) 5029 std::cout << " <MEMC " << name() 5030 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 5031 << " IVT_index = " << r_cleanup_index.read() << std::endl; 5032 #endif 5033 break; 5034 } 5035 /////////////////////// 5036 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 5037 // wait if pending request to the TGT_RSP FSM 5038 { 5039 if (r_cleanup_to_tgt_rsp_req.read()) break; 5040 5041 // no pending request 5042 r_cleanup_to_tgt_rsp_req = true; 5043 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 5044 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 5045 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 5046 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5047 5048 #if DEBUG_MEMC_CLEANUP 5049 if (m_debug) 5050 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 5051 << " Send a response to a previous write request: " 5052 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 5053 << " / rtrdid = " << r_cleanup_write_trdid.read() 5054 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 5055 #endif 5056 break; 5057 } 5058 //////////////////////// 5059 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 5060 // on the coherence CLACK network. 
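// The FSM blocks in this state until the CLACK flit is actually accepted on the
// p_dspin_clack port (see the guard below), then returns to CLEANUP_IDLE.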
5061 { 5062 if (not p_dspin_clack.read) break; 5063 5064 r_cleanup_fsm = CLEANUP_IDLE; 5065 5066 #if DEBUG_MEMC_CLEANUP 5067 if (m_debug) 5068 std::cout << " <MEMC " << name() 5069 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 5070 << " address = " << std::hex << r_cleanup_nline.read()*m_words*4 5071 << " / way = " << std::dec << r_cleanup_way.read() 5072 << " / srcid = " << std::dec << r_cleanup_srcid.read() 5073 << std::endl; 5074 #endif 5075 break; 5076 } 5077 } // end switch cleanup fsm 5078 5079 //////////////////////////////////////////////////////////////////////////////////// 5080 // CAS FSM 5081 //////////////////////////////////////////////////////////////////////////////////// 5082 // The CAS FSM handles the CAS (Compare And Swap) atomic commands. 5083 // 5084 // This command contains two or four flits: 5085 // - In case of a 32-bit atomic access, the first flit contains the value read 5086 // by a previous READ instruction, the second flit contains the value to be written. 5087 // - In case of a 64-bit atomic access, the first two flits contain the value read 5088 // by a previous READ instruction, the next two flits contain the value to be written. 5089 // 5090 // The target address is cacheable. If it is replicated in L1 caches other 5091 // than the writer's, a coherence operation is done. 5092 // 5093 // It accesses the directory to check hit / miss. 5094 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 5095 // If a read transaction to the XRAM for this line already exists, 5096 // or if the transaction table is full, it goes to the WAIT state 5097 // to release the locks and retry. Once the GET transaction has been 5098 // launched, it also goes to the WAIT state and retries. 5099 // The CAS request is not consumed in the FIFO until a HIT is obtained. 5100 // - In case of hit... 5101 /////////////////////////////////////////////////////////////////////////////////// 5102 5103 //std::cout << std::endl << "cas_fsm" << std::endl; 5104 5105 switch(r_cas_fsm.read()) 5106 { 5107 //////////// 5108 case CAS_IDLE: // fill the local rdata buffers 5109 { 5110 if (m_cmd_cas_addr_fifo.rok()) 5111 { 5112 5113 #if DEBUG_MEMC_CAS 5114 if (m_debug) 5115 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 5116 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 5117 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 5118 << " wdata = " << m_cmd_cas_wdata_fifo.read() 5119 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 5120 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 5121 #endif 5122 if (m_cmd_cas_eop_fifo.read()) 5123 { 5124 r_cas_fsm = CAS_DIR_REQ; 5125 } 5126 else // we keep the last word in the FIFO 5127 { 5128 cmd_cas_fifo_get = true; 5129 } 5130 5131 // We fill the two buffers 5132 if (r_cas_cpt.read() < 2) // 32-bit access 5133 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 5134 5135 if ((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 5136 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5137 5138 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits... 
5139 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 5140 5141 if (r_cas_cpt.read() ==2) 5142 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5143 5144 r_cas_cpt = r_cas_cpt.read() +1; 5145 } 5146 break; 5147 } 5148 ///////////////// 5149 case CAS_DIR_REQ: 5150 { 5151 if (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 5152 { 5153 r_cas_fsm = CAS_DIR_LOCK; 5154 } 5155 5156 #if DEBUG_MEMC_CAS 5157 if (m_debug) 5158 std::cout << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " << std::endl; 5159 #endif 5160 break; 5161 } 5162 ///////////////// 5163 case CAS_DIR_LOCK: // Read the directory 5164 { 5165 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5166 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 5167 5168 size_t way = 0; 5169 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 5170 5171 r_cas_is_cnt = entry.is_cnt; 5172 r_cas_dirty = entry.dirty; 5173 r_cas_tag = entry.tag; 5174 r_cas_way = way; 5175 r_cas_copy = entry.owner.srcid; 5176 r_cas_copy_inst = entry.owner.inst; 5177 r_cas_ptr = entry.ptr; 5178 r_cas_count = entry.count; 5179 5180 if (entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 5181 else r_cas_fsm = CAS_MISS_TRT_LOCK; 5182 5183 #if DEBUG_MEMC_CAS 5184 if (m_debug) 5185 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 5186 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5187 << " / hit = " << std::dec << entry.valid 5188 << " / count = " << entry.count 5189 << " / is_cnt = " << entry.is_cnt << std::endl; 5190 #endif 5191 5192 break; 5193 } 5194 ///////////////////// 5195 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 5196 // and check data change in cache 5197 { 5198 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5199 "MEMC ERROR in CAS_DIR_HIT_READ: Bad DIR allocation"); 5200 5201 size_t way = r_cas_way.read(); 5202 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5203 5204 // update directory (lock & dirty bits) 5205 DirectoryEntry entry; 5206 entry.valid = true; 5207 entry.is_cnt = r_cas_is_cnt.read(); 5208 entry.dirty = true; 5209 entry.lock = true; 5210 entry.tag = r_cas_tag.read(); 5211 entry.owner.srcid = r_cas_copy.read(); 5212 entry.owner.inst = r_cas_copy_inst.read(); 5213 entry.count = r_cas_count.read(); 5214 entry.ptr = r_cas_ptr.read(); 5215 5216 m_cache_directory.write(set, way, entry); 5217 5218 // Store data from cache in buffer to do the comparison in next state 5219 m_cache_data.read_line(way, set, r_cas_data); 5220 5221 r_cas_fsm = CAS_DIR_HIT_COMPARE; 5222 5223 #if DEBUG_MEMC_CAS 5224 if (m_debug) 5225 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 5226 << " cache and store it in buffer" << std::endl; 5227 #endif 5228 break; 5229 } 5230 //////////////////////// 5231 case CAS_DIR_HIT_COMPARE: 5232 { 5233 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5234 5235 // check data change 5236 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 5237 5238 if (r_cas_cpt.read() == 4) // 64 bits CAS 5239 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 5240 5241 // to avoid livelock, force the atomic access to fail pseudo-randomly 5242 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 5243 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 5244 5245 if (ok and not forced_fail) r_cas_fsm = CAS_DIR_HIT_WRITE; 5246 else r_cas_fsm = CAS_RSP_FAIL; 5247 5248 #if DEBUG_MEMC_CAS 5249 if (m_debug) 5250 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare old and new data" 5251 << " / 
expected value = " << std::hex << r_cas_rdata[0].read() 5252 << " / actual value = " << std::hex << r_cas_data[word].read() 5253 << " / forced_fail = " << std::dec << forced_fail << std::endl; 5254 #endif 5255 break; 5256 } 5257 ////////////////////// 5258 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 5259 // write data in cache if no CC request 5260 { 5261 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5262 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 5263 5264 // The CAS is a success => sw access to the llsc_global_table 5265 m_llsc_table.sw( m_nline[(addr_t)m_cmd_cas_addr_fifo.read()], 5266 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())], 5267 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())] ); 5268 5269 // test coherence request 5270 if (r_cas_count.read()) // replicated line 5271 { 5272 if (r_cas_is_cnt.read()) 5273 { 5274 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 5275 5276 #if DEBUG_MEMC_CAS 5277 if (m_debug) 5278 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5279 << " Broacast Inval required" 5280 << " / copies = " << r_cas_count.read() << std::endl; 5281 #endif 5282 } 5283 else if (not r_cas_to_cc_send_multi_req.read() and 5284 not r_cas_to_cc_send_brdcast_req.read()) 5285 { 5286 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5287 5288 #if DEBUG_MEMC_CAS 5289 if (m_debug) 5290 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5291 << " Multi Inval required" 5292 << " / copies = " << r_cas_count.read() << std::endl; 5293 #endif 5294 } 5295 else 5296 { 5297 r_cas_fsm = CAS_WAIT; 5298 5299 #if DEBUG_MEMC_CAS 5300 if (m_debug) 5301 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5302 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 5303 #endif 5304 } 5305 } 5306 else // no copies 5307 { 5308 size_t way = r_cas_way.read(); 5309 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5310 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5311 5312 // cache update 5313 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5314 if (r_cas_cpt.read() == 4) 5315 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5316 5317 r_cas_fsm = CAS_RSP_SUCCESS; 5318 5319 #if DEBUG_MEMC_CAS 5320 if (m_debug) 5321 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 5322 << " way = " << std::dec << way 5323 << " / set = " << set 5324 << " / word = " << word 5325 << " / value = " << r_cas_wdata.read() 5326 << " / count = " << r_cas_count.read() 5327 << " / global_llsc_table access" << std::endl; 5328 #endif 5329 } 5330 break; 5331 } 5332 ///////////////// 5333 case CAS_UPT_LOCK: // try to register the transaction in UPT 5334 // and write data in cache if successful registration 5335 // releases locks to retry later if UPT full 5336 { 5337 if (r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 5338 { 5339 bool wok = false; 5340 size_t index = 0; 5341 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5342 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5343 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5344 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5345 size_t nb_copies = r_cas_count.read(); 5346 5347 wok = m_upt.set( true, // it's an update transaction 5348 false, // it's not a broadcast 5349 true, // response required 5350 false, // no acknowledge required 5351 srcid, 5352 trdid, 5353 pktid, 5354 nline, 5355 nb_copies, 5356 index); 5357 if (wok) // coherence transaction registered in UPT 5358 { 5359 // cache update 5360 size_t way = r_cas_way.read(); 5361 size_t set = 
m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5362 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5363 5364 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5365 if (r_cas_cpt.read() ==4) 5366 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5367 5368 r_cas_upt_index = index; 5369 r_cas_fsm = CAS_UPT_HEAP_LOCK; 5370 } 5371 else // releases the locks protecting UPT and DIR UPT full 5372 { 5373 r_cas_fsm = CAS_WAIT; 5374 } 5375 5376 #if DEBUG_MEMC_CAS 5377 if (m_debug) 5378 std::cout << " <MEMC " << name() 5379 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 5380 << " / wok = " << wok 5381 << " / address = " << std::hex << nline*m_words*4 5382 << " / count = " << nb_copies << std::endl; 5383 #endif 5384 } 5385 break; 5386 } 5387 ///////////// 5388 case CAS_WAIT: // release all locks and retry from beginning 5389 { 5390 5391 #if DEBUG_MEMC_CAS 5392 if (m_debug) 5393 std::cout << " <MEMC " << name() << " CAS_WAIT> Release all locks" << std::endl; 5394 #endif 5395 r_cas_fsm = CAS_DIR_REQ; 5396 break; 5397 } 5398 ////////////////////// 5399 case CAS_UPT_HEAP_LOCK: // lock the heap 5400 { 5401 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5402 { 5403 5404 #if DEBUG_MEMC_CAS 5405 if (m_debug) 5406 { 5407 std::cout << " <MEMC " << name() 5408 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 5409 } 5410 #endif 5411 r_cas_fsm = CAS_UPT_REQ; 5412 } 5413 break; 5414 } 5415 //////////////// 5416 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 5417 { 5418 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 5419 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5420 5421 if (!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 5422 { 5423 r_cas_to_cc_send_brdcast_req = false; 5424 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5425 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5426 r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5427 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 5428 5429 if (r_cas_cpt.read() == 4) 5430 { 5431 r_cas_to_cc_send_is_long = true; 5432 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 5433 } 5434 else 5435 { 5436 r_cas_to_cc_send_is_long = false; 5437 r_cas_to_cc_send_wdata_high = 0; 5438 } 5439 5440 // We put the first copy in the fifo 5441 cas_to_cc_send_fifo_put = true; 5442 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 5443 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 5444 if (r_cas_count.read() == 1) // one single copy 5445 { 5446 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5447 // update responses 5448 cmd_cas_fifo_get = true; 5449 r_cas_to_cc_send_multi_req = true; 5450 r_cas_cpt = 0; 5451 } 5452 else // several copies 5453 { 5454 r_cas_fsm = CAS_UPT_NEXT; 5455 } 5456 5457 #if DEBUG_MEMC_CAS 5458 if (m_debug) 5459 { 5460 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 5461 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5462 << " / wdata = " << std::hex << r_cas_wdata.read() 5463 << " / srcid = " << std::dec << r_cas_copy.read() 5464 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 5465 } 5466 #endif 5467 } 5468 break; 5469 } 5470 ///////////////// 5471 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 5472 { 5473 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5474 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5475 5476 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 5477 
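// Each heap entry holds one extra copy (owner srcid / inst flag) plus the index of
// the next entry; the list is terminated by an entry whose next field points to itself.
// The copy just read is pushed to the CC_SEND fifo below.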
cas_to_cc_send_fifo_srcid = entry.owner.srcid; 5478 cas_to_cc_send_fifo_inst = entry.owner.inst; 5479 cas_to_cc_send_fifo_put = true; 5480 5481 if (m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 5482 { 5483 r_cas_ptr = entry.next; 5484 if (entry.next == r_cas_ptr.read()) // last copy 5485 { 5486 r_cas_to_cc_send_multi_req = true; 5487 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5488 // all update responses 5489 cmd_cas_fifo_get = true; 5490 r_cas_cpt = 0; 5491 } 5492 } 5493 5494 #if DEBUG_MEMC_CAS 5495 if (m_debug) 5496 { 5497 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 5498 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5499 << " / wdata = " << std::hex << r_cas_wdata.read() 5500 << " / srcid = " << std::dec << entry.owner.srcid 5501 << " / inst = " << std::dec << entry.owner.inst << std::endl; 5502 } 5503 #endif 5504 break; 5505 } 5506 ///////////////////// 5507 case CAS_BC_TRT_LOCK: // get TRT lock to check TRT not full 5508 { 5509 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5510 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 5511 5512 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5513 { 5514 size_t wok_index = 0; 5515 bool wok = !m_trt.full(wok_index); 5516 if (wok ) 5517 { 5518 r_cas_trt_index = wok_index; 5519 r_cas_fsm = CAS_BC_IVT_LOCK; 5520 } 5521 else 5522 { 5523 r_cas_fsm = CAS_WAIT; 5524 } 5525 5526 #if DEBUG_MEMC_CAS 5527 if (m_debug) 5528 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 5529 << " : wok = " << wok << " / index = " << wok_index << std::endl; 5530 #endif 5531 } 5532 break; 5533 } 5534 ///////////////////// 5535 case CAS_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 5536 { 5537 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5538 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas DIR allocation"); 5539 5540 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5541 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas TRT allocation"); 5542 5543 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS ) 5544 { 5545 // register broadcast inval transaction in IVT 5546 bool wok = false; 5547 size_t index = 0; 5548 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5549 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5550 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5551 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5552 size_t nb_copies = r_cas_count.read(); 5553 5554 wok = m_ivt.set( false, // it's an inval transaction 5555 true, // it's a broadcast 5556 true, // response required 5557 false, // no acknowledge required 5558 srcid, 5559 trdid, 5560 pktid, 5561 nline, 5562 nb_copies, 5563 index); 5564 #if DEBUG_MEMC_CAS 5565 if (m_debug and wok ) 5566 std::cout << " <MEMC " << name() << " CAS_BC_IVT_LOCK> Register broadcast inval in IVT" 5567 << " / copies = " << r_cas_count.read() << std::endl; 5568 #endif 5569 r_cas_upt_index = index; 5570 if (wok ) r_cas_fsm = CAS_BC_DIR_INVAL; 5571 else r_cas_fsm = CAS_WAIT; 5572 } 5573 break; 5574 } 5575 ////////////////////// 5576 case CAS_BC_DIR_INVAL: // Register PUT transaction in TRT, 5577 // and inval the DIR entry 5578 { 5579 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5580 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 5581 5582 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5583 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 5584 5585 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 5586 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad 
IVT allocation"); 5587 5588 // set TRT 5589 std::vector<data_t> data_vector; 5590 data_vector.clear(); 5591 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5592 for(size_t i=0; i<m_words; i++) 5593 { 5594 if (i == word) // first modified word 5595 data_vector.push_back( r_cas_wdata.read()); 5596 else if ((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 5597 data_vector.push_back( m_cmd_cas_wdata_fifo.read()); 5598 else // unmodified words 5599 data_vector.push_back( r_cas_data[i].read()); 5600 } 5601 m_trt.set( r_cas_trt_index.read(), 5602 false, // PUT request 5603 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 5604 0, 5605 0, 5606 0, 5607 false, // not a processor read 5608 0, 5609 0, 5610 std::vector<be_t> (m_words,0), 5611 data_vector ); 5612 5613 // invalidate directory entry 5614 DirectoryEntry entry; 5615 entry.valid = false; 5616 entry.dirty = false; 5617 entry.tag = 0; 5618 entry.is_cnt = false; 5619 entry.lock = false; 5620 entry.count = 0; 5621 entry.owner.srcid = 0; 5622 entry.owner.inst = false; 5623 entry.ptr = 0; 5624 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5625 size_t way = r_cas_way.read(); 5626 5627 m_cache_directory.write(set, way, entry); 5628 5629 r_cas_fsm = CAS_BC_CC_SEND; 5630 5631 #if DEBUG_MEMC_CAS 5632 if (m_debug) 5633 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 5634 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 5635 #endif 5636 break; 5637 } 5638 /////////////////// 5639 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 5640 { 5641 if (not r_cas_to_cc_send_multi_req.read() and 5642 not r_cas_to_cc_send_brdcast_req.read()) 5643 { 5644 r_cas_to_cc_send_multi_req = false; 5645 r_cas_to_cc_send_brdcast_req = true; 5646 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5647 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5648 r_cas_to_cc_send_index = 0; 5649 r_cas_to_cc_send_wdata = 0; 5650 5651 r_cas_fsm = CAS_BC_XRAM_REQ; 5652 5653 #if DEBUG_MEMC_CAS 5654 if (m_debug) 5655 std::cout << " <MEMC " << name() 5656 << " CAS_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 5657 #endif 5658 } 5659 break; 5660 } 5661 //////////////////// 5662 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a PUT transaction 5663 { 5664 if (not r_cas_to_ixr_cmd_req.read()) 5665 { 5666 r_cas_to_ixr_cmd_req = true; 5667 r_cas_to_ixr_cmd_put = true; 5668 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 5669 r_cas_fsm = CAS_IDLE; 5670 cmd_cas_fifo_get = true; 5671 r_cas_cpt = 0; 5672 5673 #if DEBUG_MEMC_CAS 5674 if (m_debug) 5675 std::cout << " <MEMC " << name() 5676 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 5677 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 5678 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 5679 #endif 5680 } 5681 break; 5682 } 5683 ///////////////// 5684 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 5685 { 5686 if (not r_cas_to_tgt_rsp_req.read()) 5687 { 5688 cmd_cas_fifo_get = true; 5689 r_cas_cpt = 0; 5690 r_cas_to_tgt_rsp_req = true; 5691 r_cas_to_tgt_rsp_data = 1; 5692 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 5693 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 5694 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 5695 r_cas_fsm = CAS_IDLE; 5696 5697 #if DEBUG_MEMC_CAS 5698 if (m_debug) 5699 std::cout << " <MEMC " << name() 5700 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 
5701 #endif 5702 } 5703 break; 5704 } 5705 //////////////////// 5706 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 5707 { 5708 if (not r_cas_to_tgt_rsp_req.read()) 5709 { 5710 cmd_cas_fifo_get = true; 5711 r_cas_cpt = 0; 5712 r_cas_to_tgt_rsp_req = true; 5713 r_cas_to_tgt_rsp_data = 0; 5714 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 5715 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 5716 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 5717 r_cas_fsm = CAS_IDLE; 5718 5719 #if DEBUG_MEMC_CAS 5720 if (m_debug) 5721 std::cout << " <MEMC " << name() 5722 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 5723 #endif 5724 } 5725 break; 5726 } 5727 /////////////////////// 5728 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 5729 { 5730 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5731 { 5732 size_t index = 0; 5733 bool hit_read = m_trt.hit_read( 5734 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 5735 bool hit_write = m_trt.hit_write( 5736 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 5737 bool wok = not m_trt.full(index); 5738 5739 #if DEBUG_MEMC_CAS 5740 if (m_debug) 5741 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 5742 << " / hit_read = " << hit_read 5743 << " / hit_write = " << hit_write 5744 << " / wok = " << wok 5745 << " / index = " << index << std::endl; 5746 #endif 5747 5748 if (hit_read or !wok or hit_write) // missing line already requested or TRT full 5749 { 5750 r_cas_fsm = CAS_WAIT; 5751 } 5752 else 5753 { 5754 r_cas_trt_index = index; 5755 r_cas_fsm = CAS_MISS_TRT_SET; 5756 } 5757 } 5758 break; 5759 } 5760 ////////////////////// 5761 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 5762 { 5763 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5764 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 5765 5766 std::vector<be_t> be_vector; 5767 std::vector<data_t> data_vector; 5768 be_vector.clear(); 5769 data_vector.clear(); 5770 for(size_t i=0; i<m_words; i++) 5771 { 5772 be_vector.push_back(0); 5773 data_vector.push_back(0); 5774 } 5775 5776 m_trt.set( r_cas_trt_index.read(), 5777 true, // GET 5778 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 5779 m_cmd_cas_srcid_fifo.read(), 5780 m_cmd_cas_trdid_fifo.read(), 5781 m_cmd_cas_pktid_fifo.read(), 5782 false, // write request from processor 5783 0, 5784 0, 5785 std::vector<be_t>(m_words,0), 5786 std::vector<data_t>(m_words,0)); 5787 5788 r_cas_fsm = CAS_MISS_XRAM_REQ; 5789 5790 #if DEBUG_MEMC_CAS 5791 if (m_debug) 5792 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register GET transaction in TRT" 5793 << " / address = " << std::hex << (addr_t)m_cmd_cas_addr_fifo.read() 5794 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 5795 #endif 5796 break; 5797 } 5798 ////////////////////// 5799 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM a GET request 5800 { 5801 if (not r_cas_to_ixr_cmd_req.read()) 5802 { 5803 r_cas_to_ixr_cmd_req = true; 5804 r_cas_to_ixr_cmd_put = false; 5805 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 5806 r_cas_fsm = CAS_WAIT; 5807 5808 #if DEBUG_MEMC_CAS 5809 if (m_debug) 5810 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 5811 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 5812 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 5813 #endif 5814 } 5815 break; 5816 } 5817 } // end switch r_cas_fsm 5818 5819 5820 
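// Informal summary of the CAS protocol implemented above (derived from the states of
// this FSM): the old value carried by the command is compared with the current cache
// contents in CAS_DIR_HIT_COMPARE; on a match the new value is written (directly, or
// after a coherence transaction when the line is replicated) and a success response
// (rdata = 0) is eventually returned; otherwise a failure response (rdata = 1) is
// returned and the line is left unchanged. A pseudo-random forced failure driven by
// r_cas_lfsr breaks potential livelocks between competing processors.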
////////////////////////////////////////////////////////////////////////////// 5821 // CC_SEND FSM 5822 ////////////////////////////////////////////////////////////////////////////// 5823 // The CC_SEND fsm controls the DSPIN initiator port on the coherence 5824 // network, used to update or invalidate cache lines in L1 caches. 5825 // 5826 // It implements a round-robin priority between the four possible client FSMs 5827 // XRAM_RSP > CAS > WRITE > CONFIG 5828 // 5829 // Each FSM can request the next services: 5830 // - r_xram_rsp_to_cc_send_multi_req : multi-inval 5831 // r_xram_rsp_to_cc_send_brdcast_req : broadcast-inval 5832 // - r_write_to_cc_send_multi_req : multi-update 5833 // r_write_to_cc_send_brdcast_req : broadcast-inval 5834 // - r_cas_to_cc_send_multi_req : multi-update 5835 // r_cas_to_cc_send_brdcast_req : broadcast-inval 5836 // - r_config_to_cc_send_multi_req : multi-inval 5837 // r_config_to_cc_send_brdcast_req : broadcast-inval 5838 // 5839 // An inval request is a double DSPIN flit command containing: 5840 // 1. the index of the line to be invalidated. 5841 // 5842 // An update request is a multi-flit DSPIN command containing: 5843 // 1. the index of the cache line to be updated. 5844 // 2. the index of the first modified word in the line. 5845 // 3. the data to update 5846 /////////////////////////////////////////////////////////////////////////////// 5847 5848 //std::cout << std::endl << "cc_send_fsm" << std::endl; 5849 5850 switch(r_cc_send_fsm.read()) 5851 { 5852 ///////////////////////// 5853 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 5854 { 5855 // XRAM_RSP 5856 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 5857 r_xram_rsp_to_cc_send_multi_req.read()) 5858 { 5859 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 5860 break; 5861 } 5862 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 5863 { 5864 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 5865 break; 5866 } 5867 // CAS 5868 if (m_cas_to_cc_send_inst_fifo.rok() or 5869 r_cas_to_cc_send_multi_req.read()) 5870 { 5871 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5872 break; 5873 } 5874 if (r_cas_to_cc_send_brdcast_req.read()) 5875 { 5876 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5877 break; 5878 } 5879 // WRITE 5880 if (m_write_to_cc_send_inst_fifo.rok() or 5881 r_write_to_cc_send_multi_req.read()) 5882 { 5883 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5884 break; 5885 } 5886 if (r_write_to_cc_send_brdcast_req.read()) 5887 { 5888 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5889 break; 5890 } 5891 // CONFIG 5892 if (r_config_to_cc_send_multi_req.read()) 5893 { 5894 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5895 break; 5896 } 5897 if (r_config_to_cc_send_brdcast_req.read()) 5898 { 5899 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5900 break; 5901 } 5902 break; 5903 } 5904 //////////////////////// 5905 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 5906 { 5907 // CONFIG 5908 if (r_config_to_cc_send_multi_req.read()) 5909 { 5910 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5911 break; 5912 } 5913 if (r_config_to_cc_send_brdcast_req.read()) 5914 { 5915 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5916 break; 5917 } 5918 // XRAM_RSP 5919 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 5920 r_xram_rsp_to_cc_send_multi_req.read()) 5921 { 5922 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 5923 break; 5924 } 5925 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 5926 { 5927 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 5928 break; 5929 } 5930 // CAS 5931 if (m_cas_to_cc_send_inst_fifo.rok() 
or 5932 r_cas_to_cc_send_multi_req.read()) 5933 { 5934 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5935 break; 5936 } 5937 if (r_cas_to_cc_send_brdcast_req.read()) 5938 { 5939 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5940 break; 5941 } 5942 // WRITE 5943 if (m_write_to_cc_send_inst_fifo.rok() or 5944 r_write_to_cc_send_multi_req.read()) 5945 { 5946 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5947 break; 5948 } 5949 if (r_write_to_cc_send_brdcast_req.read()) 5950 { 5951 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5952 break; 5953 } 5954 break; 5955 } 5956 /////////////////////////// 5957 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 5958 { 5959 // CAS 5960 if (m_cas_to_cc_send_inst_fifo.rok() or 5961 r_cas_to_cc_send_multi_req.read()) 5962 { 5963 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5964 break; 5965 } 5966 if (r_cas_to_cc_send_brdcast_req.read()) 5967 { 5968 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5969 break; 5970 } 5971 // WRITE 5972 if (m_write_to_cc_send_inst_fifo.rok() or 5973 r_write_to_cc_send_multi_req.read()) 5974 { 5975 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5976 break; 5977 } 5978 5979 if (r_write_to_cc_send_brdcast_req.read()) 5980 { 5981 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5982 break; 5983 } 5984 // CONFIG 5985 if (r_config_to_cc_send_multi_req.read()) 5986 { 5987 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5988 break; 5989 } 5990 if (r_config_to_cc_send_brdcast_req.read()) 5991 { 5992 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5993 break; 5994 } 5995 // XRAM_RSP 5996 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 5997 r_xram_rsp_to_cc_send_multi_req.read()) 5998 { 5999 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6000 break; 6001 } 6002 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 6003 { 6004 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6005 break; 6006 } 6007 break; 6008 } 6009 ////////////////////// 6010 case CC_SEND_CAS_IDLE: // WRITE FSM has highest priority 6011 { 6012 if (m_write_to_cc_send_inst_fifo.rok() or 6013 r_write_to_cc_send_multi_req.read()) 6014 { 6015 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6016 break; 6017 } 6018 if (r_write_to_cc_send_brdcast_req.read()) 6019 { 6020 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6021 break; 6022 } 6023 // CONFIG 6024 if (r_config_to_cc_send_multi_req.read()) 6025 { 6026 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6027 break; 6028 } 6029 if (r_config_to_cc_send_brdcast_req.read()) 6030 { 6031 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6032 break; 6033 } 6034 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 6035 r_xram_rsp_to_cc_send_multi_req.read()) 6036 { 6037 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6038 break; 6039 } 6040 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 6041 { 6042 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6043 break; 6044 } 6045 if (m_cas_to_cc_send_inst_fifo.rok() or 6046 r_cas_to_cc_send_multi_req.read()) 6047 { 6048 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6049 break; 6050 } 6051 if (r_cas_to_cc_send_brdcast_req.read()) 6052 { 6053 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6054 break; 6055 } 6056 break; 6057 } 6058 ///////////////////////////////// 6059 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 6060 { 6061 if (m_config_to_cc_send_inst_fifo.rok()) 6062 { 6063 if (not p_dspin_m2p.read) break; 6064 // <Activity Counters> 6065 if (is_local_req(m_config_to_cc_send_srcid_fifo.read())) 6066 { 6067 m_cpt_m_inval_local++; 6068 } 6069 else 6070 { 6071 m_cpt_m_inval_remote++; 6072 m_cpt_m_inval_cost +=
req_distance(m_config_to_cc_send_srcid_fifo.read()); 6073 } 6074 // </Activity Counters> 6075 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 6076 break; 6077 } 6078 if (r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 6079 // <Activity Counters> 6080 m_cpt_m_inval++; 6081 // </Activity Counters> 6082 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 6083 break; 6084 } 6085 //////////////////////////////// 6086 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 6087 { 6088 if (not p_dspin_m2p.read) break; 6089 config_to_cc_send_fifo_get = true; 6090 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6091 6092 #if DEBUG_MEMC_CC_SEND 6093 if (m_debug) 6094 std::cout << " <MEMC " << name() 6095 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 6096 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 6097 #endif 6098 break; 6099 } 6100 /////////////////////////////////// 6101 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 6102 { 6103 if (not p_dspin_m2p.read) break; 6104 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 6105 break; 6106 } 6107 ////////////////////////////////// 6108 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 6109 { 6110 if (not p_dspin_m2p.read) break; 6111 // <Activity Counters> 6112 m_cpt_br_inval++; 6113 // </Activity Counters> 6114 r_config_to_cc_send_brdcast_req = false; 6115 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 6116 6117 #if DEBUG_MEMC_CC_SEND 6118 if (m_debug) 6119 std::cout << " <MEMC " << name() 6120 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 6121 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 6122 #endif 6123 break; 6124 } 6125 /////////////////////////////////// 6126 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 6127 { 6128 if (m_xram_rsp_to_cc_send_inst_fifo.rok()) 6129 { 6130 if (not p_dspin_m2p.read) break; 6131 // <Activity Counters> 6132 if (is_local_req(m_xram_rsp_to_cc_send_srcid_fifo.read())) 6133 { 6134 m_cpt_m_inval_local++; 6135 } 6136 else 6137 { 6138 m_cpt_m_inval_remote++; 6139 m_cpt_m_inval_cost += req_distance(m_xram_rsp_to_cc_send_srcid_fifo.read()); 6140 } 6141 // </Activity Counters> 6142 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 6143 break; 6144 } 6145 if (r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 6146 // <Activity Counters> 6147 m_cpt_m_inval++; 6148 // </Activity Counters> 6149 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6150 break; 6151 } 6152 ////////////////////////////////// 6153 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 6154 { 6155 if (not p_dspin_m2p.read) break; 6156 xram_rsp_to_cc_send_fifo_get = true; 6157 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6158 6159 #if DEBUG_MEMC_CC_SEND 6160 if (m_debug) 6161 std::cout << " <MEMC " << name() 6162 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 6163 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6164 #endif 6165 break; 6166 } 6167 ///////////////////////////////////// 6168 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 6169 { 6170 if (not p_dspin_m2p.read) break; 6171 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 6172 break; 6173 } 6174 //////////////////////////////////// 6175 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 6176 { 6177 if (not p_dspin_m2p.read) break; 6178 // <Activity Counters> 
6179 m_cpt_br_inval++; 6180 // </Activity Counters> 6181 r_xram_rsp_to_cc_send_brdcast_req = false; 6182 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6183 6184 #if DEBUG_MEMC_CC_SEND 6185 if (m_debug) 6186 std::cout << " <MEMC " << name() 6187 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 6188 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6189 #endif 6190 break; 6191 } 6192 ////////////////////////////////// 6193 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 6194 { 6195 if (not p_dspin_m2p.read) break; 6196 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 6197 break; 6198 } 6199 ///////////////////////////////// 6200 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 6201 { 6202 if (not p_dspin_m2p.read) break; 6203 6204 // <Activity Counters> 6205 m_cpt_br_inval++; 6206 // </Activity Counters> 6207 6208 r_write_to_cc_send_brdcast_req = false; 6209 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6210 6211 #if DEBUG_MEMC_CC_SEND 6212 if (m_debug) 6213 std::cout << " <MEMC " << name() 6214 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 6215 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 6216 #endif 6217 break; 6218 } 6219 /////////////////////////////// 6220 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 6221 { 6222 if (m_write_to_cc_send_inst_fifo.rok()) 6223 { 6224 if (not p_dspin_m2p.read) break; 6225 // <Activity Counters> 6226 if (is_local_req(m_write_to_cc_send_srcid_fifo.read())) 6227 { 6228 m_cpt_update_local++; 6229 } 6230 else 6231 { 6232 m_cpt_update_remote++; 6233 m_cpt_update_cost += req_distance(m_write_to_cc_send_srcid_fifo.read()); 6234 } 6235 // </Activity Counters> 6236 6237 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 6238 break; 6239 } 6240 6241 if (r_write_to_cc_send_multi_req.read()) 6242 { 6243 r_write_to_cc_send_multi_req = false; 6244 } 6245 6246 // <Activity Counters> 6247 m_cpt_update++; 6248 // </Activity Counters> 6249 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6250 break; 6251 } 6252 ////////////////////////////// 6253 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 6254 { 6255 if (not p_dspin_m2p.read) break; 6256 6257 r_cc_send_cpt = 0; 6258 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 6259 6260 #if DEBUG_MEMC_CC_SEND 6261 if (m_debug) 6262 std::cout << " <MEMC " << name() 6263 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for address " 6264 << r_write_to_cc_send_nline.read()*m_words*4 << std::endl; 6265 #endif 6266 break; 6267 } 6268 ///////////////////////////// 6269 case CC_SEND_WRITE_UPDT_DATA: // send data flits for multi-update (from WRITE FSM) 6270 { 6271 if (not p_dspin_m2p.read) break; 6272 if (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read() - 1)) 6273 { 6274 write_to_cc_send_fifo_get = true; 6275 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6276 break; 6277 } 6278 6279 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 6280 break; 6281 } 6282 //////////////////////////////// 6283 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 6284 { 6285 if (not p_dspin_m2p.read) break; 6286 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 6287 break; 6288 } 6289 /////////////////////////////// 6290 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 6291 { 6292 if (not p_dspin_m2p.read) break; 6293 // <Activity Counters> 6294 m_cpt_br_inval++; 6295 // </Activity Counters> 6296 6297 r_cas_to_cc_send_brdcast_req = false; 6298 r_cc_send_fsm = 
CC_SEND_CAS_IDLE; 6299 6300 #if DEBUG_MEMC_CC_SEND 6301 if (m_debug) 6302 std::cout << " <MEMC " << name() 6303 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for address: " 6304 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6305 #endif 6306 break; 6307 } 6308 ///////////////////////////// 6309 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 6310 { 6311 if (m_cas_to_cc_send_inst_fifo.rok()) 6312 { 6313 if (not p_dspin_m2p.read) break; 6314 // <Activity Counters> 6315 if (is_local_req(m_cas_to_cc_send_srcid_fifo.read())) 6316 { 6317 m_cpt_update_local++; 6318 } 6319 else 6320 { 6321 m_cpt_update_remote++; 6322 m_cpt_update_cost += req_distance(m_cas_to_cc_send_srcid_fifo.read()); 6323 } 6324 // </Activity Counters> 6325 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 6326 break; 6327 } 6328 6329 // no more packets to send for the multi-update 6330 if (r_cas_to_cc_send_multi_req.read()) 6331 { 6332 r_cas_to_cc_send_multi_req = false; 6333 } 6334 6335 // <Activity Counters> 6336 m_cpt_update++; 6337 // </Activity Counters> 6338 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6339 break; 6340 } 6341 //////////////////////////// 6342 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 6343 { 6344 if (not p_dspin_m2p.read) break; 6345 r_cc_send_cpt = 0; 6346 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 6347 6348 #if DEBUG_MEMC_CC_SEND 6349 if (m_debug) 6350 std::cout << " <MEMC " << name() 6351 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for address " 6352 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6353 #endif 6354 break; 6355 } 6356 /////////////////////////// 6357 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 6358 { 6359 if (not p_dspin_m2p.read) break; 6360 6361 if (r_cas_to_cc_send_is_long.read()) 6362 { 6363 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 6364 break; 6365 } 6366 6367 cas_to_cc_send_fifo_get = true; 6368 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6369 break; 6370 } 6371 //////////////////////////////// 6372 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for multi-update (from CAS FSM) 6373 { 6374 if (not p_dspin_m2p.read) break; 6375 cas_to_cc_send_fifo_get = true; 6376 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6377 break; 6378 } 6379 } 6380 // end switch r_cc_send_fsm 6381 6382 ////////////////////////////////////////////////////////////////////////////// 6383 // CC_RECEIVE FSM 6384 ////////////////////////////////////////////////////////////////////////////// 6385 // The CC_RECEIVE fsm controls the DSPIN target port on the coherence 6386 // network. 
6387 ////////////////////////////////////////////////////////////////////////////////// 6388 6389 //std::cout << std::endl << "cc_receive_fsm" << std::endl; 6390 6391 switch(r_cc_receive_fsm.read()) 6392 { 6393 ///////////////////// 6394 case CC_RECEIVE_IDLE: 6395 { 6396 if (not p_dspin_p2m.write) break; 6397 6398 uint8_t type = 6399 DspinDhccpParam::dspin_get( 6400 p_dspin_p2m.data.read(), 6401 DspinDhccpParam::P2M_TYPE); 6402 6403 if ((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or 6404 (type == DspinDhccpParam::TYPE_CLEANUP_INST)) 6405 { 6406 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 6407 break; 6408 } 6409 6410 if (type == DspinDhccpParam::TYPE_MULTI_ACK) 6411 { 6412 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 6413 break; 6414 } 6415 6416 assert(false and 6417 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6418 "Illegal type in coherence request"); 6419 6420 break; 6421 } 6422 //////////////////////// 6423 case CC_RECEIVE_CLEANUP: 6424 { 6425 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6426 6427 if (not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6428 break; 6429 6430 assert(not p_dspin_p2m.eop.read() and 6431 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6432 "CLEANUP command must have two flits"); 6433 6434 cc_receive_to_cleanup_fifo_put = true; 6435 r_cc_receive_fsm = CC_RECEIVE_CLEANUP_EOP; 6436 6437 break; 6438 } 6439 //////////////////////////// 6440 case CC_RECEIVE_CLEANUP_EOP: 6441 { 6442 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6443 6444 if (not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6445 break; 6446 6447 assert(p_dspin_p2m.eop.read() and 6448 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6449 "CLEANUP command must have two flits"); 6450 6451 cc_receive_to_cleanup_fifo_put = true; 6452 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6453 6454 break; 6455 } 6456 6457 ////////////////////////// 6458 case CC_RECEIVE_MULTI_ACK: 6459 { 6460 // write MULTI_ACK flit in CC_RECEIVE to MULTI_ACK fifo 6461 6462 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 6463 if (not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 6464 break; 6465 6466 assert(p_dspin_p2m.eop.read() and 6467 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6468 "MULTI_ACK command must have one flit"); 6469 6470 cc_receive_to_multi_ack_fifo_put = true; 6471 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6472 break; 6473 } 6474 } 6475 6476 ////////////////////////////////////////////////////////////////////////// 6477 // TGT_RSP FSM 6478 ////////////////////////////////////////////////////////////////////////// 6479 // The TGT_RSP fsm sends the responses on the VCI target port 6480 // with a round robin priority between eight requests : 6481 // - r_config_to_tgt_rsp_req 6482 // - r_tgt_cmd_to_tgt_rsp_req 6483 // - r_read_to_tgt_rsp_req 6484 // - r_write_to_tgt_rsp_req 6485 // - r_cas_to_tgt_rsp_req 6486 // - r_cleanup_to_tgt_rsp_req 6487 // - r_xram_rsp_to_tgt_rsp_req 6488 // - r_multi_ack_to_tgt_rsp_req 6489 // 6490 // The ordering is : 6491 // config > tgt_cmd > read > write > cas > xram > multi_ack > cleanup 6492 ////////////////////////////////////////////////////////////////////////// 6493 6494 //std::cout << std::endl << "tgt_rsp_fsm" << std::endl; 6495 6496 switch(r_tgt_rsp_fsm.read()) 6497 { 6498 ///////////////////////// 6499 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 6500 { 6501 if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6502 else if (r_read_to_tgt_rsp_req) 6503 { 6504 r_tgt_rsp_fsm = TGT_RSP_READ; 6505 r_tgt_rsp_cpt =
r_read_to_tgt_rsp_word.read(); 6506 } 6507 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6508 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6509 else if (r_xram_rsp_to_tgt_rsp_req) 6510 { 6511 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6512 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6513 } 6514 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6515 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6516 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6517 break; 6518 } 6519 ////////////////////////// 6520 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 6521 { 6522 if (r_read_to_tgt_rsp_req) 6523 { 6524 r_tgt_rsp_fsm = TGT_RSP_READ; 6525 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6526 } 6527 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6528 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6529 else if (r_xram_rsp_to_tgt_rsp_req) 6530 { 6531 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6532 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6533 } 6534 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6535 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6536 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6537 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6538 break; 6539 } 6540 /////////////////////// 6541 case TGT_RSP_READ_IDLE: // write requests have the highest priority 6542 { 6543 if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6544 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6545 else if (r_xram_rsp_to_tgt_rsp_req) 6546 { 6547 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6548 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6549 } 6550 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6551 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6552 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6553 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6554 else if (r_read_to_tgt_rsp_req) 6555 { 6556 r_tgt_rsp_fsm = TGT_RSP_READ; 6557 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6558 } 6559 break; 6560 } 6561 //////////////////////// 6562 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 6563 { 6564 if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6565 else if (r_xram_rsp_to_tgt_rsp_req) 6566 { 6567 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6568 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6569 } 6570 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6571 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6572 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6573 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6574 else if (r_read_to_tgt_rsp_req) 6575 { 6576 r_tgt_rsp_fsm = TGT_RSP_READ; 6577 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6578 } 6579 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6580 break; 6581 } 6582 /////////////////////// 6583 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 6584 { 6585 if (r_xram_rsp_to_tgt_rsp_req) 6586 { 6587 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6588 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6589 } 6590 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 6591 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6592 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6593 else if 
(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6594 else if (r_read_to_tgt_rsp_req) 6595 { 6596 r_tgt_rsp_fsm = TGT_RSP_READ; 6597 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6598 } 6599 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6600 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6601 break; 6602 } 6603 /////////////////////// 6604 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 6605 { 6606 6607 if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 6608 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6609 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6610 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6611 else if (r_read_to_tgt_rsp_req) 6612 { 6613 r_tgt_rsp_fsm = TGT_RSP_READ; 6614 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6615 } 6616 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6617 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6618 else if (r_xram_rsp_to_tgt_rsp_req) 6619 { 6620 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6621 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6622 } 6623 break; 6624 } 6625 //////////////////////////// 6626 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 6627 { 6628 if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6629 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6630 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6631 else if (r_read_to_tgt_rsp_req) 6632 { 6633 r_tgt_rsp_fsm = TGT_RSP_READ; 6634 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6635 } 6636 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6637 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6638 else if (r_xram_rsp_to_tgt_rsp_req) 6639 { 6640 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6641 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6642 } 6643 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6644 break; 6645 } 6646 ////////////////////////// 6647 case TGT_RSP_CLEANUP_IDLE: // config requests have the highest priority 6648 { 6649 if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6650 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6651 else if (r_read_to_tgt_rsp_req) 6652 { 6653 r_tgt_rsp_fsm = TGT_RSP_READ; 6654 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6655 } 6656 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6657 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6658 else if (r_xram_rsp_to_tgt_rsp_req) 6659 { 6660 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6661 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6662 } 6663 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 6664 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6665 break; 6666 } 6667 //////////////////// 6668 case TGT_RSP_CONFIG: // send the response for a config transaction 6669 { 6670 if (p_vci_tgt.rspack ) 6671 { 6672 r_config_to_tgt_rsp_req = false; 6673 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 6674 6675 #if DEBUG_MEMC_TGT_RSP 6676 if (m_debug ) 6677 { 6678 std::cout 6679 << " <MEMC " << name() 6680 << " TGT_RSP_CONFIG> Config transaction completed response" 6681 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 6682 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 6683 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 6684 << std::endl; 6685 } 6686 #endif 6687 } 6688 break; 6689 } 6690
///////////////////// 6691 case TGT_RSP_TGT_CMD: // send the response for a configuration access 6692 { 6693 if (p_vci_tgt.rspack ) 6694 { 6695 r_tgt_cmd_to_tgt_rsp_req = false; 6696 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 6697 6698 #if DEBUG_MEMC_TGT_RSP 6699 if (m_debug ) 6700 { 6701 std::cout 6702 << " <MEMC " << name() 6703 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 6704 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 6705 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 6706 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 6707 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 6708 << std::endl; 6709 } 6710 #endif 6711 } 6712 break; 6713 } 6714 ////////////////// 6715 case TGT_RSP_READ: // send the response to a read 6716 { 6717 if (p_vci_tgt.rspack ) 6718 { 6719 6720 #if DEBUG_MEMC_TGT_RSP 6721 if (m_debug ) 6722 { 6723 std::cout 6724 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 6725 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 6726 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 6727 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 6728 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 6729 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 6730 } 6731 #endif 6732 6733 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 6734 r_read_to_tgt_rsp_length.read() - 1; 6735 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 6736 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 6737 6738 if ((is_last_word and not is_ll) or 6739 (r_tgt_rsp_key_sent.read() and is_ll)) 6740 { 6741 // Last word in case of READ or second flit in case of LL 6742 r_tgt_rsp_key_sent = false; 6743 r_read_to_tgt_rsp_req = false; 6744 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 6745 } 6746 else 6747 { 6748 if (is_ll) 6749 { 6750 r_tgt_rsp_key_sent = true; // Send second flit of ll 6751 } 6752 else 6753 { 6754 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 6755 } 6756 } 6757 } 6758 break; 6759 } 6760 ////////////////// 6761 case TGT_RSP_WRITE: // send the write acknowledge 6762 { 6763 if (p_vci_tgt.rspack) 6764 { 6765 6766 #if DEBUG_MEMC_TGT_RSP 6767 if (m_debug) 6768 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 6769 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 6770 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 6771 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl; 6772 #endif 6773 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 6774 r_write_to_tgt_rsp_req = false; 6775 } 6776 break; 6777 } 6778 ///////////////////// 6779 case TGT_RSP_CLEANUP: // not clear to me (AG) 6780 { 6781 if (p_vci_tgt.rspack) 6782 { 6783 6784 #if DEBUG_MEMC_TGT_RSP 6785 if (m_debug) 6786 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 6787 << " / rsrcid = " << std::hex << r_cleanup_to_tgt_rsp_srcid.read() 6788 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 6789 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl; 6790 #endif 6791 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 6792 r_cleanup_to_tgt_rsp_req = false; 6793 } 6794 break; 6795 } 6796 ///////////////// 6797 case TGT_RSP_CAS: // send one atomic word response 6798 { 6799 if (p_vci_tgt.rspack) 6800 { 6801 6802 #if DEBUG_MEMC_TGT_RSP 6803 if (m_debug) 6804 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 6805 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 6806 << " / rtrdid = " <<
r_cas_to_tgt_rsp_trdid.read() 6807 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl; 6808 #endif 6809 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 6810 r_cas_to_tgt_rsp_req = false; 6811 } 6812 break; 6813 } 6814 ////////////////// 6815 case TGT_RSP_XRAM: // send the response after XRAM access 6816 { 6817 if (p_vci_tgt.rspack ) 6818 { 6819 6820 #if DEBUG_MEMC_TGT_RSP 6821 if (m_debug ) 6822 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 6823 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 6824 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 6825 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 6826 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 6827 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 6828 #endif 6829 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 6830 r_xram_rsp_to_tgt_rsp_length.read() - 1; 6831 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 6832 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 6833 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 6834 6835 if (((is_last_word or is_error) and not is_ll) or 6836 (r_tgt_rsp_key_sent.read() and is_ll)) 6837 { 6838 // Last word sent in case of READ or second flit sent in case of LL 6839 r_tgt_rsp_key_sent = false; 6840 r_xram_rsp_to_tgt_rsp_req = false; 6841 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 6842 } 6843 else 6844 { 6845 if (is_ll) 6846 { 6847 r_tgt_rsp_key_sent = true; // Send second flit of ll 6848 } 6849 else 6850 { 6851 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 6852 } 6853 } 6854 } 6855 break; 6856 } 6857 /////////////////////// 6858 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction 6859 { 6860 if (p_vci_tgt.rspack) 6861 { 6862 6863 #if DEBUG_MEMC_TGT_RSP 6864 if (m_debug) 6865 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 6866 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 6867 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 6868 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 6869 #endif 6870 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 6871 r_multi_ack_to_tgt_rsp_req = false; 6872 } 6873 break; 6874 } 6875 } // end switch tgt_rsp_fsm 6876 6877 //////////////////////////////////////////////////////////////////////////////////// 6878 // ALLOC_UPT FSM 6879 //////////////////////////////////////////////////////////////////////////////////// 6880 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT), 6881 // with a round robin priority between three FSMs, with the following order: 6882 // WRITE -> CAS -> MULTI_ACK 6883 // - The WRITE FSM initiates an update transaction and sets a new entry in UPT. 6884 // - The CAS FSM does the same thing as the WRITE FSM. 6885 // - The MULTI_ACK FSM completes those transactions and erases the UPT entry. 6886 // The resource is always allocated.
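All the ALLOC_* FSMs below (ALLOC_UPT, ALLOC_IVT, ALLOC_DIR, ALLOC_TRT, ALLOC_HEAP) follow the same round-robin allocation pattern: the lock stays with its current owner as long as that FSM is in one of its *_LOCK states, and is otherwise granted to the first requesting FSM in cyclic order. The function below is a minimal stand-alone sketch of this policy; the names rr_grant and requesting are illustrative and do not exist in the component.

    #include <cstddef>

    // Round-robin grant, as implemented by the ALLOC_* FSMs: starting from the
    // client that follows the current owner, give the lock to the first client
    // that requests it; keep the current allocation when nobody asks.
    // requesting[i] stands for "client i is in one of its *_LOCK states".
    static size_t rr_grant(size_t current_owner, const bool requesting[], size_t n_clients)
    {
        for (size_t i = 1; i <= n_clients; i++)
        {
            size_t candidate = (current_owner + i) % n_clients;
            if (requesting[candidate]) return candidate;
        }
        return current_owner;   // no pending request: the resource stays allocated
    }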
6887 ///////////////////////////////////////////////////////////////////////////////////// 6888 6889 //std::cout << std::endl << "alloc_upt_fsm" << std::endl; 6890 6891 switch(r_alloc_upt_fsm.read()) 6892 { 6893 ///////////////////////// 6894 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 6895 if (r_write_fsm.read() != WRITE_UPT_LOCK) 6896 { 6897 if (r_cas_fsm.read() == CAS_UPT_LOCK) 6898 r_alloc_upt_fsm = ALLOC_UPT_CAS; 6899 6900 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 6901 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 6902 } 6903 break; 6904 6905 ///////////////////////// 6906 case ALLOC_UPT_CAS: // allocated to CAS FSM 6907 if (r_cas_fsm.read() != CAS_UPT_LOCK) 6908 { 6909 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 6910 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 6911 6912 else if (r_write_fsm.read() == WRITE_UPT_LOCK) 6913 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 6914 } 6915 break; 6916 6917 ///////////////////////// 6918 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 6919 if (r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK) 6920 { 6921 if (r_write_fsm.read() == WRITE_UPT_LOCK) 6922 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 6923 6924 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 6925 r_alloc_upt_fsm = ALLOC_UPT_CAS; 6926 } 6927 break; 6928 } // end switch r_alloc_upt_fsm 6929 6930 //////////////////////////////////////////////////////////////////////////////////// 6931 // ALLOC_IVT FSM 6932 //////////////////////////////////////////////////////////////////////////////////// 6933 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT), 6934 // with a round robin priority between five FSMs, with the following order: 6935 // WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG 6936 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry 6937 // in IVT. 6938 // - The CAS FSM does the same thing as the WRITE FSM. 6939 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transactions and sets 6940 // a new entry in the IVT 6941 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM 6942 // - The CLEANUP FSM completes those transactions and erases the IVT entry. 6943 // The resource is always allocated.
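Both the UPT and the IVT keep one entry per pending coherence operation: the WRITE, CAS, XRAM_RSP or CONFIG FSM that starts a multi-update or multi-invalidate allocates the entry, each incoming MULTI_ACK (for updates) or CLEANUP (for invalidates) decrements its response counter (see the CLEANUP_IVT_DECREMENT state tested in the ALLOC_IVT_CLEANUP case below), and the entry is released when the last expected response has arrived. The structure below is a conceptual sketch of such an entry with hypothetical field names; the real tables are implemented in separate classes not shown in this changeset.

    #include <cstdint>

    // Conceptual sketch of a pending UPT / IVT entry (field names are illustrative).
    struct pending_entry
    {
        bool     valid;   // entry in use
        uint64_t nline;   // index of the cache line concerned
        uint32_t srcid;   // srcid of the request that created the entry
        uint32_t count;   // number of L1 responses still expected
    };

    // Account for one MULTI_ACK (UPT) or CLEANUP (IVT) response;
    // returns true when the entry can be released.
    static bool ack_response(pending_entry& e)
    {
        if (e.count > 0) e.count--;
        if (e.count == 0)
        {
            e.valid = false;
            return true;
        }
        return false;
    }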
6944 ///////////////////////////////////////////////////////////////////////////////////// 6945 6946 //std::cout << std::endl << "alloc_ivt_fsm" << std::endl; 6947 6948 switch(r_alloc_ivt_fsm.read()) 6949 { 6950 ///////////////////// 6951 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 6952 if (r_write_fsm.read() != WRITE_BC_IVT_LOCK) 6953 { 6954 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6955 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6956 6957 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6958 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6959 6960 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6961 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6962 6963 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6964 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6965 } 6966 break; 6967 6968 //////////////////////// 6969 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 6970 if (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 6971 { 6972 if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6973 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6974 6975 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6976 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6977 6978 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6979 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6980 6981 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6982 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6983 } 6984 break; 6985 6986 /////////////////////// 6987 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 6988 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 6989 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT)) 6990 { 6991 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6992 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6993 6994 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6995 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6996 6997 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6998 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6999 7000 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7001 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7002 } 7003 break; 7004 7005 ////////////////////////// 7006 case ALLOC_IVT_CAS: // allocated to CAS FSM 7007 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 7008 { 7009 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7010 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7011 7012 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7013 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7014 7015 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7016 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7017 7018 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7019 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7020 } 7021 break; 7022 7023 ////////////////////////// 7024 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 7025 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 7026 { 7027 if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7028 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7029 7030 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7031 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7032 7033 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7034 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7035 7036 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7037 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7038 } 7039 break; 7040 7041 } // end switch r_alloc_ivt_fsm 7042 7043 //////////////////////////////////////////////////////////////////////////////////// 7044 // ALLOC_DIR FSM 7045 //////////////////////////////////////////////////////////////////////////////////// 7046 // The ALLOC_DIR FSM allocates the access to the directory and 7047 // the data cache with a round robin priority between 6 user FSMs : 7048 // The cyclic ordering is CONFIG > READ > WRITE > CAS > CLEANUP > XRAM_RSP 7049 // The ressource is always 
allocated. 7050 ///////////////////////////////////////////////////////////////////////////////////// 7051 7052 //std::cout << std::endl << "alloc_dir_fsm" << std::endl; 7053 7054 switch(r_alloc_dir_fsm.read()) 7055 { 7056 ///////////////////// 7057 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 7058 // All the WAYS of a SET initialized in parallel 7059 7060 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 7061 7062 if (r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 7063 { 7064 m_cache_directory.init(); 7065 r_alloc_dir_fsm = ALLOC_DIR_READ; 7066 } 7067 break; 7068 7069 ////////////////////// 7070 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 7071 if ((r_config_fsm.read() != CONFIG_DIR_REQ) and 7072 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 7073 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 7074 (r_config_fsm.read() != CONFIG_TRT_SET) and 7075 (r_config_fsm.read() != CONFIG_IVT_LOCK)) 7076 { 7077 if (r_read_fsm.read() == READ_DIR_REQ) 7078 r_alloc_dir_fsm = ALLOC_DIR_READ; 7079 7080 else if (r_write_fsm.read() == WRITE_DIR_REQ) 7081 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7082 7083 else if (r_cas_fsm.read() == CAS_DIR_REQ) 7084 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7085 7086 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7087 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7088 7089 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7090 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7091 } 7092 break; 7093 7094 //////////////////// 7095 case ALLOC_DIR_READ: // allocated to READ FSM 7096 if (((r_read_fsm.read() != READ_DIR_REQ) and 7097 (r_read_fsm.read() != READ_DIR_LOCK) and 7098 (r_read_fsm.read() != READ_TRT_LOCK) and 7099 (r_read_fsm.read() != READ_HEAP_REQ)) 7100 or 7101 ((r_read_fsm.read() == READ_TRT_LOCK) and 7102 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ))) 7103 { 7104 if (r_write_fsm.read() == WRITE_DIR_REQ) 7105 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7106 7107 else if (r_cas_fsm.read() == CAS_DIR_REQ) 7108 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7109 7110 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7111 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7112 7113 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7114 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7115 7116 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 7117 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7118 } 7119 break; 7120 7121 ///////////////////// 7122 case ALLOC_DIR_WRITE: // allocated to WRITE FSM 7123 if (((r_write_fsm.read() != WRITE_DIR_REQ) and 7124 (r_write_fsm.read() != WRITE_DIR_LOCK) and 7125 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 7126 (r_write_fsm.read() != WRITE_DIR_HIT) and 7127 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7128 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 7129 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7130 (r_write_fsm.read() != WRITE_UPT_LOCK) and 7131 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK)) 7132 or 7133 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 7134 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 7135 or 7136 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 7137 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 7138 { 7139 if (r_cas_fsm.read() == CAS_DIR_REQ) 7140 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7141 7142 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7143 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7144 7145 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7146 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7147 7148 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 7149 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7150 7151 else if (r_read_fsm.read() == READ_DIR_REQ) 7152 
r_alloc_dir_fsm = ALLOC_DIR_READ; 7153 } 7154 break; 7155 7156 /////////////////// 7157 case ALLOC_DIR_CAS: // allocated to CAS FSM 7158 if (((r_cas_fsm.read() != CAS_DIR_REQ) and 7159 (r_cas_fsm.read() != CAS_DIR_LOCK) and 7160 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 7161 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 7162 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 7163 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7164 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 7165 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7166 (r_cas_fsm.read() != CAS_UPT_LOCK) and 7167 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 7168 or 7169 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 7170 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 7171 or 7172 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 7173 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 7174 { 7175 if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7176 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7177 7178 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7179 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7180 7181 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 7182 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7183 7184 else if (r_read_fsm.read() == READ_DIR_REQ) 7185 r_alloc_dir_fsm = ALLOC_DIR_READ; 7186 7187 else if (r_write_fsm.read() == WRITE_DIR_REQ) 7188 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7189 } 7190 break; 7191 7192 /////////////////////// 7193 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 7194 if ((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 7195 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 7196 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 7197 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK)) 7198 { 7199 if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7200 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7201 7202 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 7203 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7204 7205 else if (r_read_fsm.read() == READ_DIR_REQ) 7206 r_alloc_dir_fsm = ALLOC_DIR_READ; 7207 7208 else if (r_write_fsm.read() == WRITE_DIR_REQ) 7209 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7210 7211 else if (r_cas_fsm.read() == CAS_DIR_REQ) 7212 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7213 } 7214 break; 7215 7216 //////////////////////// 7217 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 7218 if ((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 7219 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7220 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7221 { 7222 if (r_config_fsm.read() == CONFIG_DIR_REQ) 7223 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7224 7225 else if (r_read_fsm.read() == READ_DIR_REQ) 7226 r_alloc_dir_fsm = ALLOC_DIR_READ; 7227 7228 else if (r_write_fsm.read() == WRITE_DIR_REQ) 7229 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7230 7231 else if (r_cas_fsm.read() == CAS_DIR_REQ) 7232 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7233 7234 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7235 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7236 } 7237 break; 7238 7239 } // end switch alloc_dir_fsm 7240 7241 //////////////////////////////////////////////////////////////////////////////////// 7242 // ALLOC_TRT FSM 7243 //////////////////////////////////////////////////////////////////////////////////// 7244 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 7245 // with a round robin priority between 7 user FSMs : 7246 // The priority is READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG 7247 // The ressource is always allocated. 
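The Transaction Table (TRT) arbitrated here holds one entry per pending transaction towards the external RAM; the m_trt.set(...) calls made by the CAS FSM above (CAS_BC_DIR_INVAL and CAS_MISS_TRT_SET) show the kind of information registered for each of them. The structure below is only a sketch reconstructed from those call sites, with hypothetical field names and without every parameter of the real set() method.

    #include <cstdint>
    #include <vector>

    // Conceptual sketch of a TRT entry (illustrative only, partial).
    struct trt_entry
    {
        bool                  valid;      // entry in use
        bool                  xram_read;  // true for a GET (read from XRAM), false for a PUT
        uint64_t              nline;      // cache line index
        uint32_t              srcid;      // VCI srcid of the initiating request
        uint32_t              trdid;      // VCI trdid of the initiating request
        uint32_t              pktid;      // VCI pktid of the initiating request
        bool                  proc_read;  // a processor read is waiting for the response
        std::vector<uint32_t> be;         // one byte-enable per word of the line
        std::vector<uint32_t> data;       // line data, used for PUT transactions
    };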
7248 /////////////////////////////////////////////////////////////////////////////////// 7249 7250 //std::cout << std::endl << "alloc_trt_fsm" << std::endl; 7251 7252 switch(r_alloc_trt_fsm.read()) 7253 { 7254 //////////////////// 7255 case ALLOC_TRT_READ: 7256 if (r_read_fsm.read() != READ_TRT_LOCK) 7257 { 7258 if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7259 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7260 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7261 7262 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7263 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7264 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7265 7266 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7267 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7268 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7269 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7270 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 7271 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7272 7273 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7274 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7275 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7276 7277 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7278 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7279 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7280 7281 else if (r_config_fsm.read() == CONFIG_TRT_LOCK ) 7282 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7283 } 7284 break; 7285 7286 ///////////////////// 7287 case ALLOC_TRT_WRITE: 7288 if ((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7289 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7290 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7291 { 7292 if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7293 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7294 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7295 7296 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7297 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7298 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7299 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7300 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 7301 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7302 7303 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7304 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7305 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7306 7307 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7308 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7309 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7310 7311 else if (r_config_fsm.read() == CONFIG_TRT_LOCK ) 7312 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7313 7314 else if (r_read_fsm.read() == READ_TRT_LOCK) 7315 r_alloc_trt_fsm = ALLOC_TRT_READ; 7316 } 7317 break; 7318 7319 /////////////////// 7320 case ALLOC_TRT_CAS: 7321 if ((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7322 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7323 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7324 { 7325 if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7326 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7327 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7328 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7329 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 7330 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7331 7332 if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7333 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7334 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7335 7336 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7337 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7338 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7339 7340 else if (r_config_fsm.read() == CONFIG_TRT_LOCK ) 7341 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7342 7343 else if (r_read_fsm.read() == READ_TRT_LOCK) 7344 r_alloc_trt_fsm = 
ALLOC_TRT_READ; 7345 7346 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7347 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7348 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7349 } 7350 break; 7351 7352 /////////////////////// 7353 case ALLOC_TRT_IXR_CMD: 7354 if ((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 7355 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 7356 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 7357 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 7358 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 7359 { 7360 if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7361 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7362 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7363 7364 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7365 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7366 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7367 7368 else if (r_config_fsm.read() == CONFIG_TRT_LOCK ) 7369 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7370 7371 else if (r_read_fsm.read() == READ_TRT_LOCK) 7372 r_alloc_trt_fsm = ALLOC_TRT_READ; 7373 7374 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7375 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7376 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7377 7378 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7379 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7380 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7381 } 7382 break; 7383 7384 //////////////////////// 7385 case ALLOC_TRT_XRAM_RSP: 7386 if (((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 7387 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 7388 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7389 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 7390 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7391 { 7392 if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7393 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7394 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7395 7396 else if (r_config_fsm.read() == CONFIG_TRT_LOCK ) 7397 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7398 7399 else if (r_read_fsm.read() == READ_TRT_LOCK) 7400 r_alloc_trt_fsm = ALLOC_TRT_READ; 7401 7402 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7403 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7404 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7405 7406 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7407 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7408 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7409 7410 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7411 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7412 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7413 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7414 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 7415 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7416 7417 } 7418 break; 7419 7420 /////////////////////// 7421 case ALLOC_TRT_IXR_RSP: 7422 if ((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 7423 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 7424 { 7425 if (r_config_fsm.read() == CONFIG_TRT_LOCK) 7426 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7427 7428 else if (r_read_fsm.read() == READ_TRT_LOCK) 7429 r_alloc_trt_fsm = ALLOC_TRT_READ; 7430 7431 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7432 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7433 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7434 7435 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7436 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7437 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7438 7439 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7440 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7441 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7442 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7443 
(r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 7444 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7445 7446 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7447 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7448 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7449 } 7450 break; 7451 7452 ////////////////////// 7453 case ALLOC_TRT_CONFIG: 7454 if ((r_config_fsm.read() != CONFIG_TRT_LOCK) and 7455 (r_config_fsm.read() != CONFIG_TRT_SET)) 7456 { 7457 if (r_read_fsm.read() == READ_TRT_LOCK) 7458 r_alloc_trt_fsm = ALLOC_TRT_READ; 7459 7460 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7461 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7462 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7463 7464 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7465 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7466 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7467 7468 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7469 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7470 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7471 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7472 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 7473 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7474 7475 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7476 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7477 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7478 7479 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7480 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7481 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7482 } 7483 break; 7484 7485 } // end switch alloc_trt_fsm 7486 7487 //////////////////////////////////////////////////////////////////////////////////// 7488 // ALLOC_HEAP FSM 7489 //////////////////////////////////////////////////////////////////////////////////// 7490 // The ALLOC_HEAP FSM allocates the access to the heap 7491 // with a round robin priority between 6 user FSMs : 7492 // The cyclic ordering is READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 7493 // The ressource is always allocated. 7494 ///////////////////////////////////////////////////////////////////////////////////// 7495 7496 //std::cout << std::endl << "alloc_heap_fsm" << std::endl; 7497 7498 switch (r_alloc_heap_fsm.read()) 7499 { 7500 //////////////////// 7501 case ALLOC_HEAP_RESET: 7502 // Initializes the heap one ENTRY each cycle. 
7503 7504 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 7505 7506 if (r_alloc_heap_reset_cpt.read() == (m_heap_size - 1)) 7507 { 7508 m_heap.init(); 7509 7510 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7511 } 7512 break; 7513 7514 //////////////////// 7515 case ALLOC_HEAP_READ: 7516 if ((r_read_fsm.read() != READ_HEAP_REQ) and 7517 (r_read_fsm.read() != READ_HEAP_LOCK) and 7518 (r_read_fsm.read() != READ_HEAP_ERASE)) 7519 { 7520 if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7521 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7522 7523 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7524 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7525 7526 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7527 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7528 7529 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7530 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7531 7532 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 7533 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7534 } 7535 break; 7536 7537 ///////////////////// 7538 case ALLOC_HEAP_WRITE: 7539 if ((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 7540 (r_write_fsm.read() != WRITE_UPT_REQ) and 7541 (r_write_fsm.read() != WRITE_UPT_NEXT)) 7542 { 7543 if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7544 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7545 7546 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7547 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7548 7549 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7550 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7551 7552 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 7553 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7554 7555 else if (r_read_fsm.read() == READ_HEAP_REQ) 7556 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7557 } 7558 break; 7559 7560 //////////////////// 7561 case ALLOC_HEAP_CAS: 7562 if ((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 7563 (r_cas_fsm.read() != CAS_UPT_REQ) and 7564 (r_cas_fsm.read() != CAS_UPT_NEXT)) 7565 { 7566 if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7567 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7568 7569 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7570 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7571 7572 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 7573 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7574 7575 else if (r_read_fsm.read() == READ_HEAP_REQ) 7576 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7577 7578 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7579 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7580 } 7581 break; 7582 7583 /////////////////////// 7584 case ALLOC_HEAP_CLEANUP: 7585 if ((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 7586 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 7587 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 7588 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 7589 { 7590 if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7591 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7592 7593 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 7594 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7595 7596 else if (r_read_fsm.read() == READ_HEAP_REQ) 7597 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7598 7599 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7600 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7601 7602 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7603 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7604 } 7605 break; 7606 7607 //////////////////////// 7608 case ALLOC_HEAP_XRAM_RSP: 7609 if ((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 7610 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 7611 { 7612 if (r_config_fsm.read() == CONFIG_HEAP_REQ) 7613 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7614 7615 else if (r_read_fsm.read() == 
READ_HEAP_REQ) 7616 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7617 7618 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7619 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7620 7621 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7622 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7623 7624 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7625 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7626 7627 } 7628 break; 7629 7630 /////////////////////// 7631 case ALLOC_HEAP_CONFIG: 7632 if ((r_config_fsm.read() != CONFIG_HEAP_REQ) and 7633 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 7634 { 7635 if (r_read_fsm.read() == READ_HEAP_REQ) 7636 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7637 7638 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7639 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7640 7641 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7642 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7643 7644 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7645 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7646 7647 if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7648 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7649 } 7650 break; 7651 7652 } // end switch alloc_heap_fsm 7653 7654 //std::cout << std::endl << "fifo_update" << std::endl; 7655 7656 ///////////////////////////////////////////////////////////////////// 7657 // TGT_CMD to READ FIFO 7658 ///////////////////////////////////////////////////////////////////// 7659 7660 m_cmd_read_addr_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7661 p_vci_tgt.address.read()); 7662 m_cmd_read_length_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7663 p_vci_tgt.plen.read() >> 2 ); 7664 m_cmd_read_srcid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7665 p_vci_tgt.srcid.read()); 7666 m_cmd_read_trdid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7667 p_vci_tgt.trdid.read()); 7668 m_cmd_read_pktid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7669 p_vci_tgt.pktid.read()); 7670 7671 ///////////////////////////////////////////////////////////////////// 7672 // TGT_CMD to WRITE FIFO 7673 ///////////////////////////////////////////////////////////////////// 7674 7675 m_cmd_write_addr_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7676 (addr_t)p_vci_tgt.address.read()); 7677 m_cmd_write_eop_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7678 p_vci_tgt.eop.read()); 7679 m_cmd_write_srcid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7680 p_vci_tgt.srcid.read()); 7681 m_cmd_write_trdid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7682 p_vci_tgt.trdid.read()); 7683 m_cmd_write_pktid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7684 p_vci_tgt.pktid.read()); 7685 m_cmd_write_data_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7686 p_vci_tgt.wdata.read()); 7687 m_cmd_write_be_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7688 p_vci_tgt.be.read()); 7689 7690 //////////////////////////////////////////////////////////////////////////////////// 7691 // TGT_CMD to CAS FIFO 7692 //////////////////////////////////////////////////////////////////////////////////// 7693 7694 m_cmd_cas_addr_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7695 (addr_t)p_vci_tgt.address.read()); 7696 m_cmd_cas_eop_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7697 p_vci_tgt.eop.read()); 7698 m_cmd_cas_srcid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7699 p_vci_tgt.srcid.read()); 7700 m_cmd_cas_trdid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7701 p_vci_tgt.trdid.read()); 7702 m_cmd_cas_pktid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7703 p_vci_tgt.pktid.read()); 7704 
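// Note on the FIFO plumbing in this section (sketch; the exact SoCLib GenericFifo
// semantics are assumed, not shown here): each update(get, put, data) call applies,
// in a single cycle, the read/write decisions computed earlier in transition().
// A minimal circular-buffer model of the expected behaviour:
//
//     template <typename T>
//     void fifo_update(bool get, bool put, const T &data,
//                      std::vector<T> &buf, size_t &rptr, size_t &wptr, size_t &count)
//     {
//         size_t depth = buf.size();
//         if (get and (count > 0))     { rptr = (rptr + 1) % depth; count--; }
//         if (put and (count < depth)) { buf[wptr] = data; wptr = (wptr + 1) % depth; count++; }
//     }
//
// The cmd_*_fifo_put flags are raised by the TGT_CMD FSM and the cmd_*_fifo_get
// flags by the READ / WRITE / CAS FSMs that consume the commands; the producer only
// asserts put when wok() is true (see the cmdack equations in genMoore), so the
// full-FIFO corner case never has to be resolved here.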
m_cmd_cas_wdata_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7705 p_vci_tgt.wdata.read()); 7706 7707 //////////////////////////////////////////////////////////////////////////////////// 7708 // CC_RECEIVE to CLEANUP FIFO 7709 //////////////////////////////////////////////////////////////////////////////////// 7710 7711 m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 7712 cc_receive_to_cleanup_fifo_put, 7713 p_dspin_p2m.data.read()); 7714 7715 //////////////////////////////////////////////////////////////////////////////////// 7716 // CC_RECEIVE to MULTI_ACK FIFO 7717 //////////////////////////////////////////////////////////////////////////////////// 7718 7719 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 7720 cc_receive_to_multi_ack_fifo_put, 7721 p_dspin_p2m.data.read()); 7722 7723 //////////////////////////////////////////////////////////////////////////////////// 7724 // WRITE to CC_SEND FIFO 7725 //////////////////////////////////////////////////////////////////////////////////// 7726 7727 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, 7728 write_to_cc_send_fifo_put, 7729 write_to_cc_send_fifo_inst ); 7730 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, 7731 write_to_cc_send_fifo_put, 7732 write_to_cc_send_fifo_srcid ); 7733 7734 //////////////////////////////////////////////////////////////////////////////////// 7735 // CONFIG to CC_SEND FIFO 7736 //////////////////////////////////////////////////////////////////////////////////// 7737 7738 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, 7739 config_to_cc_send_fifo_put, 7740 config_to_cc_send_fifo_inst ); 7741 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, 7742 config_to_cc_send_fifo_put, 7743 config_to_cc_send_fifo_srcid ); 7744 7745 //////////////////////////////////////////////////////////////////////////////////// 7746 // XRAM_RSP to CC_SEND FIFO 7747 //////////////////////////////////////////////////////////////////////////////////// 7748 7749 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, 7750 xram_rsp_to_cc_send_fifo_put, 7751 xram_rsp_to_cc_send_fifo_inst ); 7752 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, 7753 xram_rsp_to_cc_send_fifo_put, 7754 xram_rsp_to_cc_send_fifo_srcid ); 7755 7756 //////////////////////////////////////////////////////////////////////////////////// 7757 // CAS to CC_SEND FIFO 7758 //////////////////////////////////////////////////////////////////////////////////// 7759 7760 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, 7761 cas_to_cc_send_fifo_put, 7762 cas_to_cc_send_fifo_inst ); 7763 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, 7764 cas_to_cc_send_fifo_put, 7765 cas_to_cc_send_fifo_srcid ); 7766 m_cpt_cycles++; 7767 7768 } // end transition() 7769 7770 ///////////////////////////// 7771 tmpl(void)::genMoore() 7772 ///////////////////////////// 7773 { 7774 //////////////////////////////////////////////////////////// 7775 // Command signals on the p_vci_ixr port 7776 //////////////////////////////////////////////////////////// 7777 7778 // DATA width is 8 bytes 7779 // The following values are not transmitted to XRAM 7780 // p_vci_ixr.be 7781 // p_vci_ixr.pktid 7782 // p_vci_ixr.cons 7783 // p_vci_ixr.wrap 7784 // p_vci_ixr.contig 7785 // p_vci_ixr.clen 7786 // p_vci_ixr.cfixed 7787 7788 p_vci_ixr.plen = 64; 7789 p_vci_ixr.srcid = m_srcid_x; 7790 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 7791 
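// Reminder on the IXR command framing used just below (sketch): p_vci_ixr.plen is
// fixed to 64 bytes above, the XRAM data width is 8 bytes, so each flit carries two
// consecutive 32-bit words of the line and r_ixr_cmd_word advances by 2 per accepted
// flit (see the IXR_CMD_*_SEND states). For a PUT with m_words = 16, flit k is:
//
//     //  address = line_base + 4 * (2 * k)
//     //  wdata   = { word[2k+1] , word[2k] }     (low word in the low half)
//     //  eop     = (2 * k == m_words - 2)        i.e. the 8th and last flit
//     uint64_t pack_flit(uint32_t low, uint32_t high)   // hypothetical helper
//     {
//         return (uint64_t) low | ((uint64_t) high << 32);
//     }
//
// which matches the wdata / eop expressions of the PUT branch below.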
p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 7792 p_vci_ixr.be = 0xFF; 7793 p_vci_ixr.pktid = 0; 7794 p_vci_ixr.cons = false; 7795 p_vci_ixr.wrap = false; 7796 p_vci_ixr.contig = true; 7797 p_vci_ixr.clen = 0; 7798 p_vci_ixr.cfixed = false; 7799 7800 if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 7801 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 7802 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 7803 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 7804 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND)) 7805 { 7806 p_vci_ixr.cmdval = true; 7807 7808 if (r_ixr_cmd_get.read()) // GET 7809 { 7810 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 7811 p_vci_ixr.wdata = 0; 7812 p_vci_ixr.eop = true; 2775 7813 } 2776 2777 #if DEBUG_MEMC_WRITE 2778 if(m_debug and wok) 2779 { 2780 std::cout << " <MEMC " << name() 2781 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 2782 << " nb_copies = " << r_write_count.read() << std::endl; 2783 } 2784 #endif 2785 r_write_upt_index = index; 2786 // releases the lock protecting UPT and the DIR if no entry... 2787 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 2788 else r_write_fsm = WRITE_WAIT; 2789 } 2790 break; 2791 } 2792 2793 ///////////////////////// 2794 case WRITE_UPT_HEAP_LOCK: // get access to heap 2795 { 2796 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 2797 { 2798 2799 #if DEBUG_MEMC_WRITE 2800 if(m_debug) 2801 std::cout << " <MEMC " << name() 2802 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 2803 #endif 2804 r_write_fsm = WRITE_UPT_REQ; 2805 } 2806 break; 2807 } 2808 2809 ////////////////// 2810 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 2811 // and write the first copy in the FIFO 2812 // send the request if only one copy 2813 { 2814 assert(not r_write_to_cc_send_multi_req.read() and 2815 not r_write_to_cc_send_brdcast_req.read() and 2816 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 2817 "transaction in WRITE_UPT_REQ state" 2818 ); 2819 2820 r_write_to_cc_send_brdcast_req = false; 2821 r_write_to_cc_send_trdid = r_write_upt_index.read(); 2822 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 2823 r_write_to_cc_send_index = r_write_word_index.read(); 2824 r_write_to_cc_send_count = r_write_word_count.read(); 2825 2826 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 2827 2828 size_t min = r_write_word_index.read(); 2829 size_t max = r_write_word_index.read() + r_write_word_count.read(); 2830 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 2831 2832 if( (r_write_copy.read() != r_write_srcid.read()) or 2833 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 2834 { 2835 // put the first srcid in the fifo 2836 write_to_cc_send_fifo_put = true; 2837 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 2838 write_to_cc_send_fifo_srcid = r_write_copy.read(); 2839 if(r_write_count.read() == 1) 2840 { 2841 r_write_fsm = WRITE_IDLE; 2842 r_write_to_cc_send_multi_req = true; 7814 else // PUT 7815 { 7816 size_t word = r_ixr_cmd_word.read(); 7817 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 7818 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 7819 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 7820 p_vci_ixr.eop = (word == (m_words-2)); 7821 } 2843 7822 } 2844 7823 else 2845 7824 { 2846 r_write_fsm = WRITE_UPT_NEXT; 2847 r_write_to_dec = false; 2848 7825 p_vci_ixr.cmdval = false; 2849 7826 } 2850 } 2851 else 2852 { 2853 r_write_fsm = 
WRITE_UPT_NEXT; 2854 r_write_to_dec = false; 2855 } 2856 2857 #if DEBUG_MEMC_WRITE 2858 if(m_debug) 2859 { 2860 std::cout 2861 << " <MEMC " << name() 2862 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 2863 << " / srcid = " << std::dec << r_write_copy.read() 2864 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2865 2866 if(r_write_count.read() == 1) 2867 std::cout << " ... and this is the last" << std::endl; 2868 } 2869 #endif 2870 break; 2871 } 2872 2873 /////////////////// 2874 case WRITE_UPT_NEXT: 2875 { 2876 // continue the multi-update request to CC_SEND fsm 2877 // when there is copies in the heap. 2878 // if one copy in the heap is the writer itself 2879 // the corresponding SRCID should not be written in the fifo, 2880 // but the UPT counter must be decremented. 2881 // As this decrement is done in the WRITE_UPT_DEC state, 2882 // after the last copy has been found, the decrement request 2883 // must be registered in the r_write_to_dec flip-flop. 2884 2885 HeapEntry entry = m_heap.read(r_write_ptr.read()); 2886 2887 bool dec_upt_counter; 2888 2889 // put the next srcid in the fifo 2890 if( (entry.owner.srcid != r_write_srcid.read()) or 2891 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 2892 { 2893 dec_upt_counter = false; 2894 write_to_cc_send_fifo_put = true; 2895 write_to_cc_send_fifo_inst = entry.owner.inst; 2896 write_to_cc_send_fifo_srcid = entry.owner.srcid; 2897 2898 #if DEBUG_MEMC_WRITE 2899 if(m_debug) 7827 7828 //////////////////////////////////////////////////// 7829 // Response signals on the p_vci_ixr port 7830 //////////////////////////////////////////////////// 7831 7832 if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 7833 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE)) 7834 { 7835 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 7836 } 7837 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 2900 7838 { 2901 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 2902 << " / heap_index = " << std::dec << r_write_ptr.read() 2903 << " / srcid = " << std::dec << r_write_copy.read() 2904 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2905 if(entry.next == r_write_ptr.read()) 2906 std::cout << " ... and this is the last" << std::endl; 7839 p_vci_ixr.rspack = false; 2907 7840 } 2908 #endif 2909 } 2910 else // the UPT counter must be decremented 2911 { 2912 dec_upt_counter = true; 2913 2914 #if DEBUG_MEMC_WRITE 2915 if(m_debug) 7841 7842 //////////////////////////////////////////////////// 7843 // Command signals on the p_vci_tgt port 7844 //////////////////////////////////////////////////// 7845 7846 switch((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 2916 7847 { 2917 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 2918 << " / heap_index = " << std::dec << r_write_ptr.read() 2919 << " / srcid = " << std::dec << r_write_copy.read() 2920 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2921 if(entry.next == r_write_ptr.read()) 2922 std::cout << " ... 
and this is the last" << std::endl; 7848 case TGT_CMD_IDLE: 7849 p_vci_tgt.cmdack = false; 7850 break; 7851 7852 case TGT_CMD_CONFIG: 7853 case TGT_CMD_ERROR: 7854 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 7855 break; 7856 7857 case TGT_CMD_READ: 7858 p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); 7859 break; 7860 7861 case TGT_CMD_WRITE: 7862 p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); 7863 break; 7864 7865 case TGT_CMD_CAS: 7866 p_vci_tgt.cmdack = m_cmd_cas_addr_fifo.wok(); 7867 break; 2923 7868 } 2924 #endif 2925 } 2926 2927 // register the possible UPT decrement request 2928 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 2929 2930 if(not m_write_to_cc_send_inst_fifo.wok()) 2931 { 2932 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 2933 << "The write_to_cc_send_fifo should not be full" << std::endl 2934 << "as the depth should be larger than the max number of copies" << std::endl; 2935 exit(0); 2936 } 2937 2938 r_write_ptr = entry.next; 2939 2940 if(entry.next == r_write_ptr.read()) // last copy 2941 { 2942 r_write_to_cc_send_multi_req = true; 2943 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 2944 else r_write_fsm = WRITE_IDLE; 2945 } 2946 break; 2947 } 2948 2949 ////////////////// 2950 case WRITE_UPT_DEC: 2951 { 2952 // If the initial writer has a copy, it should not 2953 // receive an update request, but the counter in the 2954 // update table must be decremented by the MULTI_ACK FSM. 2955 2956 if(!r_write_to_multi_ack_req.read()) 2957 { 2958 r_write_to_multi_ack_req = true; 2959 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 2960 r_write_fsm = WRITE_IDLE; 2961 } 2962 break; 2963 } 2964 2965 /////////////// 2966 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 2967 // In order to increase the Write requests throughput, 2968 // we don't wait to return in the IDLE state to consume 2969 // a new request in the write FIFO 2970 { 2971 if(!r_write_to_tgt_rsp_req.read()) 2972 { 2973 // post the request to TGT_RSP_FSM 2974 r_write_to_tgt_rsp_req = true; 2975 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 2976 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 2977 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 2978 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 2979 2980 // try to get a new write request from the FIFO 2981 if(m_cmd_write_addr_fifo.rok()) 7869 7870 //////////////////////////////////////////////////// 7871 // Response signals on the p_vci_tgt port 7872 //////////////////////////////////////////////////// 7873 7874 switch(r_tgt_rsp_fsm.read()) 2982 7875 { 2983 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2984 m_cpt_sc++; 2985 else 2986 { 2987 m_cpt_write++; 2988 m_cpt_write_cells++; 2989 } 2990 2991 // consume a word in the FIFO & write it in the local buffer 2992 cmd_write_fifo_get = true; 2993 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2994 2995 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2996 r_write_word_index = index; 2997 r_write_word_count = 1; 2998 r_write_data[index] = m_cmd_write_data_fifo.read(); 2999 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3000 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3001 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3002 r_write_pending_sc = false; 3003 3004 // initialize the be field for all words 3005 for(size_t word=0 ; word<m_words ; word++) 3006 { 3007 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3008 else r_write_be[word] = 0x0; 
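// The if/else above primes the per-word byte enables for a new write burst: only
// the word received in this first flit keeps the BE coming from the command FIFO,
// the other words start at 0x0 and are completed by the WRITE_NEXT state as further
// flits arrive. Later on, each 4-bit BE is expanded into a 32-bit write mask, as in
// this sketch (hypothetical helper, same logic as the WRITE_BC_DIR_READ state):
//
//     //  be = 0x3  ->  mask = 0x0000FFFF   (two low bytes written)
//     //  be = 0xC  ->  mask = 0xFFFF0000   (two high bytes written)
//     uint32_t be_to_mask(uint8_t be)
//     {
//         uint32_t mask = 0;
//         if (be & 0x1) mask |= 0x000000FF;
//         if (be & 0x2) mask |= 0x0000FF00;
//         if (be & 0x4) mask |= 0x00FF0000;
//         if (be & 0x8) mask |= 0xFF000000;
//         return mask;
//     }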
3009 } 3010 3011 if(m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 3012 { 3013 r_write_fsm = WRITE_DIR_REQ; 3014 } 3015 else 3016 { 3017 r_write_fsm = WRITE_NEXT; 3018 } 7876 case TGT_RSP_CONFIG_IDLE: 7877 case TGT_RSP_TGT_CMD_IDLE: 7878 case TGT_RSP_READ_IDLE: 7879 case TGT_RSP_WRITE_IDLE: 7880 case TGT_RSP_CAS_IDLE: 7881 case TGT_RSP_XRAM_IDLE: 7882 case TGT_RSP_MULTI_ACK_IDLE: 7883 case TGT_RSP_CLEANUP_IDLE: 7884 { 7885 p_vci_tgt.rspval = false; 7886 p_vci_tgt.rsrcid = 0; 7887 p_vci_tgt.rdata = 0; 7888 p_vci_tgt.rpktid = 0; 7889 p_vci_tgt.rtrdid = 0; 7890 p_vci_tgt.rerror = 0; 7891 p_vci_tgt.reop = false; 7892 break; 7893 } 7894 case TGT_RSP_CONFIG: 7895 { 7896 p_vci_tgt.rspval = true; 7897 p_vci_tgt.rdata = 0; 7898 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 7899 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 7900 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 7901 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 7902 p_vci_tgt.reop = true; 7903 7904 break; 7905 } 7906 case TGT_RSP_TGT_CMD: 7907 { 7908 p_vci_tgt.rspval = true; 7909 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 7910 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 7911 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 7912 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 7913 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 7914 p_vci_tgt.reop = true; 7915 7916 break; 7917 } 7918 case TGT_RSP_READ: 7919 { 7920 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 7921 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7922 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7923 7924 p_vci_tgt.rspval = true; 7925 7926 if (is_ll and not r_tgt_rsp_key_sent.read()) 7927 { 7928 // LL response first flit 7929 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 7930 } 7931 else 7932 { 7933 // LL response second flit or READ response 7934 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 7935 } 7936 7937 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 7938 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); 7939 p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); 7940 p_vci_tgt.rerror = 0; 7941 p_vci_tgt.reop = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 7942 break; 7943 } 7944 7945 case TGT_RSP_WRITE: 7946 p_vci_tgt.rspval = true; 7947 if (((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read()) 7948 p_vci_tgt.rdata = 1; 7949 else 7950 p_vci_tgt.rdata = 0; 7951 p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 7952 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 7953 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 7954 p_vci_tgt.rerror = 0; 7955 p_vci_tgt.reop = true; 7956 break; 7957 7958 case TGT_RSP_CLEANUP: 7959 p_vci_tgt.rspval = true; 7960 p_vci_tgt.rdata = 0; 7961 p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); 7962 p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); 7963 p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); 7964 p_vci_tgt.rerror = 0; // Can be a CAS rsp 7965 p_vci_tgt.reop = true; 7966 break; 7967 7968 case TGT_RSP_CAS: 7969 p_vci_tgt.rspval = true; 7970 p_vci_tgt.rdata = r_cas_to_tgt_rsp_data.read(); 7971 p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read(); 7972 p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read(); 7973 p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read(); 7974 p_vci_tgt.rerror = 0; 7975 p_vci_tgt.reop = true; 7976 break; 7977 7978 case TGT_RSP_XRAM: 
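// Same framing as TGT_RSP_READ above: an LL response (pktid TYPE_LL) takes two
// flits, the reservation key first and the data word second, while an ordinary
// read returns one flit per word. Sketch of the resulting (rdata, reop) sequences,
// assuming r_tgt_rsp_key_sent is toggled by the TGT_RSP FSM after the key flit:
//
//     //  plain read, 2 words :  (data[0], reop = 0)  (data[1], reop = 1)
//     //  LL                  :  (ll_key , reop = 0)  (data[w], reop = 1)
//
// hence reop = (last_word and not is_ll) or (key_sent and is_ll), the XRAM variant
// below also terminating the packet early on a response error.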
7979 { 7980 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + r_xram_rsp_to_tgt_rsp_length.read() - 1; 7981 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7982 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7983 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 7984 7985 p_vci_tgt.rspval = true; 7986 7987 if (is_ll and not r_tgt_rsp_key_sent.read()) { 7988 // LL response first flit 7989 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read(); 7990 } 7991 else { 7992 // LL response second flit or READ response 7993 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 7994 } 7995 7996 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 7997 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); 7998 p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); 7999 p_vci_tgt.rerror = is_error; 8000 p_vci_tgt.reop = (((is_last_word or is_error) and not is_ll) or 8001 (r_tgt_rsp_key_sent.read() and is_ll)); 8002 break; 8003 } 8004 8005 case TGT_RSP_MULTI_ACK: 8006 p_vci_tgt.rspval = true; 8007 p_vci_tgt.rdata = 0; // Can be a CAS or SC rsp 8008 p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read(); 8009 p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read(); 8010 p_vci_tgt.rpktid = r_multi_ack_to_tgt_rsp_pktid.read(); 8011 p_vci_tgt.rerror = 0; 8012 p_vci_tgt.reop = true; 8013 break; 8014 } // end switch r_tgt_rsp_fsm 8015 8016 //////////////////////////////////////////////////////////////////// 8017 // p_dspin_m2p port (CC_SEND FSM) 8018 //////////////////////////////////////////////////////////////////// 8019 8020 p_dspin_m2p.write = false; 8021 p_dspin_m2p.eop = false; 8022 p_dspin_m2p.data = 0; 8023 8024 switch(r_cc_send_fsm.read()) 8025 { 8026 /////////////////////////// 8027 case CC_SEND_CONFIG_IDLE: 8028 case CC_SEND_XRAM_RSP_IDLE: 8029 case CC_SEND_WRITE_IDLE: 8030 case CC_SEND_CAS_IDLE: 8031 { 8032 break; 8033 } 8034 //////////////////////////////// 8035 case CC_SEND_CONFIG_INVAL_HEADER: 8036 { 8037 uint8_t multi_inval_type; 8038 if (m_config_to_cc_send_inst_fifo.read()) 8039 { 8040 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 8041 } 8042 else 8043 { 8044 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 8045 } 8046 8047 uint64_t flit = 0; 8048 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 8049 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8050 8051 DspinDhccpParam::dspin_set( flit, 8052 dest, 8053 DspinDhccpParam::MULTI_INVAL_DEST); 8054 8055 DspinDhccpParam::dspin_set( flit, 8056 m_cc_global_id, 8057 DspinDhccpParam::MULTI_INVAL_SRCID); 8058 8059 DspinDhccpParam::dspin_set( flit, 8060 r_config_to_cc_send_trdid.read(), 8061 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 8062 8063 DspinDhccpParam::dspin_set( flit, 8064 multi_inval_type, 8065 DspinDhccpParam::M2P_TYPE); 8066 p_dspin_m2p.write = true; 8067 p_dspin_m2p.data = flit; 8068 break; 8069 } 8070 //////////////////////////////// 8071 case CC_SEND_CONFIG_INVAL_NLINE: 8072 { 8073 uint64_t flit = 0; 8074 DspinDhccpParam::dspin_set( flit, 8075 r_config_to_cc_send_nline.read(), 8076 DspinDhccpParam::MULTI_INVAL_NLINE); 8077 p_dspin_m2p.eop = true; 8078 p_dspin_m2p.write = true; 8079 p_dspin_m2p.data = flit; 8080 break; 8081 } 8082 /////////////////////////////////// 8083 case CC_SEND_XRAM_RSP_INVAL_HEADER: 8084 { 8085 if (not m_xram_rsp_to_cc_send_inst_fifo.rok()) break; 8086 8087 uint8_t multi_inval_type; 8088 if (m_xram_rsp_to_cc_send_inst_fifo.read()) 8089 { 8090 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 8091 } 
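// (The else branch just below selects the DATA variant of the same header.)
// Sketch of the two-flit M2P multicast invalidate built by this state and by
// CC_SEND_XRAM_RSP_INVAL_NLINE: dspin_set(flit, value, FIELD) inserts 'value' into
// the bit range that the DspinDhccpParam header assigns to FIELD, so the packet is
// (field order schematic, exact positions defined by DspinDhccpParam):
//
//     //  flit 0 : M2P_TYPE | MULTI_INVAL_UPDT_INDEX | MULTI_INVAL_SRCID | MULTI_INVAL_DEST
//     //  flit 1 : MULTI_INVAL_NLINE                                       (eop = 1)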
8092 else 8093 { 8094 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 8095 } 8096 8097 uint64_t flit = 0; 8098 uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() << 8099 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8100 8101 DspinDhccpParam::dspin_set( flit, 8102 dest, 8103 DspinDhccpParam::MULTI_INVAL_DEST); 8104 8105 DspinDhccpParam::dspin_set( flit, 8106 m_cc_global_id, 8107 DspinDhccpParam::MULTI_INVAL_SRCID); 8108 8109 DspinDhccpParam::dspin_set( flit, 8110 r_xram_rsp_to_cc_send_trdid.read(), 8111 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 8112 8113 DspinDhccpParam::dspin_set( flit, 8114 multi_inval_type, 8115 DspinDhccpParam::M2P_TYPE); 8116 p_dspin_m2p.write = true; 8117 p_dspin_m2p.data = flit; 8118 break; 8119 } 8120 8121 ////////////////////////////////// 8122 case CC_SEND_XRAM_RSP_INVAL_NLINE: 8123 { 8124 uint64_t flit = 0; 8125 8126 DspinDhccpParam::dspin_set( flit, 8127 r_xram_rsp_to_cc_send_nline.read(), 8128 DspinDhccpParam::MULTI_INVAL_NLINE); 8129 p_dspin_m2p.eop = true; 8130 p_dspin_m2p.write = true; 8131 p_dspin_m2p.data = flit; 8132 break; 8133 } 8134 8135 ///////////////////////////////////// 8136 case CC_SEND_CONFIG_BRDCAST_HEADER: 8137 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: 8138 case CC_SEND_WRITE_BRDCAST_HEADER: 8139 case CC_SEND_CAS_BRDCAST_HEADER: 8140 { 8141 uint64_t flit = 0; 8142 8143 DspinDhccpParam::dspin_set( flit, 8144 m_broadcast_boundaries, 8145 DspinDhccpParam::BROADCAST_BOX); 8146 8147 DspinDhccpParam::dspin_set( flit, 8148 m_cc_global_id, 8149 DspinDhccpParam::BROADCAST_SRCID); 8150 8151 DspinDhccpParam::dspin_set( flit, 8152 1ULL, 8153 DspinDhccpParam::M2P_BC); 8154 p_dspin_m2p.write = true; 8155 p_dspin_m2p.data = flit; 8156 break; 8157 } 8158 //////////////////////////////////// 8159 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: 8160 { 8161 uint64_t flit = 0; 8162 DspinDhccpParam::dspin_set( flit, 8163 r_xram_rsp_to_cc_send_nline.read(), 8164 DspinDhccpParam::BROADCAST_NLINE); 8165 p_dspin_m2p.write = true; 8166 p_dspin_m2p.eop = true; 8167 p_dspin_m2p.data = flit; 8168 break; 8169 } 8170 ////////////////////////////////// 8171 case CC_SEND_CONFIG_BRDCAST_NLINE: 8172 { 8173 uint64_t flit = 0; 8174 DspinDhccpParam::dspin_set( flit, 8175 r_config_to_cc_send_nline.read(), 8176 DspinDhccpParam::BROADCAST_NLINE); 8177 p_dspin_m2p.write = true; 8178 p_dspin_m2p.eop = true; 8179 p_dspin_m2p.data = flit; 8180 break; 8181 } 8182 ///////////////////////////////// 8183 case CC_SEND_WRITE_BRDCAST_NLINE: 8184 { 8185 uint64_t flit = 0; 8186 DspinDhccpParam::dspin_set( flit, 8187 r_write_to_cc_send_nline.read(), 8188 DspinDhccpParam::BROADCAST_NLINE); 8189 p_dspin_m2p.write = true; 8190 p_dspin_m2p.eop = true; 8191 p_dspin_m2p.data = flit; 8192 break; 8193 } 8194 /////////////////////////////// 8195 case CC_SEND_CAS_BRDCAST_NLINE: 8196 { 8197 uint64_t flit = 0; 8198 DspinDhccpParam::dspin_set( flit, 8199 r_cas_to_cc_send_nline.read(), 8200 DspinDhccpParam::BROADCAST_NLINE); 8201 p_dspin_m2p.write = true; 8202 p_dspin_m2p.eop = true; 8203 p_dspin_m2p.data = flit; 8204 break; 8205 } 8206 /////////////////////////////// 8207 case CC_SEND_WRITE_UPDT_HEADER: 8208 { 8209 if (not m_write_to_cc_send_inst_fifo.rok()) break; 8210 8211 uint8_t multi_updt_type; 8212 if (m_write_to_cc_send_inst_fifo.read()) 8213 { 8214 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 8215 } 8216 else 8217 { 8218 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 8219 } 8220 8221 uint64_t flit = 0; 8222 uint64_t dest = 8223 m_write_to_cc_send_srcid_fifo.read() << 8224 
(DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8225 8226 DspinDhccpParam::dspin_set( 8227 flit, 8228 dest, 8229 DspinDhccpParam::MULTI_UPDT_DEST); 8230 8231 DspinDhccpParam::dspin_set( 8232 flit, 8233 m_cc_global_id, 8234 DspinDhccpParam::MULTI_UPDT_SRCID); 8235 8236 DspinDhccpParam::dspin_set( 8237 flit, 8238 r_write_to_cc_send_trdid.read(), 8239 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 8240 8241 DspinDhccpParam::dspin_set( 8242 flit, 8243 multi_updt_type, 8244 DspinDhccpParam::M2P_TYPE); 8245 8246 p_dspin_m2p.write = true; 8247 p_dspin_m2p.data = flit; 8248 8249 break; 8250 } 8251 ////////////////////////////// 8252 case CC_SEND_WRITE_UPDT_NLINE: 8253 { 8254 uint64_t flit = 0; 8255 8256 DspinDhccpParam::dspin_set( 8257 flit, 8258 r_write_to_cc_send_index.read(), 8259 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 8260 8261 DspinDhccpParam::dspin_set( 8262 flit, 8263 r_write_to_cc_send_nline.read(), 8264 DspinDhccpParam::MULTI_UPDT_NLINE); 8265 8266 p_dspin_m2p.write = true; 8267 p_dspin_m2p.data = flit; 8268 8269 break; 8270 } 8271 ///////////////////////////// 8272 case CC_SEND_WRITE_UPDT_DATA: 8273 { 8274 8275 uint8_t multi_updt_cpt = 8276 r_cc_send_cpt.read() + r_write_to_cc_send_index.read(); 8277 8278 uint8_t multi_updt_be = r_write_to_cc_send_be[multi_updt_cpt].read(); 8279 uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read(); 8280 8281 uint64_t flit = 0; 8282 8283 DspinDhccpParam::dspin_set( 8284 flit, 8285 multi_updt_be, 8286 DspinDhccpParam::MULTI_UPDT_BE); 8287 8288 DspinDhccpParam::dspin_set( 8289 flit, 8290 multi_updt_data, 8291 DspinDhccpParam::MULTI_UPDT_DATA); 8292 8293 p_dspin_m2p.write = true; 8294 p_dspin_m2p.eop = (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)); 8295 p_dspin_m2p.data = flit; 8296 8297 break; 8298 } 8299 //////////////////////////// 8300 case CC_SEND_CAS_UPDT_HEADER: 8301 { 8302 if (not m_cas_to_cc_send_inst_fifo.rok()) break; 8303 8304 uint8_t multi_updt_type; 8305 if (m_cas_to_cc_send_inst_fifo.read()) 8306 { 8307 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 8308 } 8309 else 8310 { 8311 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 8312 } 8313 8314 uint64_t flit = 0; 8315 uint64_t dest = 8316 m_cas_to_cc_send_srcid_fifo.read() << 8317 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8318 8319 DspinDhccpParam::dspin_set( 8320 flit, 8321 dest, 8322 DspinDhccpParam::MULTI_UPDT_DEST); 8323 8324 DspinDhccpParam::dspin_set( 8325 flit, 8326 m_cc_global_id, 8327 DspinDhccpParam::MULTI_UPDT_SRCID); 8328 8329 DspinDhccpParam::dspin_set( 8330 flit, 8331 r_cas_to_cc_send_trdid.read(), 8332 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 8333 8334 DspinDhccpParam::dspin_set( 8335 flit, 8336 multi_updt_type, 8337 DspinDhccpParam::M2P_TYPE); 8338 8339 p_dspin_m2p.write = true; 8340 p_dspin_m2p.data = flit; 8341 8342 break; 8343 } 8344 //////////////////////////// 8345 case CC_SEND_CAS_UPDT_NLINE: 8346 { 8347 uint64_t flit = 0; 8348 8349 DspinDhccpParam::dspin_set( 8350 flit, 8351 r_cas_to_cc_send_index.read(), 8352 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 8353 8354 DspinDhccpParam::dspin_set( 8355 flit, 8356 r_cas_to_cc_send_nline.read(), 8357 DspinDhccpParam::MULTI_UPDT_NLINE); 8358 8359 p_dspin_m2p.write = true; 8360 p_dspin_m2p.data = flit; 8361 8362 break; 8363 } 8364 /////////////////////////// 8365 case CC_SEND_CAS_UPDT_DATA: 8366 { 8367 uint64_t flit = 0; 8368 8369 DspinDhccpParam::dspin_set( 8370 flit, 8371 0xF, 8372 DspinDhccpParam::MULTI_UPDT_BE); 8373 8374 DspinDhccpParam::dspin_set( 8375 flit, 8376 
r_cas_to_cc_send_wdata.read(), 8377 DspinDhccpParam::MULTI_UPDT_DATA); 8378 8379 p_dspin_m2p.write = true; 8380 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 8381 p_dspin_m2p.data = flit; 8382 8383 break; 8384 } 8385 //////////////////////////////// 8386 case CC_SEND_CAS_UPDT_DATA_HIGH: 8387 { 8388 uint64_t flit = 0; 8389 8390 DspinDhccpParam::dspin_set( 8391 flit, 8392 0xF, 8393 DspinDhccpParam::MULTI_UPDT_BE); 8394 8395 DspinDhccpParam::dspin_set( 8396 flit, 8397 r_cas_to_cc_send_wdata_high.read(), 8398 DspinDhccpParam::MULTI_UPDT_DATA); 8399 8400 p_dspin_m2p.write = true; 8401 p_dspin_m2p.eop = true; 8402 p_dspin_m2p.data = flit; 8403 8404 break; 8405 } 8406 } 8407 8408 //////////////////////////////////////////////////////////////////// 8409 // p_dspin_clack port (CLEANUP FSM) 8410 //////////////////////////////////////////////////////////////////// 8411 8412 if (r_cleanup_fsm.read() == CLEANUP_SEND_CLACK) 8413 { 8414 uint8_t cleanup_ack_type; 8415 if (r_cleanup_inst.read()) 8416 { 8417 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST; 8418 } 8419 else 8420 { 8421 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA; 8422 } 8423 8424 uint64_t flit = 0; 8425 uint64_t dest = r_cleanup_srcid.read() << 8426 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8427 8428 DspinDhccpParam::dspin_set( 8429 flit, 8430 dest, 8431 DspinDhccpParam::CLACK_DEST); 8432 8433 DspinDhccpParam::dspin_set( 8434 flit, 8435 r_cleanup_nline.read(), 8436 DspinDhccpParam::CLACK_SET); 8437 8438 DspinDhccpParam::dspin_set( 8439 flit, 8440 r_cleanup_way_index.read(), 8441 DspinDhccpParam::CLACK_WAY); 8442 8443 DspinDhccpParam::dspin_set( 8444 flit, 8445 cleanup_ack_type, 8446 DspinDhccpParam::CLACK_TYPE); 8447 8448 p_dspin_clack.eop = true; 8449 p_dspin_clack.write = true; 8450 p_dspin_clack.data = flit; 3019 8451 } 3020 8452 else 3021 8453 { 3022 r_write_fsm = WRITE_IDLE; 8454 p_dspin_clack.write = false; 8455 p_dspin_clack.eop = false; 8456 p_dspin_clack.data = 0; 3023 8457 } 3024 8458 3025 #if DEBUG_MEMC_WRITE 3026 if(m_debug) 3027 { 3028 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3029 << " : rsrcid = " << std::hex << r_write_srcid.read() << std::endl; 3030 if(m_cmd_write_addr_fifo.rok()) 3031 { 3032 std::cout << " New Write request: " 3033 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3034 << " / address = " << m_cmd_write_addr_fifo.read() 3035 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 3036 } 3037 } 3038 #endif 3039 } 3040 break; 3041 } 3042 3043 ///////////////////////// 3044 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 3045 { 3046 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3047 { 3048 3049 #if DEBUG_MEMC_WRITE 3050 if(m_debug) 3051 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 3052 #endif 3053 size_t hit_index = 0; 3054 size_t wok_index = 0; 3055 addr_t addr = (addr_t) r_write_address.read(); 3056 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3057 bool hit_write = m_trt.hit_write(m_nline[addr]); 3058 bool wok = not m_trt.full(wok_index); 3059 3060 if(hit_read) // register the modified data in TRT 8459 /////////////////////////////////////////////////////////////////// 8460 // p_dspin_p2m port (CC_RECEIVE FSM) 8461 /////////////////////////////////////////////////////////////////// 8462 // 8463 switch(r_cc_receive_fsm.read()) 3061 8464 { 3062 r_write_trt_index = hit_index; 3063 r_write_fsm = WRITE_MISS_TRT_DATA; 3064 m_cpt_write_miss++; 8465 case 
CC_RECEIVE_IDLE: 8466 { 8467 p_dspin_p2m.read = false; 8468 break; 8469 } 8470 case CC_RECEIVE_CLEANUP: 8471 case CC_RECEIVE_CLEANUP_EOP: 8472 { 8473 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok(); 8474 break; 8475 } 8476 case CC_RECEIVE_MULTI_ACK: 8477 { 8478 p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok(); 8479 break; 8480 } 3065 8481 } 3066 else if(wok and !hit_write) // set a new entry in TRT 3067 { 3068 r_write_trt_index = wok_index; 3069 r_write_fsm = WRITE_MISS_TRT_SET; 3070 m_cpt_write_miss++; 3071 } 3072 else // wait an empty entry in TRT 3073 { 3074 r_write_fsm = WRITE_WAIT; 3075 m_cpt_trt_full++; 3076 } 3077 } 3078 break; 3079 } 3080 3081 //////////////// 3082 case WRITE_WAIT: // release the locks protecting the shared ressources 3083 { 3084 3085 #if DEBUG_MEMC_WRITE 3086 if(m_debug) 3087 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 3088 #endif 3089 r_write_fsm = WRITE_DIR_REQ; 3090 break; 3091 } 3092 3093 //////////////////////// 3094 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 3095 { 3096 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3097 { 3098 std::vector<be_t> be_vector; 3099 std::vector<data_t> data_vector; 3100 be_vector.clear(); 3101 data_vector.clear(); 3102 for(size_t i=0; i<m_words; i++) 3103 { 3104 be_vector.push_back(r_write_be[i]); 3105 data_vector.push_back(r_write_data[i]); 3106 } 3107 m_trt.set(r_write_trt_index.read(), 3108 true, // read request to XRAM 3109 m_nline[(addr_t)(r_write_address.read())], 3110 r_write_srcid.read(), 3111 r_write_trdid.read(), 3112 r_write_pktid.read(), 3113 false, // not a processor read 3114 0, // not a single word 3115 0, // word index 3116 be_vector, 3117 data_vector); 3118 r_write_fsm = WRITE_MISS_XRAM_REQ; 3119 3120 #if DEBUG_MEMC_WRITE 3121 if(m_debug) 3122 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 3123 #endif 3124 } 3125 break; 3126 } 3127 3128 ///////////////////////// 3129 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 3130 { 3131 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3132 { 3133 std::vector<be_t> be_vector; 3134 std::vector<data_t> data_vector; 3135 be_vector.clear(); 3136 data_vector.clear(); 3137 for(size_t i=0; i<m_words; i++) 3138 { 3139 be_vector.push_back(r_write_be[i]); 3140 data_vector.push_back(r_write_data[i]); 3141 } 3142 m_trt.write_data_mask( r_write_trt_index.read(), 3143 be_vector, 3144 data_vector ); 3145 r_write_fsm = WRITE_RSP; 3146 3147 #if DEBUG_MEMC_WRITE 3148 if(m_debug) 3149 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 3150 #endif 3151 } 3152 break; 3153 } 3154 ///////////////////////// 3155 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3156 { 3157 if( not r_write_to_ixr_cmd_req.read() ) 3158 { 3159 r_write_to_ixr_cmd_req = true; 3160 r_write_to_ixr_cmd_put = false; 3161 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3162 r_write_fsm = WRITE_RSP; 3163 3164 #if DEBUG_MEMC_WRITE 3165 if(m_debug) 3166 std::cout << " <MEMC " << name() << " WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 3167 #endif 3168 } 3169 break; 3170 } 3171 /////////////////////// 3172 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3173 // the cache line must be erased in mem-cache, and written 3174 // into XRAM. 
we read the cache and complete the buffer 3175 { 3176 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3177 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3178 3179 // update local buffer 3180 size_t set = m_y[(addr_t)(r_write_address.read())]; 3181 size_t way = r_write_way.read(); 3182 for(size_t word=0 ; word<m_words ; word++) 3183 { 3184 data_t mask = 0; 3185 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3186 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3187 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3188 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3189 3190 // complete only if mask is not null (for energy consumption) 3191 r_write_data[word] = (r_write_data[word].read() & mask) | 3192 (m_cache_data.read(way, set, word) & ~mask); 3193 } // end for 3194 3195 r_write_fsm = WRITE_BC_TRT_LOCK; 3196 3197 #if DEBUG_MEMC_WRITE 3198 if(m_debug) 3199 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3200 << " Read the cache to complete local buffer" << std::endl; 3201 #endif 3202 break; 3203 } 3204 /////////////////////// 3205 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3206 { 3207 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3208 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3209 3210 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3211 { 3212 size_t wok_index = 0; 3213 bool wok = not m_trt.full(wok_index); 3214 if( wok ) 3215 { 3216 r_write_trt_index = wok_index; 3217 r_write_fsm = WRITE_BC_IVT_LOCK; 3218 } 3219 else // wait an empty slot in TRT 3220 { 3221 r_write_fsm = WRITE_WAIT; 3222 } 3223 3224 #if DEBUG_MEMC_WRITE 3225 if(m_debug) 3226 std::cout << " <MEMC " << name() << " WRITE_BC_TRT_LOCK> Check TRT" 3227 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3228 #endif 3229 } 3230 break; 3231 } 3232 ////////////////////// 3233 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3234 { 3235 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3236 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3237 3238 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3239 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3240 3241 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3242 { 3243 bool wok = false; 3244 size_t index = 0; 3245 size_t srcid = r_write_srcid.read(); 3246 size_t trdid = r_write_trdid.read(); 3247 size_t pktid = r_write_pktid.read(); 3248 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3249 size_t nb_copies = r_write_count.read(); 3250 3251 wok = m_ivt.set(false, // it's an inval transaction 3252 true, // it's a broadcast 3253 true, // response required 3254 false, // no acknowledge required 3255 srcid, 3256 trdid, 3257 pktid, 3258 nline, 3259 nb_copies, 3260 index); 3261 #if DEBUG_MEMC_WRITE 3262 if( m_debug and wok ) 3263 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3264 << " / nb_copies = " << r_write_count.read() << std::endl; 3265 #endif 3266 r_write_upt_index = index; 3267 3268 if( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3269 else r_write_fsm = WRITE_WAIT; 3270 } 3271 break; 3272 } 3273 //////////////////////// 3274 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3275 // and invalidate the line in directory 3276 { 3277 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3278 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3279 3280 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 
3281 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3282 3283 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3284 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3285 3286 // register PUT request in TRT 3287 std::vector<data_t> data_vector; 3288 data_vector.clear(); 3289 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3290 m_trt.set( r_write_trt_index.read(), 3291 false, // PUT request 3292 m_nline[(addr_t)(r_write_address.read())], 3293 0, // unused 3294 0, // unused 3295 0, // unused 3296 false, // not a processor read 3297 0, // unused 3298 0, // unused 3299 std::vector<be_t> (m_words,0), 3300 data_vector ); 3301 3302 // invalidate directory entry 3303 DirectoryEntry entry; 3304 entry.valid = false; 3305 entry.dirty = false; 3306 entry.tag = 0; 3307 entry.is_cnt = false; 3308 entry.lock = false; 3309 entry.owner.srcid = 0; 3310 entry.owner.inst = false; 3311 entry.ptr = 0; 3312 entry.count = 0; 3313 size_t set = m_y[(addr_t)(r_write_address.read())]; 3314 size_t way = r_write_way.read(); 3315 3316 m_cache_directory.write(set, way, entry); 3317 3318 #if DEBUG_MEMC_WRITE 3319 if(m_debug) 3320 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3321 << " address = " << r_write_address.read() << std::endl; 3322 #endif 3323 r_write_fsm = WRITE_BC_CC_SEND; 3324 break; 3325 } 3326 3327 ////////////////////// 3328 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3329 { 3330 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3331 { 3332 r_write_to_cc_send_multi_req = false; 3333 r_write_to_cc_send_brdcast_req = true; 3334 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3335 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3336 r_write_to_cc_send_index = 0; 3337 r_write_to_cc_send_count = 0; 3338 3339 for(size_t i=0; i<m_words ; i++) // Ã quoi sert ce for? (AG) 3340 { 3341 r_write_to_cc_send_be[i]=0; 3342 r_write_to_cc_send_data[i] = 0; 3343 } 3344 r_write_fsm = WRITE_BC_XRAM_REQ; 3345 3346 #if DEBUG_MEMC_WRITE 3347 if(m_debug) 3348 std::cout << " <MEMC " << name() 3349 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3350 #endif 3351 } 3352 break; 3353 } 3354 3355 /////////////////////// 3356 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3357 { 3358 if( not r_write_to_ixr_cmd_req.read() ) 3359 { 3360 r_write_to_ixr_cmd_req = true; 3361 r_write_to_ixr_cmd_put = true; 3362 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3363 r_write_fsm = WRITE_IDLE; 3364 3365 #if DEBUG_MEMC_WRITE 3366 if(m_debug) 3367 std::cout << " <MEMC " << name() 3368 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3369 #endif 3370 } 3371 break; 3372 } 3373 } // end switch r_write_fsm 3374 3375 /////////////////////////////////////////////////////////////////////// 3376 // IXR_CMD FSM 3377 /////////////////////////////////////////////////////////////////////// 3378 // The IXR_CMD fsm controls the command packets to the XRAM : 3379 // It handles requests from 5 FSMs with a round-robin priority: 3380 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3381 // 3382 // - It sends a single flit VCI read to the XRAM in case of 3383 // GET request posted by the READ, WRITE or CAS FSMs. 3384 // - It sends a multi-flit VCI write in case of PUT request posted by 3385 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 
3386 // 3387 // For each client, there is three steps: 3388 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3389 // - IXR_CMD_*_TRT : access to TRT for address and data 3390 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3391 // 3392 // The address and data to be written (for a PUT) are stored in TRT. 3393 // The trdid field contains always the TRT entry index. 3394 //////////////////////////////////////////////////////////////////////// 3395 3396 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3397 3398 switch(r_ixr_cmd_fsm.read()) 3399 { 3400 /////////////////////// 3401 case IXR_CMD_READ_IDLE: 3402 { 3403 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3404 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3405 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3406 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3407 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3408 break; 3409 } 3410 //////////////////////// 3411 case IXR_CMD_WRITE_IDLE: 3412 { 3413 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3414 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3415 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3416 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3417 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3418 break; 3419 } 3420 ////////////////////// 3421 case IXR_CMD_CAS_IDLE: 3422 { 3423 if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3424 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3425 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3426 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3427 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3428 break; 3429 } 3430 /////////////////////// 3431 case IXR_CMD_XRAM_IDLE: 3432 { 3433 if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3434 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3435 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3436 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3437 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3438 break; 3439 } 3440 ///////////////////////// 3441 case IXR_CMD_CONFIG_IDLE: 3442 { 3443 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3444 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3445 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3446 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3447 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3448 break; 3449 } 3450 3451 ////////////////////// 3452 case IXR_CMD_READ_TRT: // access TRT for a GET 3453 { 3454 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3455 { 3456 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 3457 r_ixr_cmd_address = entry.nline * (m_words<<2); 3458 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3459 r_ixr_cmd_get = true; 3460 r_ixr_cmd_word = 0; 3461 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3462 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3463 3464 #if DEBUG_MEMC_IXR_CMD 3465 if(m_debug) 3466 std::cout << " <MEMC " << name() << " 
IXR_CMD_READ_TRT> TRT access" 3467 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3468 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3469 #endif 3470 } 3471 break; 3472 } 3473 /////////////////////// 3474 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3475 { 3476 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3477 { 3478 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 3479 r_ixr_cmd_address = entry.nline * (m_words<<2); 3480 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3481 r_ixr_cmd_get = entry.xram_read; 3482 r_ixr_cmd_word = 0; 3483 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3484 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3485 3486 #if DEBUG_MEMC_IXR_CMD 3487 if(m_debug) 3488 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3489 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3490 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3491 #endif 3492 } 3493 break; 3494 } 3495 ///////////////////// 3496 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3497 { 3498 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3499 { 3500 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 3501 r_ixr_cmd_address = entry.nline * (m_words<<2); 3502 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3503 r_ixr_cmd_get = entry.xram_read; 3504 r_ixr_cmd_word = 0; 3505 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3506 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3507 3508 #if DEBUG_MEMC_IXR_CMD 3509 if(m_debug) 3510 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3511 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3512 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3513 #endif 3514 } 3515 break; 3516 } 3517 ////////////////////// 3518 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3519 { 3520 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3521 { 3522 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 3523 r_ixr_cmd_address = entry.nline * (m_words<<2); 3524 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3525 r_ixr_cmd_get = false; 3526 r_ixr_cmd_word = 0; 3527 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3528 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3529 3530 #if DEBUG_MEMC_IXR_CMD 3531 if(m_debug) 3532 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3533 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3534 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3535 #endif 3536 } 3537 break; 3538 } 3539 //////////////////////// 3540 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3541 { 3542 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3543 { 3544 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 3545 r_ixr_cmd_address = entry.nline * (m_words<<2); 3546 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3547 r_ixr_cmd_get = false; 3548 r_ixr_cmd_word = 0; 3549 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3550 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3551 3552 #if DEBUG_MEMC_IXR_CMD 3553 if(m_debug) 3554 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3555 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3556 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3557 #endif 3558 } 3559 break; 3560 } 3561 3562 
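// All five IXR_CMD_*_TRT states above do the same work for their client: read the
// TRT entry, rebuild the line address from its nline field, latch the data words
// for a possible PUT, then move to the matching IXR_CMD_*_SEND state. Sketch of the
// address arithmetic (hypothetical standalone form):
//
//     //  nline is the cache-line index; a line holds m_words 32-bit words
//     uint64_t line_base(uint64_t nline, size_t m_words)
//     {
//         return nline * (m_words << 2);              // nline * line size in bytes
//     }
//     //  the flit address then adds the running word offset (see genMoore):
//     //  p_vci_ixr.address = line_base + (r_ixr_cmd_word << 2)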
/////////////////////// 3563 case IXR_CMD_READ_SEND: // send a get from READ FSM 3564 { 3565 if(p_vci_ixr.cmdack) 3566 { 3567 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3568 r_read_to_ixr_cmd_req = false; 3569 3570 #if DEBUG_MEMC_IXR_CMD 3571 if(m_debug) 3572 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3573 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3574 #endif 3575 } 3576 break; 3577 } 3578 //////////////////////// 3579 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3580 { 3581 if(p_vci_ixr.cmdack) 3582 { 3583 if(r_write_to_ixr_cmd_put.read()) // PUT 3584 { 3585 if(r_ixr_cmd_word.read() == (m_words - 2)) 3586 { 3587 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3588 r_write_to_ixr_cmd_req = false; 3589 } 3590 else 3591 { 3592 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3593 } 3594 3595 #if DEBUG_MEMC_IXR_CMD 3596 if(m_debug) 3597 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3598 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3599 #endif 3600 } 3601 else // GET 3602 { 3603 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3604 r_write_to_ixr_cmd_req = false; 3605 3606 #if DEBUG_MEMC_IXR_CMD 3607 if(m_debug) 3608 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3609 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3610 #endif 3611 } 3612 } 3613 break; 3614 } 3615 ////////////////////// 3616 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3617 { 3618 if(p_vci_ixr.cmdack) 3619 { 3620 if(r_cas_to_ixr_cmd_put.read()) // PUT 3621 { 3622 if(r_ixr_cmd_word.read() == (m_words - 2)) 3623 { 3624 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3625 r_cas_to_ixr_cmd_req = false; 3626 } 3627 else 3628 { 3629 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3630 } 3631 3632 #if DEBUG_MEMC_IXR_CMD 3633 if(m_debug) 3634 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3635 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3636 #endif 3637 } 3638 else // GET 3639 { 3640 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3641 r_cas_to_ixr_cmd_req = false; 3642 3643 #if DEBUG_MEMC_IXR_CMD 3644 if(m_debug) 3645 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3646 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3647 #endif 3648 } 3649 } 3650 break; 3651 } 3652 /////////////////////// 3653 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3654 { 3655 if(p_vci_ixr.cmdack.read()) 3656 { 3657 if(r_ixr_cmd_word.read() == (m_words - 2)) 3658 { 3659 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3660 r_xram_rsp_to_ixr_cmd_req = false; 3661 } 3662 else 3663 { 3664 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3665 } 3666 3667 #if DEBUG_MEMC_IXR_CMD 3668 if(m_debug) 3669 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3670 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3671 #endif 3672 } 3673 break; 3674 } 3675 ///////////////////////// 3676 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 3677 { 3678 if(p_vci_ixr.cmdack.read()) 3679 { 3680 if(r_ixr_cmd_word.read() == (m_words - 2)) 3681 { 3682 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 3683 r_config_to_ixr_cmd_req = false; 3684 } 3685 else 3686 { 3687 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3688 } 3689 3690 #if DEBUG_MEMC_IXR_CMD 3691 if(m_debug) 3692 
std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 3693 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3694 #endif 3695 } 3696 break; 3697 } 3698 } // end switch r_ixr_cmd_fsm 3699 3700 //////////////////////////////////////////////////////////////////////////// 3701 // IXR_RSP FSM 3702 //////////////////////////////////////////////////////////////////////////// 3703 // The IXR_RSP FSM receives the response packets from the XRAM, 3704 // for both PUT transaction, and GET transaction. 3705 // 3706 // - A response to a PUT request is a single-cell VCI packet. 3707 // The TRT index is contained in the RTRDID field. 3708 // The FSM takes the lock protecting the TRT, and the corresponding 3709 // entry is erased. If an acknowledge was required (in case of software SYNC) 3710 // the r_config_rsp_lines counter is decremented. 3711 // 3712 // - A response to a GET request is a multi-cell VCI packet. 3713 // The TRT index is contained in the RTRDID field. 3714 // The N cells contain the N words of the cache line in the RDATA field. 3715 // The FSM takes the lock protecting the TRT to store the line in the TRT 3716 // (taking into account the write requests already stored in the TRT). 3717 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 3718 // signal is set to inform the XRAM_RSP FSM. 3719 /////////////////////////////////////////////////////////////////////////////// 3720 3721 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 3722 3723 switch(r_ixr_rsp_fsm.read()) 3724 { 3725 ////////////////// 3726 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3727 { 3728 if(p_vci_ixr.rspval.read()) 3729 { 3730 r_ixr_rsp_cpt = 0; 3731 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3732 3733 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 3734 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 3735 3736 if(p_vci_ixr.reop.read()) // PUT 3737 { 3738 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3739 3740 #if DEBUG_MEMC_IXR_RSP 3741 if(m_debug) 3742 std::cout << " <MEMC " << name() 3743 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 3744 #endif 3745 } 3746 else // GET 3747 { 3748 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 3749 3750 #if DEBUG_MEMC_IXR_RSP 3751 if(m_debug) 3752 std::cout << " <MEMC " << name() 3753 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 3754 #endif 3755 } 3756 } 3757 break; 3758 } 3759 //////////////////////// 3760 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3761 // decrease the line counter if config request 3762 { 3763 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3764 { 3765 size_t index = r_ixr_rsp_trt_index.read(); 3766 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 3767 m_trt.erase(index); 3768 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3769 3770 #if DEBUG_MEMC_IXR_RSP 3771 if(m_debug) 3772 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 3773 << r_ixr_rsp_trt_index.read() << std::endl; 3774 #endif 3775 } 3776 break; 3777 } 3778 ////////////////////// 3779 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 3780 { 3781 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 3782 { 3783 size_t index = r_ixr_rsp_trt_index.read(); 3784 size_t word = r_ixr_rsp_cpt.read(); 3785 bool eop = p_vci_ixr.reop.read(); 3786 wide_data_t data = p_vci_ixr.rdata.read(); 3787 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 3788 3789 assert(((eop == (word == 
(m_words-2))) or error) and
3790                 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM");
3791 
3792                 m_trt.write_rsp( index,
3793                                  word,
3794                                  data );
3795 
3796                 r_ixr_rsp_cpt = word + 2;
3797 
3798                 if( eop )
3799                 {
3800                     r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()] = true;
3801                     r_ixr_rsp_fsm = IXR_RSP_IDLE;
3802                 }
3803 
3804 #if DEBUG_MEMC_IXR_RSP
3805                 if(m_debug)
3806                     std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : "
3807                               << " index = " << std::dec << index
3808                               << " / word = " << word
3809                               << " / data = " << std::hex << data << std::endl;
3810 #endif
3811             }
3812             break;
3813         }
3814     } // end switch r_ixr_rsp_fsm
3815 
3816     ////////////////////////////////////////////////////////////////////////////
3817     // XRAM_RSP FSM
3818     ////////////////////////////////////////////////////////////////////////////
3819     // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET.
3820     // The cache line has been written in the TRT by the IXR_RSP FSM.
3821     // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel,
3822     // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as there are
3823     // entries in the TRT; they are handled with a round-robin priority.
3824     //
3825     // The FSM takes the lock protecting the TRT, and the lock protecting the DIR.
3826     // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf.
3827     // It selects a cache slot and saves the victim line in another local buffer
3828     // r_xram_rsp_victim_***.
3829     // It writes the line extracted from the TRT in the cache.
3830     // If it was a read MISS, the XRAM_RSP FSM sends a request to the TGT_RSP
3831     // FSM to return the cache line to the registered processor.
3832     // If there is no empty slot, a victim line is evicted, and
3833     // invalidate requests are sent to the L1 caches containing copies.
3834     // If this line is dirty, the XRAM_RSP FSM sends a request to the IXR_CMD
3835     // FSM to save the victim line to the XRAM, and registers the write transaction
3836     // in the TRT (using the entry previously used by the read transaction).
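(Editorial aside, not part of the RTL: a minimal standalone sketch of the line-index arithmetic the states below rely on. The set/word geometry values are hypothetical, and m_y[] is assumed to extract the set field of a byte address, i.e. the line index modulo the number of sets.)

#include <cassert>
#include <cstdint>
#include <cstddef>

int main()
{
    // hypothetical cache geometry (the real values come from the constructor)
    const std::size_t sets  = 256;   // number of sets
    const std::size_t words = 16;    // 32-bit words per cache line

    std::uint64_t nline   = 0x12345;            // cache line index
    std::uint64_t address = nline * words * 4;  // byte address of the line (nline * m_words * 4)
    std::uint64_t tag     = nline / sets;       // directory tag (entry.tag = nline / m_sets)
    std::size_t   set     = nline % sets;       // set index (what m_y[address] is assumed to extract)

    // tag and set recombine into the victim line index, as in XRAM_RSP_TRT_COPY:
    //   r_xram_rsp_victim_nline = victim.tag * m_sets + set
    assert(tag * sets + set == nline);
    (void) address;
    return 0;
}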
3837 /////////////////////////////////////////////////////////////////////////////// 3838 3839 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 3840 3841 switch(r_xram_rsp_fsm.read()) 3842 { 3843 /////////////////// 3844 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 3845 { 3846 size_t old = r_xram_rsp_trt_index.read(); 3847 size_t lines = m_trt_lines; 3848 for(size_t i=0 ; i<lines ; i++) 3849 { 3850 size_t index = (i+old+1) %lines; 3851 if(r_ixr_rsp_to_xram_rsp_rok[index]) 3852 { 3853 r_xram_rsp_trt_index = index; 3854 r_ixr_rsp_to_xram_rsp_rok[index] = false; 3855 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 3856 3857 #if DEBUG_MEMC_XRAM_RSP 3858 if(m_debug) 3859 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 3860 << " Available cache line in TRT:" 3861 << " index = " << std::dec << index << std::endl; 3862 #endif 3863 break; 3864 } 3865 } 3866 break; 3867 } 3868 /////////////////////// 3869 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 3870 // Copy the TRT entry in a local buffer 3871 { 3872 if( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3873 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 3874 { 3875 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 3876 size_t index = r_xram_rsp_trt_index.read(); 3877 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 3878 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 3879 3880 #if DEBUG_MEMC_XRAM_RSP 3881 if(m_debug) 3882 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 3883 << " Get access to DIR and TRT" << std::endl; 3884 #endif 3885 } 3886 break; 3887 } 3888 /////////////////////// 3889 case XRAM_RSP_TRT_COPY: // Select a victim cache line 3890 // and copy it in a local buffer 3891 { 3892 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3893 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 3894 3895 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3896 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 3897 3898 // selects & extracts a victim line from cache 3899 size_t way = 0; 3900 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 3901 3902 DirectoryEntry victim(m_cache_directory.select(set, way)); 3903 3904 bool inval = (victim.count and victim.valid) ; 3905 3906 // copy the victim line in a local buffer (both data dir) 3907 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 3908 3909 r_xram_rsp_victim_copy = victim.owner.srcid; 3910 r_xram_rsp_victim_copy_inst = victim.owner.inst; 3911 r_xram_rsp_victim_count = victim.count; 3912 r_xram_rsp_victim_ptr = victim.ptr; 3913 r_xram_rsp_victim_way = way; 3914 r_xram_rsp_victim_set = set; 3915 r_xram_rsp_victim_nline = (addr_t)victim.tag*m_sets + set; 3916 r_xram_rsp_victim_is_cnt = victim.is_cnt; 3917 r_xram_rsp_victim_inval = inval ; 3918 r_xram_rsp_victim_dirty = victim.dirty; 3919 3920 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 3921 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 3922 3923 #if DEBUG_MEMC_XRAM_RSP 3924 if(m_debug) 3925 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 3926 << " Select a victim slot: " 3927 << " way = " << std::dec << way 3928 << " / set = " << set 3929 << " / inval_required = " << inval << std::endl; 3930 #endif 3931 break; 3932 } 3933 /////////////////////// 3934 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 3935 // to check a possible pending inval 3936 { 3937 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3938 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: 
Bad DIR allocation"); 3939 3940 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3941 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 3942 3943 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 3944 { 3945 size_t index = 0; 3946 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 3947 { 3948 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 3949 3950 #if DEBUG_MEMC_XRAM_RSP 3951 if(m_debug) 3952 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3953 << " Get acces to IVT, but line invalidation registered" 3954 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 3955 << " / index = " << std::dec << index << std::endl; 3956 #endif 3957 3958 } 3959 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 3960 { 3961 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 3962 3963 #if DEBUG_MEMC_XRAM_RSP 3964 if(m_debug) 3965 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3966 << " Get acces to IVT, but inval required and IVT full" << std::endl; 3967 #endif 3968 } 3969 else 3970 { 3971 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 3972 3973 #if DEBUG_MEMC_XRAM_RSP 3974 if(m_debug) 3975 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3976 << " Get acces to IVT / no pending inval request" << std::endl; 3977 #endif 3978 } 3979 } 3980 break; 3981 } 3982 ///////////////////////// 3983 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 3984 { 3985 3986 #if DEBUG_MEMC_XRAM_RSP 3987 if(m_debug) 3988 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 3989 << " Release all locks and retry" << std::endl; 3990 #endif 3991 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 3992 break; 3993 } 3994 /////////////////////// 3995 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 3996 // erases the TRT entry if victim not dirty, 3997 // and set inval request in IVT if required 3998 { 3999 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4000 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4001 4002 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4003 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4004 4005 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4006 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4007 4008 // check if this is an instruction read, this means pktid is either 4009 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4010 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4011 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4012 4013 // check if this is a cached read, this means pktid is either 4014 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4015 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4016 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4017 4018 bool dirty = false; 4019 4020 // update cache data 4021 size_t set = r_xram_rsp_victim_set.read(); 4022 size_t way = r_xram_rsp_victim_way.read(); 4023 4024 for(size_t word=0; word<m_words ; word++) 4025 { 4026 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4027 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4028 } 4029 4030 // update cache directory 4031 DirectoryEntry entry; 4032 entry.valid = true; 4033 entry.is_cnt = false; 4034 entry.lock = false; 4035 entry.dirty = dirty; 4036 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4037 entry.ptr = 0; 4038 if(cached_read) 4039 { 4040 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4041 entry.owner.inst = inst_read; 
4042 entry.count = 1; 4043 } 4044 else 4045 { 4046 entry.owner.srcid = 0; 4047 entry.owner.inst = 0; 4048 entry.count = 0; 4049 } 4050 m_cache_directory.write(set, way, entry); 4051 4052 // register invalid request in IVT for victim line if required 4053 if(r_xram_rsp_victim_inval.read()) 4054 { 4055 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4056 size_t index = 0; 4057 size_t count_copies = r_xram_rsp_victim_count.read(); 4058 4059 bool wok = m_ivt.set(false, // it's an inval transaction 4060 broadcast, // set broadcast bit 4061 false, // no response required 4062 false, // no acknowledge required 4063 0, // srcid 4064 0, // trdid 4065 0, // pktid 4066 r_xram_rsp_victim_nline.read(), 4067 count_copies, 4068 index); 4069 4070 r_xram_rsp_ivt_index = index; 4071 4072 assert( wok and 4073 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4074 } 4075 4076 #if DEBUG_MEMC_XRAM_RSP 4077 if(m_debug) 4078 { 4079 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 4080 << " Cache update: " 4081 << " way = " << std::dec << way 4082 << " / set = " << set 4083 << " / owner_id = " << std::hex << entry.owner.srcid 4084 << " / owner_ins = " << std::dec << entry.owner.inst 4085 << " / count = " << entry.count 4086 << " / is_cnt = " << entry.is_cnt << std::endl; 4087 if(r_xram_rsp_victim_inval.read()) 4088 std::cout << " Invalidation request for address " 4089 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 4090 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4091 } 4092 #endif 4093 4094 // If the victim is not dirty, we don't need to reuse the TRT entry for 4095 // another PUT transaction, and we can erase the TRT entry 4096 if( not r_xram_rsp_victim_dirty.read() ) 4097 { 4098 m_trt.erase(r_xram_rsp_trt_index.read()); 4099 } 4100 4101 // Next state 4102 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4103 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4104 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4105 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4106 break; 4107 } 4108 //////////////////////// 4109 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 4110 { 4111 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4112 { 4113 std::vector<data_t> data_vector; 4114 data_vector.clear(); 4115 for(size_t i=0; i<m_words; i++) 4116 { 4117 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4118 } 4119 m_trt.set( r_xram_rsp_trt_index.read(), 4120 false, // PUT 4121 r_xram_rsp_victim_nline.read(), // line index 4122 0, // unused 4123 0, // unused 4124 0, // unused 4125 false, // not proc_read 4126 0, // unused 4127 0, // unused 4128 std::vector<be_t>(m_words,0xF), 4129 data_vector); 4130 4131 #if DEBUG_MEMC_XRAM_RSP 4132 if(m_debug) 4133 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4134 << " Set TRT entry for the put transaction" 4135 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4136 #endif 4137 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4138 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4139 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4140 } 4141 break; 4142 } 4143 ////////////////////// 4144 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4145 { 4146 if ( not r_xram_rsp_to_tgt_rsp_req.read() ) 4147 { 4148 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4149 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4150 
r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4151 for(size_t i=0; i < m_words; i++) 4152 { 4153 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4154 } 4155 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4156 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4157 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4158 r_xram_rsp_to_tgt_rsp_rerror = false; 4159 r_xram_rsp_to_tgt_rsp_req = true; 4160 4161 if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4162 else if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4163 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4164 4165 #if DEBUG_MEMC_XRAM_RSP 4166 if(m_debug) 4167 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 4168 << " Request the TGT_RSP FSM to return data:" 4169 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 4170 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4171 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4172 #endif 4173 } 4174 break; 4175 } 4176 //////////////////// 4177 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 4178 { 4179 if(!r_xram_rsp_to_cc_send_multi_req.read() and 4180 !r_xram_rsp_to_cc_send_brdcast_req.read()) 4181 { 4182 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 4183 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 4184 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4185 4186 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 4187 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4188 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4189 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4190 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4191 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4192 xram_rsp_to_cc_send_fifo_put = multi_req; 4193 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4194 4195 if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4196 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4197 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4198 4199 #if DEBUG_MEMC_XRAM_RSP 4200 if(m_debug) 4201 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4202 << " Send an inval request to CC_SEND FSM" 4203 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4204 #endif 4205 } 4206 break; 4207 } 4208 ////////////////////////// 4209 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4210 { 4211 if ( not r_xram_rsp_to_ixr_cmd_req.read() ) 4212 { 4213 r_xram_rsp_to_ixr_cmd_req = true; 4214 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4215 4216 m_cpt_write_dirty++; 4217 4218 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4219 r_xram_rsp_victim_inval.read(); 4220 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4221 4222 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4223 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4224 4225 #if DEBUG_MEMC_XRAM_RSP 4226 if(m_debug) 4227 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4228 << " Send the put request to IXR_CMD FSM" 4229 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4230 #endif 4231 } 4232 break; 4233 } 4234 ///////////////////////// 4235 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4236 { 4237 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4238 { 4239 r_xram_rsp_fsm = 
XRAM_RSP_HEAP_ERASE; 4240 } 4241 4242 #if DEBUG_MEMC_XRAM_RSP 4243 if(m_debug) 4244 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 4245 << " Requesting HEAP lock" << std::endl; 4246 #endif 4247 break; 4248 } 4249 ///////////////////////// 4250 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 4251 { 4252 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4253 { 4254 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 4255 4256 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4257 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4258 xram_rsp_to_cc_send_fifo_put = true; 4259 if(m_xram_rsp_to_cc_send_inst_fifo.wok()) 4260 { 4261 r_xram_rsp_next_ptr = entry.next; 4262 if(entry.next == r_xram_rsp_next_ptr.read()) // last copy 4263 { 4264 r_xram_rsp_to_cc_send_multi_req = true; 4265 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 4266 } 4267 else 4268 { 4269 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4270 } 4271 } 4272 else 4273 { 4274 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4275 } 4276 4277 #if DEBUG_MEMC_XRAM_RSP 4278 if(m_debug) 4279 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 4280 << " Erase copy:" 4281 << " srcid = " << std::hex << entry.owner.srcid 4282 << " / inst = " << std::dec << entry.owner.inst << std::endl; 4283 #endif 4284 } 4285 break; 4286 } 4287 ///////////////////////// 4288 case XRAM_RSP_HEAP_LAST: // last copy 4289 { 4290 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 4291 { 4292 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 4293 << " bad HEAP allocation" << std::endl; 4294 exit(0); 4295 } 4296 size_t free_pointer = m_heap.next_free_ptr(); 4297 4298 HeapEntry last_entry; 4299 last_entry.owner.srcid = 0; 4300 last_entry.owner.inst = false; 4301 if(m_heap.is_full()) 4302 { 4303 last_entry.next = r_xram_rsp_next_ptr.read(); 4304 m_heap.unset_full(); 4305 } 4306 else 4307 { 4308 last_entry.next = free_pointer; 4309 } 4310 4311 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 4312 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 4313 4314 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4315 4316 #if DEBUG_MEMC_XRAM_RSP 4317 if(m_debug) 4318 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 4319 << " Heap housekeeping" << std::endl; 4320 #endif 4321 break; 4322 } 4323 ////////////////////////// 4324 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 4325 { 4326 m_trt.erase(r_xram_rsp_trt_index.read()); 4327 4328 // Next state 4329 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 4330 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4331 4332 #if DEBUG_MEMC_XRAM_RSP 4333 if(m_debug) 4334 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 4335 << " Error reported by XRAM / erase the TRT entry" << std::endl; 4336 #endif 4337 break; 4338 } 4339 //////////////////////// 4340 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 4341 { 4342 if(!r_xram_rsp_to_tgt_rsp_req.read()) 4343 { 4344 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4345 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4346 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4347 for(size_t i=0; i < m_words; i++) 4348 { 4349 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4350 } 4351 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4352 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4353 r_xram_rsp_to_tgt_rsp_rerror = true; 4354 r_xram_rsp_to_tgt_rsp_req = true; 4355 4356 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4357 4358 #if DEBUG_MEMC_XRAM_RSP 4359 
if(m_debug) 4360 std::cout << " <MEMC " << name() 4361 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 4362 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 4363 #endif 4364 } 4365 break; 4366 } 4367 } // end swich r_xram_rsp_fsm 4368 4369 //////////////////////////////////////////////////////////////////////////////////// 4370 // CLEANUP FSM 4371 //////////////////////////////////////////////////////////////////////////////////// 4372 // The CLEANUP FSM handles the cleanup request from L1 caches. 4373 // It accesses the cache directory and the heap to update the list of copies. 4374 //////////////////////////////////////////////////////////////////////////////////// 4375 4376 //std::cout << std::endl << "cleanup_fsm" << std::endl; 4377 4378 switch(r_cleanup_fsm.read()) 4379 { 4380 ////////////////// 4381 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4382 { 4383 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4384 4385 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4386 4387 uint32_t srcid = DspinDhccpParam::dspin_get( flit, 4388 DspinDhccpParam::CLEANUP_SRCID); 4389 4390 uint8_t type = DspinDhccpParam::dspin_get( flit, 4391 DspinDhccpParam::P2M_TYPE); 4392 4393 r_cleanup_way_index = DspinDhccpParam::dspin_get( flit, 4394 DspinDhccpParam::CLEANUP_WAY_INDEX); 4395 4396 r_cleanup_nline = DspinDhccpParam::dspin_get( flit, 4397 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4398 4399 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4400 r_cleanup_srcid = srcid; 4401 4402 assert( (srcid < m_initiators) and 4403 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 4404 4405 m_cpt_cleanup++; 4406 cc_receive_to_cleanup_fifo_get = true; 4407 r_cleanup_fsm = CLEANUP_GET_NLINE; 4408 4409 #if DEBUG_MEMC_CLEANUP 4410 if(m_debug) 4411 std::cout << " <MEMC " << name() 4412 << " CLEANUP_IDLE> Cleanup request:" << std::hex 4413 << " owner_id = " << srcid 4414 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) << std::endl; 4415 #endif 4416 break; 4417 } 4418 /////////////////////// 4419 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4420 { 4421 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4422 4423 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4424 4425 addr_t nline = r_cleanup_nline.read() | 4426 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4427 4428 cc_receive_to_cleanup_fifo_get = true; 4429 r_cleanup_nline = nline; 4430 r_cleanup_fsm = CLEANUP_DIR_REQ; 4431 4432 #if DEBUG_MEMC_CLEANUP 4433 if(m_debug) 4434 std::cout << " <MEMC " << name() 4435 << " CLEANUP_GET_NLINE> Cleanup request:" 4436 << " address = " << std::hex << nline * m_words * 4 << std::endl; 4437 #endif 4438 break; 4439 } 4440 ///////////////////// 4441 case CLEANUP_DIR_REQ: // Get the lock to the directory 4442 { 4443 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4444 4445 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4446 4447 #if DEBUG_MEMC_CLEANUP 4448 if(m_debug) 4449 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 4450 #endif 4451 break; 4452 } 4453 ////////////////////// 4454 case CLEANUP_DIR_LOCK: // test directory status 4455 { 4456 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4457 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4458 4459 // Read the directory 4460 size_t way = 0; 4461 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 4462 DirectoryEntry entry = m_cache_directory.read(cleanup_address 
, way); 4463 r_cleanup_is_cnt = entry.is_cnt; 4464 r_cleanup_dirty = entry.dirty; 4465 r_cleanup_tag = entry.tag; 4466 r_cleanup_lock = entry.lock; 4467 r_cleanup_way = way; 4468 r_cleanup_count = entry.count; 4469 r_cleanup_ptr = entry.ptr; 4470 r_cleanup_copy = entry.owner.srcid; 4471 r_cleanup_copy_inst = entry.owner.inst; 4472 4473 if(entry.valid) // hit : the copy must be cleared 4474 { 4475 assert( (entry.count > 0) and 4476 "MEMC ERROR in CLEANUP_DIR_LOCK state, CLEANUP on valid entry with no copies"); 4477 4478 if((entry.count == 1) or (entry.is_cnt)) // no access to the heap 4479 { 4480 r_cleanup_fsm = CLEANUP_DIR_WRITE; 4481 } 4482 else // access to the heap 4483 { 4484 r_cleanup_fsm = CLEANUP_HEAP_REQ; 4485 } 4486 } 4487 else // miss : check IVT for a pending inval 4488 { 4489 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4490 } 4491 4492 #if DEBUG_MEMC_CLEANUP 4493 if(m_debug) 4494 std::cout << " <MEMC " << name() 4495 << " CLEANUP_DIR_LOCK> Test directory status: " 4496 << std::hex << " address = " << cleanup_address 4497 << " / hit = " << entry.valid 4498 << " / dir_id = " << entry.owner.srcid 4499 << " / dir_ins = " << entry.owner.inst 4500 << " / search_id = " << r_cleanup_srcid.read() 4501 << " / search_ins = " << r_cleanup_inst.read() 4502 << " / count = " << entry.count 4503 << " / is_cnt = " << entry.is_cnt << std::endl; 4504 #endif 4505 break; 4506 } 4507 /////////////////////// 4508 case CLEANUP_DIR_WRITE: // Update the directory entry without heap access 4509 { 4510 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4511 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4512 4513 size_t way = r_cleanup_way.read(); 4514 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 4515 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4516 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4517 bool match = match_srcid and match_inst; 4518 4519 assert( (r_cleanup_is_cnt.read() or match) and 4520 "MEMC ERROR in CLEANUP_DIR_LOCK: illegal CLEANUP on valid entry"); 4521 4522 // update the cache directory (for the copies) 4523 DirectoryEntry entry; 4524 entry.valid = true; 4525 entry.is_cnt = r_cleanup_is_cnt.read(); 4526 entry.dirty = r_cleanup_dirty.read(); 4527 entry.tag = r_cleanup_tag.read(); 4528 entry.lock = r_cleanup_lock.read(); 4529 entry.ptr = r_cleanup_ptr.read(); 4530 entry.count = r_cleanup_count.read() - 1; 4531 entry.owner.srcid = 0; 4532 entry.owner.inst = 0; 4533 4534 m_cache_directory.write(set, way, entry); 4535 4536 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4537 4538 #if DEBUG_MEMC_CLEANUP 4539 if(m_debug) 4540 std::cout << " <MEMC " << name() 4541 << " CLEANUP_DIR_WRITE> Update directory:" 4542 << std::hex << " address = " << r_cleanup_nline.read() * m_words * 4 4543 << " / dir_id = " << entry.owner.srcid 4544 << " / dir_ins = " << entry.owner.inst 4545 << " / count = " << entry.count 4546 << " / is_cnt = " << entry.is_cnt << std::endl; 4547 #endif 4548 4549 break; 4550 } 4551 ////////////////////// 4552 case CLEANUP_HEAP_REQ: // get the lock to the HEAP directory 4553 { 4554 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 4555 4556 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 4557 4558 #if DEBUG_MEMC_CLEANUP 4559 if(m_debug) 4560 std::cout << " <MEMC " << name() 4561 << " CLEANUP_HEAP_REQ> HEAP lock acquired " << std::endl; 4562 #endif 4563 break; 4564 } 4565 ////////////////////// 4566 case CLEANUP_HEAP_LOCK: // two cases are handled in this state : 4567 // 1. 
the matching copy is directly in the directory 4568 // 2. the matching copy is the first copy in the heap 4569 { 4570 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4571 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4572 4573 size_t way = r_cleanup_way.read(); 4574 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 4575 4576 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 4577 bool last = (heap_entry.next == r_cleanup_ptr.read()); 4578 4579 // match_dir computation 4580 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4581 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4582 bool match_dir = match_dir_srcid and match_dir_inst; 4583 4584 // match_heap computation 4585 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4586 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4587 bool match_heap = match_heap_srcid and match_heap_inst; 4588 4589 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 4590 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4591 r_cleanup_prev_inst = heap_entry.owner.inst; 4592 4593 assert( (not last or match_dir or match_heap) and 4594 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 4595 4596 assert( (not match_dir or not match_heap) and 4597 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 4598 4599 DirectoryEntry dir_entry; 4600 dir_entry.valid = true; 4601 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 4602 dir_entry.dirty = r_cleanup_dirty.read(); 4603 dir_entry.tag = r_cleanup_tag.read(); 4604 dir_entry.lock = r_cleanup_lock.read(); 4605 dir_entry.count = r_cleanup_count.read()-1; 4606 4607 // the matching copy is registered in the directory and 4608 // it must be replaced by the first copy registered in 4609 // the heap. 
The corresponding entry must be freed 4610 if(match_dir) 4611 { 4612 dir_entry.ptr = heap_entry.next; 4613 dir_entry.owner.srcid = heap_entry.owner.srcid; 4614 dir_entry.owner.inst = heap_entry.owner.inst; 4615 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4616 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4617 } 4618 4619 // the matching copy is the first copy in the heap 4620 // It must be freed and the copy registered in directory 4621 // must point to the next copy in heap 4622 else if(match_heap) 4623 { 4624 dir_entry.ptr = heap_entry.next; 4625 dir_entry.owner.srcid = r_cleanup_copy.read(); 4626 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4627 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4628 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4629 } 4630 4631 // The matching copy is in the heap, but is not the first copy 4632 // The directory entry must be modified to decrement count 4633 else 4634 { 4635 dir_entry.ptr = r_cleanup_ptr.read(); 4636 dir_entry.owner.srcid = r_cleanup_copy.read(); 4637 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4638 r_cleanup_next_ptr = heap_entry.next; 4639 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4640 } 4641 4642 m_cache_directory.write(set,way,dir_entry); 4643 4644 #if DEBUG_MEMC_CLEANUP 4645 if(m_debug) 4646 std::cout << " <MEMC " << name() 4647 << " CLEANUP_HEAP_LOCK> Checks matching:" 4648 << " address = " << r_cleanup_nline.read() * m_words * 4 4649 << " / dir_id = " << r_cleanup_copy.read() 4650 << " / dir_ins = " << r_cleanup_copy_inst.read() 4651 << " / heap_id = " << heap_entry.owner.srcid 4652 << " / heap_ins = " << heap_entry.owner.inst 4653 << " / search_id = " << r_cleanup_srcid.read() 4654 << " / search_ins = " << r_cleanup_inst.read() << std::endl; 4655 #endif 4656 break; 4657 } 4658 //////////////////////// 4659 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 4660 // is in the heap, but not the first in linked list 4661 { 4662 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4663 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4664 4665 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 4666 4667 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 4668 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4669 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4670 bool match_heap = match_heap_srcid and match_heap_inst; 4671 4672 assert( (not last or match_heap) and 4673 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 4674 4675 // the matching copy must be removed 4676 if(match_heap) 4677 { 4678 // re-use ressources 4679 r_cleanup_ptr = heap_entry.next; 4680 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 4681 } 4682 // test the next in the linked list 4683 else 4684 { 4685 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 4686 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4687 r_cleanup_prev_inst = heap_entry.owner.inst; 4688 r_cleanup_next_ptr = heap_entry.next; 4689 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4690 } 4691 4692 #if DEBUG_MEMC_CLEANUP 4693 if(m_debug) 4694 { 4695 if(not match_heap) 4696 { 4697 std::cout 4698 << " <MEMC " << name() 4699 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 4700 << std::endl; 4701 } 4702 else 4703 { 4704 std::cout 4705 << " <MEMC " << name() 4706 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 4707 << std::endl; 4708 } 4709 std::cout 4710 << " address = " << r_cleanup_nline.read() * m_words * 4 4711 << " / heap_id = " << heap_entry.owner.srcid 4712 << " / heap_ins = " << 
heap_entry.owner.inst 4713 << " / search_id = " << r_cleanup_srcid.read() 4714 << " / search_ins = " << r_cleanup_inst.read() 4715 << " / last = " << last 4716 << std::endl; 4717 } 4718 #endif 4719 break; 4720 } 4721 //////////////////////// 4722 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 4723 { 4724 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4725 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4726 4727 HeapEntry heap_entry; 4728 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 4729 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 4730 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 4731 4732 if (last) // this is the last entry of the list of copies 4733 { 4734 heap_entry.next = r_cleanup_prev_ptr.read(); 4735 } 4736 else // this is not the last entry 4737 { 4738 heap_entry.next = r_cleanup_ptr.read(); 4739 } 4740 4741 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 4742 4743 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4744 4745 #if DEBUG_MEMC_CLEANUP 4746 if(m_debug) 4747 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 4748 << " Remove the copy in the linked list" << std::endl; 4749 #endif 4750 break; 4751 } 4752 /////////////////////// 4753 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 4754 // and becomes the head of the list of free entries 4755 { 4756 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4757 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4758 4759 HeapEntry heap_entry; 4760 heap_entry.owner.srcid = 0; 4761 heap_entry.owner.inst = false; 4762 4763 if(m_heap.is_full()) 4764 { 4765 heap_entry.next = r_cleanup_next_ptr.read(); 4766 } 4767 else 4768 { 4769 heap_entry.next = m_heap.next_free_ptr(); 4770 } 4771 4772 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 4773 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 4774 m_heap.unset_full(); 4775 4776 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4777 4778 #if DEBUG_MEMC_CLEANUP 4779 if(m_debug) 4780 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 4781 << " Update the list of free entries" << std::endl; 4782 #endif 4783 break; 4784 } 4785 ////////////////////// 4786 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 4787 // invalidate transaction matching the cleanup 4788 { 4789 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 4790 4791 size_t index = 0; 4792 bool match_inval; 4793 4794 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 4795 4796 if ( not match_inval ) // no pending inval in IVT 4797 { 4798 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4799 4800 #if DEBUG_MEMC_CLEANUP 4801 if(m_debug) 4802 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 4803 << " Unexpected cleanup with no corresponding IVT entry:" 4804 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 4805 #endif 4806 } 4807 else // pending inval in IVT 4808 { 4809 r_cleanup_write_srcid = m_ivt.srcid(index); 4810 r_cleanup_write_trdid = m_ivt.trdid(index); 4811 r_cleanup_write_pktid = m_ivt.pktid(index); 4812 r_cleanup_need_rsp = m_ivt.need_rsp(index); 4813 r_cleanup_need_ack = m_ivt.need_ack(index); 4814 r_cleanup_index = index; 4815 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 4816 4817 #if DEBUG_MEMC_CLEANUP 4818 if(m_debug) 4819 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 4820 << " Cleanup matching pending invalidate transaction on IVT:" 4821 << " address = " << std::hex << (r_cleanup_nline.read()*m_words*4) 4822 << " / 
ivt_entry = " << index << std::endl; 4823 #endif 4824 } 4825 break; 4826 } 4827 /////////////////////////// 4828 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 4829 // and test if last 4830 { 4831 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 4832 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 4833 4834 size_t count = 0; 4835 m_ivt.decrement(r_cleanup_index.read(), count); 4836 4837 if(count == 0) r_cleanup_fsm = CLEANUP_IVT_CLEAR; 4838 else r_cleanup_fsm = CLEANUP_SEND_CLACK ; 4839 4840 #if DEBUG_MEMC_CLEANUP 4841 if(m_debug) 4842 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 4843 << " Decrement response counter in IVT:" 4844 << " IVT_index = " << r_cleanup_index.read() 4845 << " / rsp_count = " << count << std::endl; 4846 #endif 4847 break; 4848 } 4849 /////////////////////// 4850 case CLEANUP_IVT_CLEAR: // Clear IVT entry 4851 // Acknowledge CONFIG FSM if required 4852 { 4853 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 4854 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 4855 4856 m_ivt.clear(r_cleanup_index.read()); 4857 4858 if ( r_cleanup_need_ack.read() ) 4859 { 4860 assert( (r_config_rsp_lines.read() > 0) and 4861 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 4862 4863 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4864 } 4865 4866 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 4867 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 4868 4869 #if DEBUG_MEMC_CLEANUP 4870 if(m_debug) 4871 std::cout << " <MEMC " << name() 4872 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 4873 << " IVT_index = " << r_cleanup_index.read() << std::endl; 4874 #endif 4875 break; 4876 } 4877 /////////////////////// 4878 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 4879 // wait if pending request to the TGT_RSP FSM 4880 { 4881 if(r_cleanup_to_tgt_rsp_req.read()) break; 4882 4883 // no pending request 4884 r_cleanup_to_tgt_rsp_req = true; 4885 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 4886 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 4887 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 4888 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4889 4890 #if DEBUG_MEMC_CLEANUP 4891 if(m_debug) 4892 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 4893 << " Send a response to a previous write request: " 4894 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 4895 << " / rtrdid = " << r_cleanup_write_trdid.read() 4896 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 4897 #endif 4898 break; 4899 } 4900 //////////////////////// 4901 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 4902 // on the coherence CLACK network. 4903 { 4904 if(not p_dspin_clack.read) break; 4905 4906 r_cleanup_fsm = CLEANUP_IDLE; 4907 4908 #if DEBUG_MEMC_CLEANUP 4909 if(m_debug) 4910 std::cout << " <MEMC " << name() 4911 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 4912 << " address = " << std::hex << r_cleanup_nline.read()*m_words*4 4913 << " / way = " << std::dec << r_cleanup_way.read() 4914 << " / srcid = " << std::dec << r_cleanup_srcid.read() 4915 << std::endl; 4916 #endif 4917 break; 4918 } 4919 } // end switch cleanup fsm 4920 4921 //////////////////////////////////////////////////////////////////////////////////// 4922 // CAS FSM 4923 //////////////////////////////////////////////////////////////////////////////////// 4924 // The CAS FSM handles the CAS (Compare And Swap) atomic commands. 
4925     //
4926     // This command contains two or four flits:
4927     // - In case of a 32-bit atomic access, the first flit contains the value read
4928     //   by a previous READ instruction, the second flit contains the value to be written.
4929     // - In case of a 64-bit atomic access, the first 2 flits contain the value read
4930     //   by a previous READ instruction, the next 2 flits contain the value to be written.
4931     //
4932     // The target address is cacheable. If it is replicated in L1 caches other
4933     // than the writer's, a coherence operation is done.
4934     //
4935     // It accesses the directory to check hit / miss.
4936     // - In case of miss, the CAS FSM must register a GET transaction in TRT.
4937     //   If a read transaction to the XRAM for this line already exists,
4938     //   or if the transaction table is full, it goes to the WAIT state
4939     //   to release the locks and try again. When the GET transaction has been
4940     //   launched, it goes to the WAIT state and tries again.
4941     //   The CAS request is not consumed in the FIFO until a HIT is obtained.
4942     // - In case of hit...
4943     ///////////////////////////////////////////////////////////////////////////////////
4944 
4945     //std::cout << std::endl << "cas_fsm" << std::endl;
4946 
4947     switch(r_cas_fsm.read())
4948     {
4949         ////////////
4950         case CAS_IDLE: // fill the local rdata buffers
4951         {
4952             if (m_cmd_cas_addr_fifo.rok() )
4953             {
4954 
4955 #if DEBUG_MEMC_CAS
4956                 if(m_debug)
4957                     std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex
4958                               << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read()
4959                               << " addr = " << std::hex << m_cmd_cas_addr_fifo.read()
4960                               << " wdata = " << m_cmd_cas_wdata_fifo.read()
4961                               << " eop = " << std::dec << m_cmd_cas_eop_fifo.read()
4962                               << " cpt = " << std::dec << r_cas_cpt.read() << std::endl;
4963 #endif
4964                 if(m_cmd_cas_eop_fifo.read())
4965                 {
4966                     m_cpt_cas++;
4967                     r_cas_fsm = CAS_DIR_REQ;
4968                 }
4969                 else // we keep the last word in the FIFO
4970                 {
4971                     cmd_cas_fifo_get = true;
4972                 }
4973 
4974                 // We fill the two buffers
4975                 if(r_cas_cpt.read() < 2) // 32 bits access
4976                     r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read();
4977 
4978                 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read())
4979                     r_cas_wdata = m_cmd_cas_wdata_fifo.read();
4980 
4981                 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits...
4982 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 4983 4984 if(r_cas_cpt.read() ==2) 4985 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 4986 4987 r_cas_cpt = r_cas_cpt.read() +1; 4988 } 4989 break; 4990 } 4991 ///////////////// 4992 case CAS_DIR_REQ: 4993 { 4994 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 4995 { 4996 r_cas_fsm = CAS_DIR_LOCK; 4997 } 4998 4999 #if DEBUG_MEMC_CAS 5000 if(m_debug) 5001 std::cout << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " << std::endl; 5002 #endif 5003 break; 5004 } 5005 ///////////////// 5006 case CAS_DIR_LOCK: // Read the directory 5007 { 5008 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5009 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 5010 5011 size_t way = 0; 5012 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 5013 5014 r_cas_is_cnt = entry.is_cnt; 5015 r_cas_dirty = entry.dirty; 5016 r_cas_tag = entry.tag; 5017 r_cas_way = way; 5018 r_cas_copy = entry.owner.srcid; 5019 r_cas_copy_inst = entry.owner.inst; 5020 r_cas_ptr = entry.ptr; 5021 r_cas_count = entry.count; 5022 5023 if(entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 5024 else r_cas_fsm = CAS_MISS_TRT_LOCK; 5025 5026 #if DEBUG_MEMC_CAS 5027 if(m_debug) 5028 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 5029 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5030 << " / hit = " << std::dec << entry.valid 5031 << " / count = " << entry.count 5032 << " / is_cnt = " << entry.is_cnt << std::endl; 5033 #endif 5034 5035 break; 5036 } 5037 ///////////////////// 5038 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 5039 // and check data change in cache 5040 { 5041 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5042 "MEMC ERROR in CAS_DIR_HIT_READ: Bad DIR allocation"); 5043 5044 size_t way = r_cas_way.read(); 5045 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5046 5047 // update directory (lock & dirty bits) 5048 DirectoryEntry entry; 5049 entry.valid = true; 5050 entry.is_cnt = r_cas_is_cnt.read(); 5051 entry.dirty = true; 5052 entry.lock = true; 5053 entry.tag = r_cas_tag.read(); 5054 entry.owner.srcid = r_cas_copy.read(); 5055 entry.owner.inst = r_cas_copy_inst.read(); 5056 entry.count = r_cas_count.read(); 5057 entry.ptr = r_cas_ptr.read(); 5058 5059 m_cache_directory.write(set, way, entry); 5060 5061 // Store data from cache in buffer to do the comparison in next state 5062 m_cache_data.read_line(way, set, r_cas_data); 5063 5064 r_cas_fsm = CAS_DIR_HIT_COMPARE; 5065 5066 #if DEBUG_MEMC_CAS 5067 if(m_debug) 5068 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 5069 << " cache and store it in buffer" << std::endl; 5070 #endif 5071 break; 5072 } 5073 //////////////////////// 5074 case CAS_DIR_HIT_COMPARE: 5075 { 5076 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5077 5078 // check data change 5079 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 5080 5081 if(r_cas_cpt.read() == 4) // 64 bits CAS 5082 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 5083 5084 // to avoid livelock, force the atomic access to fail pseudo-randomly 5085 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 5086 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 5087 5088 if(ok and not forced_fail) r_cas_fsm = CAS_DIR_HIT_WRITE; 5089 else r_cas_fsm = CAS_RSP_FAIL; 5090 5091 #if DEBUG_MEMC_CAS 5092 if(m_debug) 5093 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare old and new data" 5094 << " / expected 
value = " << std::hex << r_cas_rdata[0].read() 5095 << " / actual value = " << std::hex << r_cas_data[word].read() 5096 << " / forced_fail = " << std::dec << forced_fail << std::endl; 5097 #endif 5098 break; 5099 } 5100 ////////////////////// 5101 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 5102 // write data in cache if no CC request 5103 { 5104 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5105 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 5106 5107 // The CAS is a success => sw access to the llsc_global_table 5108 m_llsc_table.sw( m_nline[(addr_t)m_cmd_cas_addr_fifo.read()], 5109 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())], 5110 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())] ); 5111 5112 // test coherence request 5113 if(r_cas_count.read()) // replicated line 5114 { 5115 if(r_cas_is_cnt.read()) 5116 { 5117 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 5118 5119 #if DEBUG_MEMC_CAS 5120 if(m_debug) 5121 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5122 << " Broacast Inval required" 5123 << " / copies = " << r_cas_count.read() << std::endl; 5124 #endif 5125 } 5126 else if( not r_cas_to_cc_send_multi_req.read() and 5127 not r_cas_to_cc_send_brdcast_req.read() ) 5128 { 5129 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5130 5131 #if DEBUG_MEMC_CAS 5132 if(m_debug) 5133 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5134 << " Multi Inval required" 5135 << " / copies = " << r_cas_count.read() << std::endl; 5136 #endif 5137 } 5138 else 5139 { 5140 r_cas_fsm = CAS_WAIT; 5141 5142 #if DEBUG_MEMC_CAS 5143 if(m_debug) 5144 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5145 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 5146 #endif 5147 } 5148 } 5149 else // no copies 5150 { 5151 size_t way = r_cas_way.read(); 5152 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5153 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5154 5155 // cache update 5156 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5157 if(r_cas_cpt.read() == 4) 5158 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5159 5160 r_cas_fsm = CAS_RSP_SUCCESS; 5161 5162 #if DEBUG_MEMC_CAS 5163 if(m_debug) 5164 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 5165 << " way = " << std::dec << way 5166 << " / set = " << set 5167 << " / word = " << word 5168 << " / value = " << r_cas_wdata.read() 5169 << " / count = " << r_cas_count.read() 5170 << " / global_llsc_table access" << std::endl; 5171 #endif 5172 } 5173 break; 5174 } 5175 ///////////////// 5176 case CAS_UPT_LOCK: // try to register the transaction in UPT 5177 // and write data in cache if successful registration 5178 // releases locks to retry later if UPT full 5179 { 5180 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 5181 { 5182 bool wok = false; 5183 size_t index = 0; 5184 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5185 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5186 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5187 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5188 size_t nb_copies = r_cas_count.read(); 5189 5190 wok = m_upt.set( true, // it's an update transaction 5191 false, // it's not a broadcast 5192 true, // response required 5193 false, // no acknowledge required 5194 srcid, 5195 trdid, 5196 pktid, 5197 nline, 5198 nb_copies, 5199 index); 5200 if(wok) // coherence transaction registered in UPT 5201 { 5202 // cache update 5203 size_t way = r_cas_way.read(); 5204 size_t set = 
m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5205 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5206 5207 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5208 if(r_cas_cpt.read() ==4) 5209 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5210 5211 r_cas_upt_index = index; 5212 r_cas_fsm = CAS_UPT_HEAP_LOCK; 5213 } 5214 else // releases the locks protecting UPT and DIR UPT full 5215 { 5216 r_cas_fsm = CAS_WAIT; 5217 } 5218 5219 #if DEBUG_MEMC_CAS 5220 if(m_debug) 5221 std::cout << " <MEMC " << name() 5222 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 5223 << " / wok = " << wok 5224 << " / address = " << std::hex << nline*m_words*4 5225 << " / count = " << nb_copies << std::endl; 5226 #endif 5227 } 5228 break; 5229 } 5230 ///////////// 5231 case CAS_WAIT: // release all locks and retry from beginning 5232 { 5233 5234 #if DEBUG_MEMC_CAS 5235 if(m_debug) 5236 std::cout << " <MEMC " << name() << " CAS_WAIT> Release all locks" << std::endl; 5237 #endif 5238 r_cas_fsm = CAS_DIR_REQ; 5239 break; 5240 } 5241 ////////////////////// 5242 case CAS_UPT_HEAP_LOCK: // lock the heap 5243 { 5244 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5245 { 5246 5247 #if DEBUG_MEMC_CAS 5248 if(m_debug) 5249 { 5250 std::cout << " <MEMC " << name() 5251 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 5252 } 5253 #endif 5254 r_cas_fsm = CAS_UPT_REQ; 5255 } 5256 break; 5257 } 5258 //////////////// 5259 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 5260 { 5261 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 5262 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5263 5264 if(!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 5265 { 5266 r_cas_to_cc_send_brdcast_req = false; 5267 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5268 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5269 r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5270 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 5271 5272 if(r_cas_cpt.read() == 4) 5273 { 5274 r_cas_to_cc_send_is_long = true; 5275 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 5276 } 5277 else 5278 { 5279 r_cas_to_cc_send_is_long = false; 5280 r_cas_to_cc_send_wdata_high = 0; 5281 } 5282 5283 // We put the first copy in the fifo 5284 cas_to_cc_send_fifo_put = true; 5285 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 5286 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 5287 if(r_cas_count.read() == 1) // one single copy 5288 { 5289 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5290 // update responses 5291 cmd_cas_fifo_get = true; 5292 r_cas_to_cc_send_multi_req = true; 5293 r_cas_cpt = 0; 5294 } 5295 else // several copies 5296 { 5297 r_cas_fsm = CAS_UPT_NEXT; 5298 } 5299 5300 #if DEBUG_MEMC_CAS 5301 if(m_debug) 5302 { 5303 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 5304 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5305 << " / wdata = " << std::hex << r_cas_wdata.read() 5306 << " / srcid = " << std::dec << r_cas_copy.read() 5307 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 5308 } 5309 #endif 5310 } 5311 break; 5312 } 5313 ///////////////// 5314 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 5315 { 5316 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5317 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5318 5319 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 5320 cas_to_cc_send_fifo_srcid 
= entry.owner.srcid; 5321 cas_to_cc_send_fifo_inst = entry.owner.inst; 5322 cas_to_cc_send_fifo_put = true; 5323 5324 if(m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 5325 { 5326 r_cas_ptr = entry.next; 5327 if(entry.next == r_cas_ptr.read()) // last copy 5328 { 5329 r_cas_to_cc_send_multi_req = true; 5330 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5331 // all update responses 5332 cmd_cas_fifo_get = true; 5333 r_cas_cpt = 0; 5334 } 5335 } 5336 5337 #if DEBUG_MEMC_CAS 5338 if(m_debug) 5339 { 5340 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 5341 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5342 << " / wdata = " << std::hex << r_cas_wdata.read() 5343 << " / srcid = " << std::dec << entry.owner.srcid 5344 << " / inst = " << std::dec << entry.owner.inst << std::endl; 5345 } 5346 #endif 5347 break; 5348 } 5349 ///////////////////// 5350 case CAS_BC_TRT_LOCK: // get TRT lock to check TRT not full 5351 { 5352 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5353 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 5354 5355 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5356 { 5357 size_t wok_index = 0; 5358 bool wok = !m_trt.full(wok_index); 5359 if( wok ) 5360 { 5361 r_cas_trt_index = wok_index; 5362 r_cas_fsm = CAS_BC_IVT_LOCK; 5363 } 5364 else 5365 { 5366 r_cas_fsm = CAS_WAIT; 5367 } 5368 5369 #if DEBUG_MEMC_CAS 5370 if(m_debug) 5371 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 5372 << " : wok = " << wok << " / index = " << wok_index << std::endl; 5373 #endif 5374 } 5375 break; 5376 } 5377 ///////////////////// 5378 case CAS_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 5379 { 5380 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5381 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas DIR allocation"); 5382 5383 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5384 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas TRT allocation"); 5385 5386 if( r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS ) 5387 { 5388 // register broadcast inval transaction in IVT 5389 bool wok = false; 5390 size_t index = 0; 5391 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5392 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5393 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5394 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5395 size_t nb_copies = r_cas_count.read(); 5396 5397 wok = m_ivt.set( false, // it's an inval transaction 5398 true, // it's a broadcast 5399 true, // response required 5400 false, // no acknowledge required 5401 srcid, 5402 trdid, 5403 pktid, 5404 nline, 5405 nb_copies, 5406 index); 5407 #if DEBUG_MEMC_CAS 5408 if( m_debug and wok ) 5409 std::cout << " <MEMC " << name() << " CAS_BC_IVT_LOCK> Register broadcast inval in IVT" 5410 << " / copies = " << r_cas_count.read() << std::endl; 5411 #endif 5412 r_cas_upt_index = index; 5413 if( wok ) r_cas_fsm = CAS_BC_DIR_INVAL; 5414 else r_cas_fsm = CAS_WAIT; 5415 } 5416 break; 5417 } 5418 ////////////////////// 5419 case CAS_BC_DIR_INVAL: // Register PUT transaction in TRT, 5420 // and inval the DIR entry 5421 { 5422 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5423 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 5424 5425 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5426 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 5427 5428 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 5429 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 5430 5431 // 
set TRT 5432 std::vector<data_t> data_vector; 5433 data_vector.clear(); 5434 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5435 for(size_t i=0; i<m_words; i++) 5436 { 5437 if(i == word) // first modified word 5438 data_vector.push_back( r_cas_wdata.read() ); 5439 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 5440 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 5441 else // unmodified words 5442 data_vector.push_back( r_cas_data[i].read() ); 5443 } 5444 m_trt.set( r_cas_trt_index.read(), 5445 false, // PUT request 5446 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 5447 0, 5448 0, 5449 0, 5450 false, // not a processor read 5451 0, 5452 0, 5453 std::vector<be_t> (m_words,0), 5454 data_vector ); 5455 5456 // invalidate directory entry 5457 DirectoryEntry entry; 5458 entry.valid = false; 5459 entry.dirty = false; 5460 entry.tag = 0; 5461 entry.is_cnt = false; 5462 entry.lock = false; 5463 entry.count = 0; 5464 entry.owner.srcid = 0; 5465 entry.owner.inst = false; 5466 entry.ptr = 0; 5467 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5468 size_t way = r_cas_way.read(); 5469 5470 m_cache_directory.write(set, way, entry); 5471 5472 r_cas_fsm = CAS_BC_CC_SEND; 5473 5474 #if DEBUG_MEMC_CAS 5475 if(m_debug) 5476 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 5477 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 5478 #endif 5479 break; 5480 } 5481 /////////////////// 5482 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 5483 { 5484 if( not r_cas_to_cc_send_multi_req.read() and 5485 not r_cas_to_cc_send_brdcast_req.read() ) 5486 { 5487 r_cas_to_cc_send_multi_req = false; 5488 r_cas_to_cc_send_brdcast_req = true; 5489 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5490 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5491 r_cas_to_cc_send_index = 0; 5492 r_cas_to_cc_send_wdata = 0; 5493 5494 r_cas_fsm = CAS_BC_XRAM_REQ; 5495 5496 #if DEBUG_MEMC_CAS 5497 if(m_debug) 5498 std::cout << " <MEMC " << name() 5499 << " CAS_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 5500 #endif 5501 } 5502 break; 5503 } 5504 //////////////////// 5505 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a PUT transaction 5506 { 5507 if( not r_cas_to_ixr_cmd_req.read() ) 5508 { 5509 r_cas_to_ixr_cmd_req = true; 5510 r_cas_to_ixr_cmd_put = true; 5511 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 5512 r_cas_fsm = CAS_IDLE; 5513 cmd_cas_fifo_get = true; 5514 r_cas_cpt = 0; 5515 5516 #if DEBUG_MEMC_CAS 5517 if(m_debug) 5518 std::cout << " <MEMC " << name() 5519 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 5520 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 5521 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 5522 #endif 5523 } 5524 break; 5525 } 5526 ///////////////// 5527 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 5528 { 5529 if( not r_cas_to_tgt_rsp_req.read() ) 5530 { 5531 cmd_cas_fifo_get = true; 5532 r_cas_cpt = 0; 5533 r_cas_to_tgt_rsp_req = true; 5534 r_cas_to_tgt_rsp_data = 1; 5535 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 5536 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 5537 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 5538 r_cas_fsm = CAS_IDLE; 5539 5540 #if DEBUG_MEMC_CAS 5541 if(m_debug) 5542 std::cout << " <MEMC " << name() 5543 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 5544 #endif 5545 } 5546 break; 
5547 } 5548 //////////////////// 5549 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 5550 { 5551 if( not r_cas_to_tgt_rsp_req.read() ) 5552 { 5553 cmd_cas_fifo_get = true; 5554 r_cas_cpt = 0; 5555 r_cas_to_tgt_rsp_req = true; 5556 r_cas_to_tgt_rsp_data = 0; 5557 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 5558 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 5559 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 5560 r_cas_fsm = CAS_IDLE; 5561 5562 #if DEBUG_MEMC_CAS 5563 if(m_debug) 5564 std::cout << " <MEMC " << name() 5565 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 5566 #endif 5567 } 5568 break; 5569 } 5570 /////////////////////// 5571 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 5572 { 5573 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5574 { 5575 size_t index = 0; 5576 bool hit_read = m_trt.hit_read( 5577 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 5578 bool hit_write = m_trt.hit_write( 5579 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 5580 bool wok = not m_trt.full(index); 5581 5582 #if DEBUG_MEMC_CAS 5583 if(m_debug) 5584 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 5585 << " / hit_read = " << hit_read 5586 << " / hit_write = " << hit_write 5587 << " / wok = " << wok 5588 << " / index = " << index << std::endl; 5589 #endif 5590 5591 if(hit_read or !wok or hit_write) // missing line already requested or TRT full 5592 { 5593 r_cas_fsm = CAS_WAIT; 5594 } 5595 else 5596 { 5597 r_cas_trt_index = index; 5598 r_cas_fsm = CAS_MISS_TRT_SET; 5599 } 5600 } 5601 break; 5602 } 5603 ////////////////////// 5604 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 5605 { 5606 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5607 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 5608 5609 std::vector<be_t> be_vector; 5610 std::vector<data_t> data_vector; 5611 be_vector.clear(); 5612 data_vector.clear(); 5613 for(size_t i=0; i<m_words; i++) 5614 { 5615 be_vector.push_back(0); 5616 data_vector.push_back(0); 5617 } 5618 5619 m_trt.set( r_cas_trt_index.read(), 5620 true, // GET 5621 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 5622 m_cmd_cas_srcid_fifo.read(), 5623 m_cmd_cas_trdid_fifo.read(), 5624 m_cmd_cas_pktid_fifo.read(), 5625 false, // write request from processor 5626 0, 5627 0, 5628 std::vector<be_t>(m_words,0), 5629 std::vector<data_t>(m_words,0) ); 5630 5631 r_cas_fsm = CAS_MISS_XRAM_REQ; 5632 5633 #if DEBUG_MEMC_CAS 5634 if(m_debug) 5635 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register GET transaction in TRT" 5636 << " / address = " << std::hex << (addr_t)m_cmd_cas_addr_fifo.read() 5637 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 5638 #endif 5639 break; 5640 } 5641 ////////////////////// 5642 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM a GET request 5643 { 5644 if( not r_cas_to_ixr_cmd_req.read() ) 5645 { 5646 r_cas_to_ixr_cmd_req = true; 5647 r_cas_to_ixr_cmd_put = false; 5648 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 5649 r_cas_fsm = CAS_WAIT; 5650 5651 #if DEBUG_MEMC_CAS 5652 if(m_debug) 5653 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 5654 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 5655 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 5656 #endif 5657 } 5658 break; 5659 } 5660 } // end switch r_cas_fsm 5661 5662 5663 
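// Editorial sketch (added note, not code from the changeset): the CAS FSM above
// implements, spread over several states, the usual compare-and-swap contract:
// the new value is committed only when the old value still matches the memory
// copy, and the VCI response data encodes the outcome (0 in CAS_RSP_SUCCESS,
// 1 in CAS_RSP_FAIL). The guarded, standalone fragment below restates that
// contract with hypothetical names; it is illustrative only.
#if 0
#include <cstdint>

// Functional model of a single-word CAS as seen from the processor:
// returns the value placed in the response data field by the memory cache.
static uint32_t cas_functional_model(uint32_t & memory_word,
                                     uint32_t   old_expected,
                                     uint32_t   new_value)
{
    if (memory_word == old_expected)
    {
        memory_word = new_value;   // committed only on a match
        return 0;                  // success (CAS_RSP_SUCCESS)
    }
    return 1;                      // failure, memory unchanged (CAS_RSP_FAIL)
}
#endif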
////////////////////////////////////////////////////////////////////////////// 5664 // CC_SEND FSM 5665 ////////////////////////////////////////////////////////////////////////////// 5666 // The CC_SEND fsm controls the DSPIN initiator port on the coherence 5667 // network, used to update or invalidate cache lines in L1 caches. 5668 // 5669 // It implements a round-robin priority between the four possible client FSMs 5670 // XRAM_RSP > CAS > WRITE > CONFIG 5671 // 5672 // Each FSM can request the next services: 5673 // - r_xram_rsp_to_cc_send_multi_req : multi-inval 5674 // r_xram_rsp_to_cc_send_brdcast_req : broadcast-inval 5675 // - r_write_to_cc_send_multi_req : multi-update 5676 // r_write_to_cc_send_brdcast_req : broadcast-inval 5677 // - r_cas_to_cc_send_multi_req : multi-update 5678 // r_cas_to_cc_send_brdcast_req : broadcast-inval 5679 // - r_config_to_cc_send_multi_req : multi-inval 5680 // r_config_to_cc_send_brdcast_req : broadcast-inval 5681 // 5682 // An inval request is a double DSPIN flit command containing: 5683 // 1. the index of the line to be invalidated. 5684 // 5685 // An update request is a multi-flit DSPIN command containing: 5686 // 1. the index of the cache line to be updated. 5687 // 2. the index of the first modified word in the line. 5688 // 3. the data to update 5689 /////////////////////////////////////////////////////////////////////////////// 5690 5691 //std::cout << std::endl << "cc_send_fsm" << std::endl; 5692 5693 switch(r_cc_send_fsm.read()) 5694 { 5695 ///////////////////////// 5696 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 5697 { 5698 // XRAM_RSP 5699 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 5700 r_xram_rsp_to_cc_send_multi_req.read()) 5701 { 5702 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 5703 m_cpt_inval++; 5704 break; 5705 } 5706 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 5707 { 5708 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 5709 m_cpt_inval++; 5710 break; 5711 } 5712 // CAS 5713 if(m_cas_to_cc_send_inst_fifo.rok() or 5714 r_cas_to_cc_send_multi_req.read()) 5715 { 5716 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5717 m_cpt_update++; 5718 break; 5719 } 5720 if(r_cas_to_cc_send_brdcast_req.read()) 5721 { 5722 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5723 m_cpt_inval++; 5724 break; 5725 } 5726 // WRITE 5727 if(m_write_to_cc_send_inst_fifo.rok() or 5728 r_write_to_cc_send_multi_req.read()) 5729 { 5730 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5731 m_cpt_update++; 5732 break; 5733 } 5734 if(r_write_to_cc_send_brdcast_req.read()) 5735 { 5736 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5737 m_cpt_inval++; 5738 break; 5739 } 5740 // CONFIG 5741 if(r_config_to_cc_send_multi_req.read()) 5742 { 5743 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5744 m_cpt_inval++; 5745 break; 5746 } 5747 if(r_config_to_cc_send_brdcast_req.read()) 5748 { 5749 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5750 m_cpt_inval++; 5751 break; 5752 } 5753 break; 5754 } 5755 //////////////////////// 5756 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 5757 { 5758 // CONFIG 5759 if(r_config_to_cc_send_multi_req.read()) 5760 { 5761 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5762 m_cpt_inval++; 5763 break; 5764 } 5765 if(r_config_to_cc_send_brdcast_req.read()) 5766 { 5767 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5768 m_cpt_inval++; 5769 break; 5770 } 5771 // XRAM_RSP 5772 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 5773 r_xram_rsp_to_cc_send_multi_req.read()) 5774 { 5775 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 5776 
m_cpt_inval++; 5777 break; 5778 } 5779 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 5780 { 5781 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 5782 m_cpt_inval++; 5783 break; 5784 } 5785 // CAS 5786 if(m_cas_to_cc_send_inst_fifo.rok() or 5787 r_cas_to_cc_send_multi_req.read()) 5788 { 5789 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5790 m_cpt_update++; 5791 break; 5792 } 5793 if(r_cas_to_cc_send_brdcast_req.read()) 5794 { 5795 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5796 m_cpt_inval++; 5797 break; 5798 } 5799 // WRITE 5800 if(m_write_to_cc_send_inst_fifo.rok() or 5801 r_write_to_cc_send_multi_req.read()) 5802 { 5803 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5804 m_cpt_update++; 5805 break; 5806 } 5807 if(r_write_to_cc_send_brdcast_req.read()) 5808 { 5809 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5810 m_cpt_inval++; 5811 break; 5812 } 5813 break; 5814 } 5815 /////////////////////////// 5816 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 5817 { 5818 // CAS 5819 if(m_cas_to_cc_send_inst_fifo.rok() or 5820 r_cas_to_cc_send_multi_req.read()) 5821 { 5822 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5823 m_cpt_update++; 5824 break; 5825 } 5826 if(r_cas_to_cc_send_brdcast_req.read()) 5827 { 5828 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5829 m_cpt_inval++; 5830 break; 5831 } 5832 // WRITE 5833 if(m_write_to_cc_send_inst_fifo.rok() or 5834 r_write_to_cc_send_multi_req.read()) 5835 { 5836 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5837 m_cpt_update++; 5838 break; 5839 } 5840 5841 if(r_write_to_cc_send_brdcast_req.read()) 5842 { 5843 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5844 m_cpt_inval++; 5845 break; 5846 } 5847 // CONFIG 5848 if(r_config_to_cc_send_multi_req.read()) 5849 { 5850 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5851 m_cpt_inval++; 5852 break; 5853 } 5854 if(r_config_to_cc_send_brdcast_req.read()) 5855 { 5856 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5857 m_cpt_inval++; 5858 break; 5859 } 5860 // XRAM_RSP 5861 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 5862 r_xram_rsp_to_cc_send_multi_req.read()) 5863 { 5864 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 5865 m_cpt_inval++; 5866 break; 5867 } 5868 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 5869 { 5870 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 5871 m_cpt_inval++; 5872 break; 5873 } 5874 break; 5875 } 5876 ////////////////////// 5877 case CC_SEND_CAS_IDLE: // CLEANUP FSM has highest priority 5878 { 5879 if(m_write_to_cc_send_inst_fifo.rok() or 5880 r_write_to_cc_send_multi_req.read()) 5881 { 5882 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 5883 m_cpt_update++; 5884 break; 5885 } 5886 if(r_write_to_cc_send_brdcast_req.read()) 5887 { 5888 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 5889 m_cpt_inval++; 5890 break; 5891 } 5892 // CONFIG 5893 if(r_config_to_cc_send_multi_req.read()) 5894 { 5895 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5896 m_cpt_inval++; 5897 break; 5898 } 5899 if(r_config_to_cc_send_brdcast_req.read()) 5900 { 5901 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 5902 m_cpt_inval++; 5903 break; 5904 } 5905 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 5906 r_xram_rsp_to_cc_send_multi_req.read()) 5907 { 5908 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 5909 m_cpt_inval++; 5910 break; 5911 } 5912 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 5913 { 5914 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 5915 m_cpt_inval++; 5916 break; 5917 } 5918 if(m_cas_to_cc_send_inst_fifo.rok() or 5919 r_cas_to_cc_send_multi_req.read()) 5920 { 5921 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 5922 
m_cpt_update++; 5923 break; 5924 } 5925 if(r_cas_to_cc_send_brdcast_req.read()) 5926 { 5927 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 5928 m_cpt_inval++; 5929 break; 5930 } 5931 break; 5932 } 5933 ///////////////////////////////// 5934 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 5935 { 5936 if(m_config_to_cc_send_inst_fifo.rok()) 5937 { 5938 if(not p_dspin_m2p.read) break; 5939 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 5940 break; 5941 } 5942 if(r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 5943 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 5944 break; 5945 } 5946 //////////////////////////////// 5947 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 5948 { 5949 if(not p_dspin_m2p.read) break; 5950 m_cpt_inval_mult++; 5951 config_to_cc_send_fifo_get = true; 5952 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 5953 5954 #if DEBUG_MEMC_CC_SEND 5955 if(m_debug) 5956 std::cout << " <MEMC " << name() 5957 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 5958 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 5959 #endif 5960 break; 5961 } 5962 /////////////////////////////////// 5963 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 5964 { 5965 if(not p_dspin_m2p.read) break; 5966 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 5967 break; 5968 } 5969 ////////////////////////////////// 5970 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 5971 { 5972 if(not p_dspin_m2p.read) break; 5973 m_cpt_inval_brdcast++; 5974 r_config_to_cc_send_brdcast_req = false; 5975 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 5976 5977 #if DEBUG_MEMC_CC_SEND 5978 if(m_debug) 5979 std::cout << " <MEMC " << name() 5980 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 5981 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 5982 #endif 5983 break; 5984 } 5985 /////////////////////////////////// 5986 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 5987 { 5988 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 5989 { 5990 if(not p_dspin_m2p.read) break; 5991 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 5992 break; 5993 } 5994 if(r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 5995 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 5996 break; 5997 } 5998 ////////////////////////////////// 5999 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 6000 { 6001 if(not p_dspin_m2p.read) break; 6002 m_cpt_inval_mult++; 6003 xram_rsp_to_cc_send_fifo_get = true; 6004 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6005 6006 #if DEBUG_MEMC_CC_SEND 6007 if(m_debug) 6008 std::cout << " <MEMC " << name() 6009 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 6010 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6011 #endif 6012 break; 6013 } 6014 ///////////////////////////////////// 6015 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 6016 { 6017 if(not p_dspin_m2p.read) break; 6018 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 6019 break; 6020 } 6021 //////////////////////////////////// 6022 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 6023 { 6024 if(not p_dspin_m2p.read) break; 6025 m_cpt_inval_brdcast++; 6026 r_xram_rsp_to_cc_send_brdcast_req = false; 6027 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6028 6029 #if DEBUG_MEMC_CC_SEND 6030 if(m_debug) 
6031 std::cout << " <MEMC " << name() 6032 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 6033 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6034 #endif 6035 break; 6036 } 6037 ////////////////////////////////// 6038 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 6039 { 6040 if(not p_dspin_m2p.read) break; 6041 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 6042 break; 6043 } 6044 ///////////////////////////////// 6045 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 6046 { 6047 if(not p_dspin_m2p.read) break; 6048 6049 m_cpt_inval_brdcast++; 6050 6051 r_write_to_cc_send_brdcast_req = false; 6052 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6053 6054 #if DEBUG_MEMC_CC_SEND 6055 if(m_debug) 6056 std::cout << " <MEMC " << name() 6057 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 6058 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 6059 #endif 6060 break; 6061 } 6062 /////////////////////////////// 6063 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 6064 { 6065 if(m_write_to_cc_send_inst_fifo.rok()) 6066 { 6067 if(not p_dspin_m2p.read) break; 6068 6069 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 6070 break; 6071 } 6072 6073 if(r_write_to_cc_send_multi_req.read()) 6074 { 6075 r_write_to_cc_send_multi_req = false; 6076 } 6077 6078 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6079 break; 6080 } 6081 ////////////////////////////// 6082 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 6083 { 6084 if(not p_dspin_m2p.read) break; 6085 m_cpt_update_mult++; 6086 6087 r_cc_send_cpt = 0; 6088 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 6089 6090 #if DEBUG_MEMC_CC_SEND 6091 if(m_debug) 6092 std::cout << " <MEMC " << name() 6093 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for address " 6094 << r_write_to_cc_send_nline.read()*m_words*4 << std::endl; 6095 #endif 6096 break; 6097 } 6098 ///////////////////////////// 6099 case CC_SEND_WRITE_UPDT_DATA: // send data flits for multi-update (from WRITE FSM) 6100 { 6101 if(not p_dspin_m2p.read) break; 6102 if(r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)) 6103 { 6104 write_to_cc_send_fifo_get = true; 6105 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6106 break; 6107 } 6108 6109 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 6110 break; 6111 } 6112 //////////////////////////////// 6113 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 6114 { 6115 if(not p_dspin_m2p.read) break; 6116 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 6117 break; 6118 } 6119 /////////////////////////////// 6120 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 6121 { 6122 if(not p_dspin_m2p.read) break; 6123 m_cpt_inval_brdcast++; 6124 6125 r_cas_to_cc_send_brdcast_req = false; 6126 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6127 6128 #if DEBUG_MEMC_CC_SEND 6129 if(m_debug) 6130 std::cout << " <MEMC " << name() 6131 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for address: " 6132 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6133 #endif 6134 break; 6135 } 6136 ///////////////////////////// 6137 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 6138 { 6139 if(m_cas_to_cc_send_inst_fifo.rok()) 6140 { 6141 if(not p_dspin_m2p.read) break; 6142 6143 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 6144 break; 6145 } 6146 6147 // no more packets to send for the multi-update 6148 if(r_cas_to_cc_send_multi_req.read()) 
6149 { 6150 r_cas_to_cc_send_multi_req = false; 6151 } 6152 6153 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6154 break; 6155 } 6156 //////////////////////////// 6157 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 6158 { 6159 if(not p_dspin_m2p.read) break; 6160 6161 m_cpt_update_mult++; 6162 6163 r_cc_send_cpt = 0; 6164 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 6165 6166 #if DEBUG_MEMC_CC_SEND 6167 if(m_debug) 6168 std::cout << " <MEMC " << name() 6169 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for address " 6170 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6171 #endif 6172 break; 6173 } 6174 /////////////////////////// 6175 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 6176 { 6177 if(not p_dspin_m2p.read) break; 6178 6179 if(r_cas_to_cc_send_is_long.read()) 6180 { 6181 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 6182 break; 6183 } 6184 6185 cas_to_cc_send_fifo_get = true; 6186 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6187 break; 6188 } 6189 //////////////////////////////// 6190 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for multi-update (from CAS FSM) 6191 { 6192 if(not p_dspin_m2p.read) break; 6193 cas_to_cc_send_fifo_get = true; 6194 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6195 break; 6196 } 6197 } 6198 // end switch r_cc_send_fsm 6199 6200 ////////////////////////////////////////////////////////////////////////////// 6201 // CC_RECEIVE FSM 6202 ////////////////////////////////////////////////////////////////////////////// 6203 // The CC_RECEIVE fsm controls the DSPIN target port on the coherence 6204 // network. 6205 ////////////////////////////////////////////////////////////////////////////// 6206 6207 //std::cout << std::endl << "cc_receive_fsm" << std::endl; 6208 6209 switch(r_cc_receive_fsm.read()) 6210 { 6211 ///////////////////// 6212 case CC_RECEIVE_IDLE: 6213 { 6214 if(not p_dspin_p2m.write) break; 6215 6216 uint8_t type = 6217 DspinDhccpParam::dspin_get( 6218 p_dspin_p2m.data.read(), 6219 DspinDhccpParam::P2M_TYPE); 6220 6221 if((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or 6222 (type == DspinDhccpParam::TYPE_CLEANUP_INST)) 6223 { 6224 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 6225 break; 6226 } 6227 6228 if(type == DspinDhccpParam::TYPE_MULTI_ACK) 6229 { 6230 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 6231 break; 6232 } 6233 6234 assert(false and 6235 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6236 "Illegal type in coherence request"); 6237 6238 break; 6239 } 6240 //////////////////////// 6241 case CC_RECEIVE_CLEANUP: 6242 { 6243 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6244 6245 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6246 break; 6247 6248 assert(not p_dspin_p2m.eop.read() and 6249 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6250 "CLEANUP command must have two flits"); 6251 6252 cc_receive_to_cleanup_fifo_put = true; 6253 r_cc_receive_fsm = CC_RECEIVE_CLEANUP_EOP; 6254 6255 break; 6256 } 6257 //////////////////////////// 6258 case CC_RECEIVE_CLEANUP_EOP: 6259 { 6260 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6261 6262 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6263 break; 6264 6265 assert(p_dspin_p2m.eop.read() and 6266 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6267 "CLEANUP command must have two flits"); 6268 6269 cc_receive_to_cleanup_fifo_put = true; 6270 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6271 6272 break; 6273 } 6274 6275 ////////////////////////// 6276 case CC_RECEIVE_MULTI_ACK: 6277 { 6278 // write MULTI_ACK flit 
in CC_RECEIVE to MULTI_ACK fifo 6279 6280 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 6281 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 6282 break; 6283 6284 assert(p_dspin_p2m.eop.read() and 6285 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6286 "MULTI_ACK command must have one flit"); 6287 6288 cc_receive_to_multi_ack_fifo_put = true; 6289 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6290 break; 6291 } 6292 } 6293 6294 ////////////////////////////////////////////////////////////////////////// 6295 // TGT_RSP FSM 6296 ////////////////////////////////////////////////////////////////////////// 6297 // The TGT_RSP fsm sends the responses on the VCI target port 6298 // with a round robin priority between eigth requests : 6299 // - r_config_to_tgt_rsp_req 6300 // - r_tgt_cmd_to_tgt_rsp_req 6301 // - r_read_to_tgt_rsp_req 6302 // - r_write_to_tgt_rsp_req 6303 // - r_cas_to_tgt_rsp_req 6304 // - r_cleanup_to_tgt_rsp_req 6305 // - r_xram_rsp_to_tgt_rsp_req 6306 // - r_multi_ack_to_tgt_rsp_req 6307 // 6308 // The ordering is : 6309 // config >tgt_cmd > read > write > cas > xram > multi_ack > cleanup 6310 ////////////////////////////////////////////////////////////////////////// 6311 6312 //std::cout << std::endl << "tgt_rsp_fsm" << std::endl; 6313 6314 switch(r_tgt_rsp_fsm.read()) 6315 { 6316 ///////////////////////// 6317 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 6318 { 6319 if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6320 else if(r_read_to_tgt_rsp_req) 6321 { 6322 r_tgt_rsp_fsm = TGT_RSP_READ; 6323 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6324 } 6325 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6326 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6327 else if(r_xram_rsp_to_tgt_rsp_req) 6328 { 6329 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6330 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6331 } 6332 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6333 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6334 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6335 break; 6336 } 6337 ////////////////////////// 6338 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 6339 { 6340 if(r_read_to_tgt_rsp_req) 6341 { 6342 r_tgt_rsp_fsm = TGT_RSP_READ; 6343 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6344 } 6345 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6346 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6347 else if(r_xram_rsp_to_tgt_rsp_req) 6348 { 6349 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6350 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6351 } 6352 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6353 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6354 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6355 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6356 break; 6357 } 6358 /////////////////////// 6359 case TGT_RSP_READ_IDLE: // write requests have the highest priority 6360 { 6361 if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6362 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6363 else if(r_xram_rsp_to_tgt_rsp_req) 6364 { 6365 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6366 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6367 } 6368 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6369 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6370 else if(r_config_to_tgt_rsp_req) 
r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6371 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6372 else if(r_read_to_tgt_rsp_req) 6373 { 6374 r_tgt_rsp_fsm = TGT_RSP_READ; 6375 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6376 } 6377 break; 6378 } 6379 //////////////////////// 6380 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 6381 { 6382 if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6383 else if(r_xram_rsp_to_tgt_rsp_req) 6384 { 6385 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6386 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6387 } 6388 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6389 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6390 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6391 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6392 else if(r_read_to_tgt_rsp_req) 6393 { 6394 r_tgt_rsp_fsm = TGT_RSP_READ; 6395 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6396 } 6397 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6398 break; 6399 } 6400 /////////////////////// 6401 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 6402 { 6403 if(r_xram_rsp_to_tgt_rsp_req) 6404 { 6405 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6406 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6407 } 6408 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 6409 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6410 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6411 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6412 else if(r_read_to_tgt_rsp_req) 6413 { 6414 r_tgt_rsp_fsm = TGT_RSP_READ; 6415 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6416 } 6417 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6418 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6419 break; 6420 } 6421 /////////////////////// 6422 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 6423 { 6424 6425 if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 6426 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6427 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6428 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6429 else if(r_read_to_tgt_rsp_req) 6430 { 6431 r_tgt_rsp_fsm = TGT_RSP_READ; 6432 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6433 } 6434 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6435 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6436 else if(r_xram_rsp_to_tgt_rsp_req) 6437 { 6438 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6439 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6440 } 6441 break; 6442 } 6443 //////////////////////////// 6444 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 6445 { 6446 if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6447 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6448 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6449 else if(r_read_to_tgt_rsp_req) 6450 { 6451 r_tgt_rsp_fsm = TGT_RSP_READ; 6452 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6453 } 6454 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6455 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6456 else if(r_xram_rsp_to_tgt_rsp_req) 6457 { 6458 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6459 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6460 } 6461 else if(r_multi_ack_to_tgt_rsp_req) 
r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6462 break; 6463 } 6464 ////////////////////////// 6465 case TGT_RSP_CLEANUP_IDLE: // tgt cmd requests have the highest priority 6466 { 6467 if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6468 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6469 else if(r_read_to_tgt_rsp_req) 6470 { 6471 r_tgt_rsp_fsm = TGT_RSP_READ; 6472 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6473 } 6474 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6475 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 6476 else if(r_xram_rsp_to_tgt_rsp_req) 6477 { 6478 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6479 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6480 } 6481 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 6482 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6483 break; 6484 } 6485 //////////////////// 6486 case TGT_RSP_CONFIG: // send the response for a config transaction 6487 { 6488 if ( p_vci_tgt.rspack ) 6489 { 6490 r_config_to_tgt_rsp_req = false; 6491 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 6492 6493 #if DEBUG_MEMC_TGT_RSP 6494 if( m_debug ) 6495 { 6496 std::cout 6497 << " <MEMC " << name() 6498 << " TGT_RSP_CONFIG> Config transaction completed response" 6499 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 6500 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 6501 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 6502 << std::endl; 6503 } 6504 #endif 6505 } 6506 break; 6507 } 6508 ///////////////////// 6509 case TGT_RSP_TGT_CMD: // send the response for a configuration access 6510 { 6511 if ( p_vci_tgt.rspack ) 6512 { 6513 r_tgt_cmd_to_tgt_rsp_req = false; 6514 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 6515 6516 #if DEBUG_MEMC_TGT_RSP 6517 if( m_debug ) 6518 { 6519 std::cout 6520 << " <MEMC " << name() 6521 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 6522 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 6523 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 6524 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 6525 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 6526 << std::endl; 6527 } 6528 #endif 6529 } 6530 break; 6531 } 6532 ////////////////// 6533 case TGT_RSP_READ: // send the response to a read 6534 { 6535 if ( p_vci_tgt.rspack ) 6536 { 6537 6538 #if DEBUG_MEMC_TGT_RSP 6539 if( m_debug ) 6540 { 6541 std::cout 6542 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 6543 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 6544 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 6545 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 6546 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 6547 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 6548 } 6549 #endif 6550 6551 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 6552 r_read_to_tgt_rsp_length.read() - 1; 6553 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 6554 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 6555 6556 if ((is_last_word and not is_ll) or 6557 (r_tgt_rsp_key_sent.read() and is_ll)) 6558 { 6559 // Last word in case of READ or second flit in case if LL 6560 r_tgt_rsp_key_sent = false; 6561 r_read_to_tgt_rsp_req = false; 6562 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 6563 } 6564 else 6565 { 6566 if (is_ll) 6567 { 6568 r_tgt_rsp_key_sent = true; // Send second flit of ll 6569 } 6570 else 6571 { 6572 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send 
next word of read 6573 } 6574 } 6575 } 6576 break; 6577 } 6578 ////////////////// 6579 case TGT_RSP_WRITE: // send the write acknowledge 6580 { 6581 if(p_vci_tgt.rspack) 6582 { 6583 6584 #if DEBUG_MEMC_TGT_RSP 6585 if(m_debug) 6586 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 6587 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 6588 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 6589 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl; 6590 #endif 6591 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 6592 r_write_to_tgt_rsp_req = false; 6593 } 6594 break; 6595 } 6596 ///////////////////// 6597 case TGT_RSP_CLEANUP: // pas clair pour moi (AG) 6598 { 6599 if(p_vci_tgt.rspack) 6600 { 6601 6602 #if DEBUG_MEMC_TGT_RSP 6603 if(m_debug) 6604 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 6605 << " / rsrcid = " << std::hex << r_cleanup_to_tgt_rsp_srcid.read() 6606 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 6607 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl; 6608 #endif 6609 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 6610 r_cleanup_to_tgt_rsp_req = false; 6611 } 6612 break; 6613 } 6614 ///////////////// 6615 case TGT_RSP_CAS: // send one atomic word response 6616 { 6617 if(p_vci_tgt.rspack) 6618 { 6619 6620 #if DEBUG_MEMC_TGT_RSP 6621 if(m_debug) 6622 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 6623 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 6624 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read() 6625 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl; 6626 #endif 6627 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 6628 r_cas_to_tgt_rsp_req = false; 6629 } 6630 break; 6631 } 6632 ////////////////// 6633 case TGT_RSP_XRAM: // send the response after XRAM access 6634 { 6635 if ( p_vci_tgt.rspack ) 6636 { 6637 6638 #if DEBUG_MEMC_TGT_RSP 6639 if( m_debug ) 6640 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 6641 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 6642 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 6643 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 6644 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 6645 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 6646 #endif 6647 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 6648 r_xram_rsp_to_tgt_rsp_length.read() - 1; 6649 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 6650 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 6651 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 6652 6653 if (((is_last_word or is_error) and not is_ll) or 6654 (r_tgt_rsp_key_sent.read() and is_ll)) 6655 { 6656 // Last word sent in case of READ or second flit sent in case if LL 6657 r_tgt_rsp_key_sent = false; 6658 r_xram_rsp_to_tgt_rsp_req = false; 6659 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 6660 } 6661 else 6662 { 6663 if (is_ll) 6664 { 6665 r_tgt_rsp_key_sent = true; // Send second flit of ll 6666 } 6667 else 6668 { 6669 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 6670 } 6671 } 6672 } 6673 break; 6674 } 6675 /////////////////////// 6676 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction 6677 { 6678 if(p_vci_tgt.rspack) 6679 { 6680 6681 #if DEBUG_MEMC_TGT_RSP 6682 if(m_debug) 6683 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 6684 << " / rsrcid = " << 
std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 6685 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 6686 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 6687 #endif 6688 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 6689 r_multi_ack_to_tgt_rsp_req = false; 6690 } 6691 break; 6692 } 6693 } // end switch tgt_rsp_fsm 6694 6695 //////////////////////////////////////////////////////////////////////////////////// 6696 // ALLOC_UPT FSM 6697 //////////////////////////////////////////////////////////////////////////////////// 6698 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT), 6699 // with a round robin priority between three FSMs, with the following order: 6700 // WRITE -> CAS -> MULTI_ACK 6701 // - The WRITE FSM initiates update transaction and sets a new entry in UPT. 6702 // - The CAS FSM does the same thing as the WRITE FSM. 6703 // - The MULTI_ACK FSM complete those trasactions and erase the UPT entry. 6704 // The resource is always allocated. 6705 ///////////////////////////////////////////////////////////////////////////////////// 6706 6707 //std::cout << std::endl << "alloc_upt_fsm" << std::endl; 6708 6709 switch(r_alloc_upt_fsm.read()) 6710 { 6711 ///////////////////////// 6712 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 6713 if (r_write_fsm.read() != WRITE_UPT_LOCK) 6714 { 6715 if (r_cas_fsm.read() == CAS_UPT_LOCK) 6716 r_alloc_upt_fsm = ALLOC_UPT_CAS; 6717 6718 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 6719 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 6720 } 6721 break; 6722 6723 ///////////////////////// 6724 case ALLOC_UPT_CAS: // allocated to CAS FSM 6725 if (r_cas_fsm.read() != CAS_UPT_LOCK) 6726 { 6727 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 6728 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 6729 6730 else if (r_write_fsm.read() == WRITE_UPT_LOCK) 6731 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 6732 } 6733 break; 6734 6735 ///////////////////////// 6736 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 6737 if (r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK) 6738 { 6739 if (r_write_fsm.read() == WRITE_UPT_LOCK) 6740 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 6741 6742 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 6743 r_alloc_upt_fsm = ALLOC_UPT_CAS; 6744 } 6745 break; 6746 } // end switch r_alloc_upt_fsm 6747 6748 //////////////////////////////////////////////////////////////////////////////////// 6749 // ALLOC_IVT FSM 6750 //////////////////////////////////////////////////////////////////////////////////// 6751 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT), 6752 // with a round robin priority between five FSMs, with the following order: 6753 // WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG 6754 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry 6755 // in IVT. 6756 // - The CAS FSM does the same thing as the WRITE FSM. 6757 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transaction and sets 6758 // a new entry in the IVT 6759 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM 6760 // - The CLEANUP FSM complete those trasactions and erase the IVT entry. 6761 // The resource is always allocated. 
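// Editorial sketch (added note, not code from the changeset): the ALLOC_UPT FSM
// above and the ALLOC_IVT / ALLOC_DIR / ALLOC_TRT / ALLOC_HEAP FSMs below all
// follow the same hand-written round-robin pattern: the current owner keeps the
// resource while it is in one of its lock states, and when it leaves them the
// other clients are scanned in a fixed cyclic order starting from the owner's
// successor, so the resource is always allocated. The guarded, standalone
// fragment below is a compact equivalent with hypothetical names; the actual
// FSMs spell the scan out case by case.
#if 0
#include <cstddef>

static const size_t NB_CLIENTS = 5;   // e.g. WRITE, XRAM_RSP, CLEANUP, CAS, CONFIG for the IVT

// One allocation step: 'owner' holds the resource, holds[i] is true while
// client i is in one of its *_LOCK states, wants[i] is true when it requests it.
static size_t round_robin_step(size_t owner, const bool holds[], const bool wants[])
{
    if (holds[owner]) return owner;            // owner keeps the lock while using it
    for (size_t k = 1; k <= NB_CLIENTS; k++)   // scan the others in cyclic order
    {
        size_t next = (owner + k) % NB_CLIENTS;
        if (wants[next]) return next;          // first requester after the owner wins
    }
    return owner;                              // nobody requests: allocation unchanged
}
// Example: owner = CLEANUP and only CAS requesting => the scan grants CAS,
// which matches the ALLOC_IVT_CLEANUP case below.
#endif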
6762 ///////////////////////////////////////////////////////////////////////////////////// 6763 6764 //std::cout << std::endl << "alloc_ivt_fsm" << std::endl; 6765 6766 switch(r_alloc_ivt_fsm.read()) 6767 { 6768 ///////////////////// 6769 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 6770 if (r_write_fsm.read() != WRITE_BC_IVT_LOCK) 6771 { 6772 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6773 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6774 6775 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6776 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6777 6778 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6779 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6780 6781 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6782 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6783 } 6784 break; 6785 6786 //////////////////////// 6787 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 6788 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 6789 { 6790 if(r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6791 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6792 6793 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6794 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6795 6796 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6797 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6798 6799 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6800 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6801 } 6802 break; 6803 6804 /////////////////////// 6805 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 6806 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 6807 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT)) 6808 { 6809 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6810 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6811 6812 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6813 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6814 6815 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6816 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6817 6818 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6819 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6820 } 6821 break; 6822 6823 ////////////////////////// 6824 case ALLOC_IVT_CAS: // allocated to CAS FSM 6825 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 6826 { 6827 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6828 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6829 6830 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6831 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6832 6833 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6834 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6835 6836 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6837 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6838 } 6839 break; 6840 6841 ////////////////////////// 6842 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 6843 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 6844 { 6845 if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 6846 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6847 6848 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6849 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6850 6851 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 6852 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 6853 6854 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 6855 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6856 } 6857 break; 6858 6859 } // end switch r_alloc_ivt_fsm 6860 6861 //////////////////////////////////////////////////////////////////////////////////// 6862 // ALLOC_DIR FSM 6863 //////////////////////////////////////////////////////////////////////////////////// 6864 // The ALLOC_DIR FSM allocates the access to the directory and 6865 // the data cache with a round robin priority between 6 user FSMs : 6866 // The cyclic ordering is CONFIG > READ > WRITE > CAS > CLEANUP > XRAM_RSP 6867 // The ressource is always 
allocated. 6868 ///////////////////////////////////////////////////////////////////////////////////// 6869 6870 //std::cout << std::endl << "alloc_dir_fsm" << std::endl; 6871 6872 switch(r_alloc_dir_fsm.read()) 6873 { 6874 ///////////////////// 6875 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 6876 // All the WAYS of a SET initialized in parallel 6877 6878 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 6879 6880 if(r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 6881 { 6882 m_cache_directory.init(); 6883 r_alloc_dir_fsm = ALLOC_DIR_READ; 6884 } 6885 break; 6886 6887 ////////////////////// 6888 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 6889 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 6890 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 6891 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 6892 (r_config_fsm.read() != CONFIG_TRT_SET) and 6893 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 6894 { 6895 if(r_read_fsm.read() == READ_DIR_REQ) 6896 r_alloc_dir_fsm = ALLOC_DIR_READ; 6897 6898 else if(r_write_fsm.read() == WRITE_DIR_REQ) 6899 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 6900 6901 else if(r_cas_fsm.read() == CAS_DIR_REQ) 6902 r_alloc_dir_fsm = ALLOC_DIR_CAS; 6903 6904 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 6905 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 6906 6907 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 6908 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 6909 } 6910 break; 6911 6912 //////////////////// 6913 case ALLOC_DIR_READ: // allocated to READ FSM 6914 if( ((r_read_fsm.read() != READ_DIR_REQ) and 6915 (r_read_fsm.read() != READ_DIR_LOCK) and 6916 (r_read_fsm.read() != READ_TRT_LOCK) and 6917 (r_read_fsm.read() != READ_HEAP_REQ)) 6918 or 6919 ((r_read_fsm.read() == READ_TRT_LOCK) and 6920 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ)) ) 6921 { 6922 if(r_write_fsm.read() == WRITE_DIR_REQ) 6923 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 6924 6925 else if(r_cas_fsm.read() == CAS_DIR_REQ) 6926 r_alloc_dir_fsm = ALLOC_DIR_CAS; 6927 6928 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 6929 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 6930 6931 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 6932 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 6933 6934 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 6935 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 6936 } 6937 break; 6938 6939 ///////////////////// 6940 case ALLOC_DIR_WRITE: // allocated to WRITE FSM 6941 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 6942 (r_write_fsm.read() != WRITE_DIR_LOCK) and 6943 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 6944 (r_write_fsm.read() != WRITE_DIR_HIT) and 6945 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 6946 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 6947 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 6948 (r_write_fsm.read() != WRITE_UPT_LOCK) and 6949 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK)) 6950 or 6951 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 6952 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 6953 or 6954 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 6955 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 6956 { 6957 if(r_cas_fsm.read() == CAS_DIR_REQ) 6958 r_alloc_dir_fsm = ALLOC_DIR_CAS; 6959 6960 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 6961 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 6962 6963 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 6964 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 6965 6966 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 6967 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 6968 6969 else if(r_read_fsm.read() == READ_DIR_REQ) 6970 r_alloc_dir_fsm = 
ALLOC_DIR_READ; 6971 } 6972 break; 6973 6974 /////////////////// 6975 case ALLOC_DIR_CAS: // allocated to CAS FSM 6976 if(((r_cas_fsm.read() != CAS_DIR_REQ) and 6977 (r_cas_fsm.read() != CAS_DIR_LOCK) and 6978 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 6979 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 6980 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 6981 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 6982 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 6983 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 6984 (r_cas_fsm.read() != CAS_UPT_LOCK) and 6985 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 6986 or 6987 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 6988 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 6989 or 6990 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 6991 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 6992 { 6993 if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 6994 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 6995 6996 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 6997 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 6998 6999 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7000 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7001 7002 else if(r_read_fsm.read() == READ_DIR_REQ) 7003 r_alloc_dir_fsm = ALLOC_DIR_READ; 7004 7005 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7006 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7007 } 7008 break; 7009 7010 /////////////////////// 7011 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 7012 if((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 7013 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 7014 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 7015 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK)) 7016 { 7017 if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7018 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7019 7020 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7021 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7022 7023 else if(r_read_fsm.read() == READ_DIR_REQ) 7024 r_alloc_dir_fsm = ALLOC_DIR_READ; 7025 7026 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7027 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7028 7029 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7030 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7031 } 7032 break; 7033 7034 //////////////////////// 7035 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 7036 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 7037 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7038 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7039 { 7040 if(r_config_fsm.read() == CONFIG_DIR_REQ) 7041 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7042 7043 else if(r_read_fsm.read() == READ_DIR_REQ) 7044 r_alloc_dir_fsm = ALLOC_DIR_READ; 7045 7046 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7047 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7048 7049 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7050 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7051 7052 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7053 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7054 } 7055 break; 7056 7057 } // end switch alloc_dir_fsm 7058 7059 //////////////////////////////////////////////////////////////////////////////////// 7060 // ALLOC_TRT FSM 7061 //////////////////////////////////////////////////////////////////////////////////// 7062 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 7063 // with a round robin priority between 7 user FSMs : 7064 // The priority is READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG 7065 // The ressource is always allocated. 
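// Editorial sketch (added note, not code from the changeset): the client FSMs
// arbitrated below always use the Transaction Table through the same probes,
// as in the CAS_MISS_TRT_LOCK / CAS_MISS_TRT_SET states above: hit_read() and
// hit_write() detect a pending GET or PUT on the same cache line, full()
// reports whether a free entry (and its index) is available, and set()
// registers the new GET/PUT transaction. The guarded, standalone fragment
// below is a deliberately simplified model of those probes, inferred from the
// calls in this file; it is not the real TransactionTab class.
#if 0
#include <cstdint>
#include <cstddef>
#include <vector>

struct TrtEntryModel
{
    bool     valid;      // entry in use
    bool     xram_read;  // true = GET (read from XRAM), false = PUT (write to XRAM)
    uint64_t nline;      // index of the cache line concerned by the transaction
};

struct TrtModel
{
    std::vector<TrtEntryModel> tab;

    explicit TrtModel(size_t n_entries) : tab(n_entries, TrtEntryModel{false, false, 0}) {}

    // a pending GET already exists for this line (its index is returned)
    bool hit_read(uint64_t nline, size_t & index) const
    {
        for (size_t i = 0; i < tab.size(); i++)
            if (tab[i].valid and tab[i].xram_read and (tab[i].nline == nline))
            {
                index = i;
                return true;
            }
        return false;
    }

    // a pending PUT already exists for this line
    bool hit_write(uint64_t nline) const
    {
        for (size_t i = 0; i < tab.size(); i++)
            if (tab[i].valid and not tab[i].xram_read and (tab[i].nline == nline))
                return true;
        return false;
    }

    // true when no free entry exists; otherwise 'index' receives a free slot
    bool full(size_t & index) const
    {
        for (size_t i = 0; i < tab.size(); i++)
            if (not tab[i].valid)
            {
                index = i;
                return false;
            }
        return true;
    }
};
#endif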
7066 /////////////////////////////////////////////////////////////////////////////////// 7067 7068 //std::cout << std::endl << "alloc_trt_fsm" << std::endl; 7069 7070 switch(r_alloc_trt_fsm.read()) 7071 { 7072 //////////////////// 7073 case ALLOC_TRT_READ: 7074 if(r_read_fsm.read() != READ_TRT_LOCK) 7075 { 7076 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7077 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7078 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7079 7080 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7081 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7082 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7083 7084 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7085 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7086 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7087 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7088 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7089 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7090 7091 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7092 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7093 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7094 7095 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7096 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7097 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7098 7099 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7100 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7101 } 7102 break; 7103 7104 ///////////////////// 7105 case ALLOC_TRT_WRITE: 7106 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7107 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7108 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7109 { 7110 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7111 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7112 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7113 7114 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7115 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7116 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7117 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7118 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7119 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7120 7121 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7122 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7123 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7124 7125 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7126 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7127 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7128 7129 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7130 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7131 7132 else if(r_read_fsm.read() == READ_TRT_LOCK) 7133 r_alloc_trt_fsm = ALLOC_TRT_READ; 7134 } 7135 break; 7136 7137 /////////////////// 7138 case ALLOC_TRT_CAS: 7139 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7140 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7141 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7142 { 7143 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7144 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7145 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7146 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7147 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7148 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7149 7150 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7151 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7152 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7153 7154 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7155 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7156 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7157 7158 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7159 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7160 7161 else if(r_read_fsm.read() == READ_TRT_LOCK) 7162 r_alloc_trt_fsm = ALLOC_TRT_READ; 
7163 7164 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7165 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7166 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7167 } 7168 break; 7169 7170 /////////////////////// 7171 case ALLOC_TRT_IXR_CMD: 7172 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 7173 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 7174 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 7175 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 7176 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 7177 { 7178 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7179 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7180 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7181 7182 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7183 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7184 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7185 7186 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7187 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7188 7189 else if(r_read_fsm.read() == READ_TRT_LOCK) 7190 r_alloc_trt_fsm = ALLOC_TRT_READ; 7191 7192 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7193 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7194 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7195 7196 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7197 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7198 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7199 } 7200 break; 7201 7202 //////////////////////// 7203 case ALLOC_TRT_XRAM_RSP: 7204 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 7205 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 7206 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7207 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 7208 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7209 { 7210 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7211 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7212 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7213 7214 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7215 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7216 7217 else if(r_read_fsm.read() == READ_TRT_LOCK) 7218 r_alloc_trt_fsm = ALLOC_TRT_READ; 7219 7220 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7221 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7222 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7223 7224 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7225 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7226 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7227 7228 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7229 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7230 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7231 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7232 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7233 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7234 7235 } 7236 break; 7237 7238 /////////////////////// 7239 case ALLOC_TRT_IXR_RSP: 7240 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 7241 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 7242 { 7243 if(r_config_fsm.read() == CONFIG_TRT_LOCK) 7244 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7245 7246 else if(r_read_fsm.read() == READ_TRT_LOCK) 7247 r_alloc_trt_fsm = ALLOC_TRT_READ; 7248 7249 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7250 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7251 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7252 7253 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7254 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7255 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7256 7257 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7258 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7259 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7260 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7261 (r_ixr_cmd_fsm.read() == 
IXR_CMD_CONFIG_TRT) ) 7262 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7263 7264 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7265 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7266 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7267 } 7268 break; 7269 7270 ////////////////////// 7271 case ALLOC_TRT_CONFIG: 7272 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 7273 (r_config_fsm.read() != CONFIG_TRT_SET)) 7274 { 7275 if(r_read_fsm.read() == READ_TRT_LOCK) 7276 r_alloc_trt_fsm = ALLOC_TRT_READ; 7277 7278 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7279 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7280 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7281 7282 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7283 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7284 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7285 7286 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7287 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7288 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7289 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7290 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7291 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7292 7293 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7294 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7295 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7296 7297 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7298 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7299 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7300 } 7301 break; 7302 7303 } // end switch alloc_trt_fsm 7304 7305 //////////////////////////////////////////////////////////////////////////////////// 7306 // ALLOC_HEAP FSM 7307 //////////////////////////////////////////////////////////////////////////////////// 7308 // The ALLOC_HEAP FSM allocates the access to the heap 7309 // with a round robin priority between 6 user FSMs : 7310 // The cyclic ordering is READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 7311 // The ressource is always allocated. 7312 ///////////////////////////////////////////////////////////////////////////////////// 7313 7314 //std::cout << std::endl << "alloc_heap_fsm" << std::endl; 7315 7316 switch(r_alloc_heap_fsm.read()) 7317 { 7318 //////////////////// 7319 case ALLOC_HEAP_RESET: 7320 // Initializes the heap one ENTRY each cycle. 
7321 7322 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 7323 7324 if(r_alloc_heap_reset_cpt.read() == (m_heap_size-1)) 7325 { 7326 m_heap.init(); 7327 7328 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7329 } 7330 break; 7331 7332 //////////////////// 7333 case ALLOC_HEAP_READ: 7334 if((r_read_fsm.read() != READ_HEAP_REQ) and 7335 (r_read_fsm.read() != READ_HEAP_LOCK) and 7336 (r_read_fsm.read() != READ_HEAP_ERASE)) 7337 { 7338 if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7339 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7340 7341 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7342 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7343 7344 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7345 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7346 7347 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7348 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7349 7350 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 7351 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7352 } 7353 break; 7354 7355 ///////////////////// 7356 case ALLOC_HEAP_WRITE: 7357 if((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 7358 (r_write_fsm.read() != WRITE_UPT_REQ) and 7359 (r_write_fsm.read() != WRITE_UPT_NEXT)) 7360 { 7361 if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7362 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7363 7364 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7365 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7366 7367 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7368 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7369 7370 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 7371 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7372 7373 else if(r_read_fsm.read() == READ_HEAP_REQ) 7374 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7375 } 7376 break; 7377 7378 //////////////////// 7379 case ALLOC_HEAP_CAS: 7380 if((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 7381 (r_cas_fsm.read() != CAS_UPT_REQ) and 7382 (r_cas_fsm.read() != CAS_UPT_NEXT)) 7383 { 7384 if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7385 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7386 7387 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7388 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7389 7390 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 7391 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7392 7393 else if(r_read_fsm.read() == READ_HEAP_REQ) 7394 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7395 7396 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7397 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7398 } 7399 break; 7400 7401 /////////////////////// 7402 case ALLOC_HEAP_CLEANUP: 7403 if((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 7404 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 7405 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 7406 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 7407 { 7408 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7409 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7410 7411 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 7412 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7413 7414 else if(r_read_fsm.read() == READ_HEAP_REQ) 7415 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7416 7417 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7418 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7419 7420 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7421 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7422 } 7423 break; 7424 7425 //////////////////////// 7426 case ALLOC_HEAP_XRAM_RSP: 7427 if((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 7428 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 7429 { 7430 if(r_config_fsm.read() == CONFIG_HEAP_REQ) 7431 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 7432 7433 else if(r_read_fsm.read() == READ_HEAP_REQ) 7434 r_alloc_heap_fsm = 
ALLOC_HEAP_READ; 7435 7436 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7437 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7438 7439 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7440 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7441 7442 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7443 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7444 7445 } 7446 break; 7447 7448 /////////////////////// 7449 case ALLOC_HEAP_CONFIG: 7450 if((r_config_fsm.read() != CONFIG_HEAP_REQ) and 7451 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 7452 { 7453 if(r_read_fsm.read() == READ_HEAP_REQ) 7454 r_alloc_heap_fsm = ALLOC_HEAP_READ; 7455 7456 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 7457 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 7458 7459 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 7460 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 7461 7462 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 7463 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 7464 7465 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 7466 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 7467 } 7468 break; 7469 7470 } // end switch alloc_heap_fsm 7471 7472 //std::cout << std::endl << "fifo_update" << std::endl; 7473 7474 ///////////////////////////////////////////////////////////////////// 7475 // TGT_CMD to READ FIFO 7476 ///////////////////////////////////////////////////////////////////// 7477 7478 m_cmd_read_addr_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7479 p_vci_tgt.address.read() ); 7480 m_cmd_read_length_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7481 p_vci_tgt.plen.read()>>2 ); 7482 m_cmd_read_srcid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7483 p_vci_tgt.srcid.read() ); 7484 m_cmd_read_trdid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7485 p_vci_tgt.trdid.read() ); 7486 m_cmd_read_pktid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 7487 p_vci_tgt.pktid.read() ); 7488 7489 ///////////////////////////////////////////////////////////////////// 7490 // TGT_CMD to WRITE FIFO 7491 ///////////////////////////////////////////////////////////////////// 7492 7493 m_cmd_write_addr_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7494 (addr_t)p_vci_tgt.address.read() ); 7495 m_cmd_write_eop_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7496 p_vci_tgt.eop.read() ); 7497 m_cmd_write_srcid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7498 p_vci_tgt.srcid.read() ); 7499 m_cmd_write_trdid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7500 p_vci_tgt.trdid.read() ); 7501 m_cmd_write_pktid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7502 p_vci_tgt.pktid.read() ); 7503 m_cmd_write_data_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7504 p_vci_tgt.wdata.read() ); 7505 m_cmd_write_be_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 7506 p_vci_tgt.be.read() ); 7507 7508 //////////////////////////////////////////////////////////////////////////////////// 7509 // TGT_CMD to CAS FIFO 7510 //////////////////////////////////////////////////////////////////////////////////// 7511 7512 m_cmd_cas_addr_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7513 (addr_t)p_vci_tgt.address.read() ); 7514 m_cmd_cas_eop_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7515 p_vci_tgt.eop.read() ); 7516 m_cmd_cas_srcid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7517 p_vci_tgt.srcid.read() ); 7518 m_cmd_cas_trdid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7519 p_vci_tgt.trdid.read() ); 7520 m_cmd_cas_pktid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 7521 p_vci_tgt.pktid.read() ); 7522 m_cmd_cas_wdata_fifo.update( 
cmd_cas_fifo_get, cmd_cas_fifo_put, 7523 p_vci_tgt.wdata.read() ); 7524 7525 //////////////////////////////////////////////////////////////////////////////////// 7526 // CC_RECEIVE to CLEANUP FIFO 7527 //////////////////////////////////////////////////////////////////////////////////// 7528 7529 m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 7530 cc_receive_to_cleanup_fifo_put, 7531 p_dspin_p2m.data.read() ); 7532 7533 //////////////////////////////////////////////////////////////////////////////////// 7534 // CC_RECEIVE to MULTI_ACK FIFO 7535 //////////////////////////////////////////////////////////////////////////////////// 7536 7537 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 7538 cc_receive_to_multi_ack_fifo_put, 7539 p_dspin_p2m.data.read() ); 7540 7541 //////////////////////////////////////////////////////////////////////////////////// 7542 // WRITE to CC_SEND FIFO 7543 //////////////////////////////////////////////////////////////////////////////////// 7544 7545 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, 7546 write_to_cc_send_fifo_put, 7547 write_to_cc_send_fifo_inst ); 7548 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, 7549 write_to_cc_send_fifo_put, 7550 write_to_cc_send_fifo_srcid ); 7551 7552 //////////////////////////////////////////////////////////////////////////////////// 7553 // CONFIG to CC_SEND FIFO 7554 //////////////////////////////////////////////////////////////////////////////////// 7555 7556 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, 7557 config_to_cc_send_fifo_put, 7558 config_to_cc_send_fifo_inst ); 7559 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, 7560 config_to_cc_send_fifo_put, 7561 config_to_cc_send_fifo_srcid ); 7562 7563 //////////////////////////////////////////////////////////////////////////////////// 7564 // XRAM_RSP to CC_SEND FIFO 7565 //////////////////////////////////////////////////////////////////////////////////// 7566 7567 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, 7568 xram_rsp_to_cc_send_fifo_put, 7569 xram_rsp_to_cc_send_fifo_inst ); 7570 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, 7571 xram_rsp_to_cc_send_fifo_put, 7572 xram_rsp_to_cc_send_fifo_srcid ); 7573 7574 //////////////////////////////////////////////////////////////////////////////////// 7575 // CAS to CC_SEND FIFO 7576 //////////////////////////////////////////////////////////////////////////////////// 7577 7578 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, 7579 cas_to_cc_send_fifo_put, 7580 cas_to_cc_send_fifo_inst ); 7581 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, 7582 cas_to_cc_send_fifo_put, 7583 cas_to_cc_send_fifo_srcid ); 7584 m_cpt_cycles++; 7585 7586 } // end transition() 7587 7588 ///////////////////////////// 7589 tmpl(void)::genMoore() 7590 ///////////////////////////// 7591 { 7592 //////////////////////////////////////////////////////////// 7593 // Command signals on the p_vci_ixr port 7594 //////////////////////////////////////////////////////////// 7595 7596 // DATA width is 8 bytes 7597 // The following values are not transmitted to XRAM 7598 // p_vci_ixr.be 7599 // p_vci_ixr.pktid 7600 // p_vci_ixr.cons 7601 // p_vci_ixr.wrap 7602 // p_vci_ixr.contig 7603 // p_vci_ixr.clen 7604 // p_vci_ixr.cfixed 7605 7606 p_vci_ixr.plen = 64; 7607 p_vci_ixr.srcid = m_srcid_x; 7608 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 7609 p_vci_ixr.address = 
(addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 7610 p_vci_ixr.be = 0xFF; 7611 p_vci_ixr.pktid = 0; 7612 p_vci_ixr.cons = false; 7613 p_vci_ixr.wrap = false; 7614 p_vci_ixr.contig = true; 7615 p_vci_ixr.clen = 0; 7616 p_vci_ixr.cfixed = false; 7617 7618 if ( (r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 7619 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 7620 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 7621 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 7622 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND) ) 7623 { 7624 p_vci_ixr.cmdval = true; 7625 7626 if ( r_ixr_cmd_get.read() ) // GET 7627 { 7628 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 7629 p_vci_ixr.wdata = 0; 7630 p_vci_ixr.eop = true; 7631 } 7632 else // PUT 7633 { 7634 size_t word = r_ixr_cmd_word.read(); 7635 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 7636 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 7637 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 7638 p_vci_ixr.eop = (word == (m_words-2)); 7639 } 7640 } 7641 else 7642 { 7643 p_vci_ixr.cmdval = false; 7644 } 7645 7646 //////////////////////////////////////////////////// 7647 // Response signals on the p_vci_ixr port 7648 //////////////////////////////////////////////////// 7649 7650 if( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 7651 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ) 7652 { 7653 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 7654 } 7655 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 7656 { 7657 p_vci_ixr.rspack = false; 7658 } 7659 7660 //////////////////////////////////////////////////// 7661 // Command signals on the p_vci_tgt port 7662 //////////////////////////////////////////////////// 7663 7664 switch((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 7665 { 7666 case TGT_CMD_IDLE: 7667 p_vci_tgt.cmdack = false; 7668 break; 7669 7670 case TGT_CMD_CONFIG: 7671 case TGT_CMD_ERROR: 7672 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 7673 break; 7674 7675 case TGT_CMD_READ: 7676 p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); 7677 break; 7678 7679 case TGT_CMD_WRITE: 7680 p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); 7681 break; 7682 7683 case TGT_CMD_CAS: 7684 p_vci_tgt.cmdack = m_cmd_cas_addr_fifo.wok(); 7685 break; 7686 } 7687 7688 //////////////////////////////////////////////////// 7689 // Response signals on the p_vci_tgt port 7690 //////////////////////////////////////////////////// 7691 7692 switch(r_tgt_rsp_fsm.read()) 7693 { 7694 case TGT_RSP_CONFIG_IDLE: 7695 case TGT_RSP_TGT_CMD_IDLE: 7696 case TGT_RSP_READ_IDLE: 7697 case TGT_RSP_WRITE_IDLE: 7698 case TGT_RSP_CAS_IDLE: 7699 case TGT_RSP_XRAM_IDLE: 7700 case TGT_RSP_MULTI_ACK_IDLE: 7701 case TGT_RSP_CLEANUP_IDLE: 7702 { 7703 p_vci_tgt.rspval = false; 7704 p_vci_tgt.rsrcid = 0; 7705 p_vci_tgt.rdata = 0; 7706 p_vci_tgt.rpktid = 0; 7707 p_vci_tgt.rtrdid = 0; 7708 p_vci_tgt.rerror = 0; 7709 p_vci_tgt.reop = false; 7710 break; 7711 } 7712 case TGT_RSP_CONFIG: 7713 { 7714 p_vci_tgt.rspval = true; 7715 p_vci_tgt.rdata = 0; 7716 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 7717 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 7718 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 7719 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 7720 p_vci_tgt.reop = true; 7721 7722 break; 7723 } 7724 case TGT_RSP_TGT_CMD: 7725 { 7726 p_vci_tgt.rspval = true; 7727 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 7728 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 7729 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 7730 
p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 7731 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 7732 p_vci_tgt.reop = true; 7733 7734 break; 7735 } 7736 case TGT_RSP_READ: 7737 { 7738 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 7739 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7740 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7741 7742 p_vci_tgt.rspval = true; 7743 7744 if ( is_ll and not r_tgt_rsp_key_sent.read() ) 7745 { 7746 // LL response first flit 7747 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 7748 } 7749 else 7750 { 7751 // LL response second flit or READ response 7752 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 7753 } 7754 7755 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 7756 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); 7757 p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); 7758 p_vci_tgt.rerror = 0; 7759 p_vci_tgt.reop = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 7760 break; 7761 } 7762 7763 case TGT_RSP_WRITE: 7764 p_vci_tgt.rspval = true; 7765 if(((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read()) 7766 p_vci_tgt.rdata = 1; 7767 else 7768 p_vci_tgt.rdata = 0; 7769 p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 7770 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 7771 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 7772 p_vci_tgt.rerror = 0; 7773 p_vci_tgt.reop = true; 7774 break; 7775 7776 case TGT_RSP_CLEANUP: 7777 p_vci_tgt.rspval = true; 7778 p_vci_tgt.rdata = 0; 7779 p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); 7780 p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); 7781 p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); 7782 p_vci_tgt.rerror = 0; // Can be a CAS rsp 7783 p_vci_tgt.reop = true; 7784 break; 7785 7786 case TGT_RSP_CAS: 7787 p_vci_tgt.rspval = true; 7788 p_vci_tgt.rdata = r_cas_to_tgt_rsp_data.read(); 7789 p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read(); 7790 p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read(); 7791 p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read(); 7792 p_vci_tgt.rerror = 0; 7793 p_vci_tgt.reop = true; 7794 break; 7795 7796 case TGT_RSP_XRAM: 7797 { 7798 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + r_xram_rsp_to_tgt_rsp_length.read() - 1; 7799 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7800 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7801 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 7802 7803 p_vci_tgt.rspval = true; 7804 7805 if( is_ll and not r_tgt_rsp_key_sent.read() ) { 7806 // LL response first flit 7807 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read(); 7808 } 7809 else { 7810 // LL response second flit or READ response 7811 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 7812 } 7813 7814 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 7815 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); 7816 p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); 7817 p_vci_tgt.rerror = is_error; 7818 p_vci_tgt.reop = (((is_last_word or is_error) and not is_ll) or 7819 (r_tgt_rsp_key_sent.read() and is_ll)); 7820 break; 7821 } 7822 7823 case TGT_RSP_MULTI_ACK: 7824 p_vci_tgt.rspval = true; 7825 p_vci_tgt.rdata = 0; // Can be a CAS or SC rsp 7826 p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read(); 7827 p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read(); 7828 p_vci_tgt.rpktid = 
r_multi_ack_to_tgt_rsp_pktid.read(); 7829 p_vci_tgt.rerror = 0; 7830 p_vci_tgt.reop = true; 7831 break; 7832 } // end switch r_tgt_rsp_fsm 7833 7834 //////////////////////////////////////////////////////////////////// 7835 // p_dspin_m2p port (CC_SEND FSM) 7836 //////////////////////////////////////////////////////////////////// 7837 7838 p_dspin_m2p.write = false; 7839 p_dspin_m2p.eop = false; 7840 p_dspin_m2p.data = 0; 7841 7842 switch(r_cc_send_fsm.read()) 7843 { 7844 /////////////////////////// 7845 case CC_SEND_CONFIG_IDLE: 7846 case CC_SEND_XRAM_RSP_IDLE: 7847 case CC_SEND_WRITE_IDLE: 7848 case CC_SEND_CAS_IDLE: 7849 { 7850 break; 7851 } 7852 //////////////////////////////// 7853 case CC_SEND_CONFIG_INVAL_HEADER: 7854 { 7855 uint8_t multi_inval_type; 7856 if(m_config_to_cc_send_inst_fifo.read()) 7857 { 7858 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 7859 } 7860 else 7861 { 7862 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 7863 } 7864 7865 uint64_t flit = 0; 7866 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 7867 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 7868 7869 DspinDhccpParam::dspin_set( flit, 7870 dest, 7871 DspinDhccpParam::MULTI_INVAL_DEST); 7872 7873 DspinDhccpParam::dspin_set( flit, 7874 m_cc_global_id, 7875 DspinDhccpParam::MULTI_INVAL_SRCID); 7876 7877 DspinDhccpParam::dspin_set( flit, 7878 r_config_to_cc_send_trdid.read(), 7879 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 7880 7881 DspinDhccpParam::dspin_set( flit, 7882 multi_inval_type, 7883 DspinDhccpParam::M2P_TYPE); 7884 p_dspin_m2p.write = true; 7885 p_dspin_m2p.data = flit; 7886 break; 7887 } 7888 //////////////////////////////// 7889 case CC_SEND_CONFIG_INVAL_NLINE: 7890 { 7891 uint64_t flit = 0; 7892 DspinDhccpParam::dspin_set( flit, 7893 r_config_to_cc_send_nline.read(), 7894 DspinDhccpParam::MULTI_INVAL_NLINE); 7895 p_dspin_m2p.eop = true; 7896 p_dspin_m2p.write = true; 7897 p_dspin_m2p.data = flit; 7898 break; 7899 } 7900 /////////////////////////////////// 7901 case CC_SEND_XRAM_RSP_INVAL_HEADER: 7902 { 7903 if(not m_xram_rsp_to_cc_send_inst_fifo.rok()) break; 7904 7905 uint8_t multi_inval_type; 7906 if(m_xram_rsp_to_cc_send_inst_fifo.read()) 7907 { 7908 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 7909 } 7910 else 7911 { 7912 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 7913 } 7914 7915 uint64_t flit = 0; 7916 uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() << 7917 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 7918 7919 DspinDhccpParam::dspin_set( flit, 7920 dest, 7921 DspinDhccpParam::MULTI_INVAL_DEST); 7922 7923 DspinDhccpParam::dspin_set( flit, 7924 m_cc_global_id, 7925 DspinDhccpParam::MULTI_INVAL_SRCID); 7926 7927 DspinDhccpParam::dspin_set( flit, 7928 r_xram_rsp_to_cc_send_trdid.read(), 7929 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 7930 7931 DspinDhccpParam::dspin_set( flit, 7932 multi_inval_type, 7933 DspinDhccpParam::M2P_TYPE); 7934 p_dspin_m2p.write = true; 7935 p_dspin_m2p.data = flit; 7936 break; 7937 } 7938 7939 ////////////////////////////////// 7940 case CC_SEND_XRAM_RSP_INVAL_NLINE: 7941 { 7942 uint64_t flit = 0; 7943 7944 DspinDhccpParam::dspin_set( flit, 7945 r_xram_rsp_to_cc_send_nline.read(), 7946 DspinDhccpParam::MULTI_INVAL_NLINE); 7947 p_dspin_m2p.eop = true; 7948 p_dspin_m2p.write = true; 7949 p_dspin_m2p.data = flit; 7950 break; 7951 } 7952 7953 ///////////////////////////////////// 7954 case CC_SEND_CONFIG_BRDCAST_HEADER: 7955 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: 7956 case 
CC_SEND_WRITE_BRDCAST_HEADER: 7957 case CC_SEND_CAS_BRDCAST_HEADER: 7958 { 7959 uint64_t flit = 0; 7960 7961 DspinDhccpParam::dspin_set( flit, 7962 m_broadcast_boundaries, 7963 DspinDhccpParam::BROADCAST_BOX); 7964 7965 DspinDhccpParam::dspin_set( flit, 7966 m_cc_global_id, 7967 DspinDhccpParam::BROADCAST_SRCID); 7968 7969 DspinDhccpParam::dspin_set( flit, 7970 1ULL, 7971 DspinDhccpParam::M2P_BC); 7972 p_dspin_m2p.write = true; 7973 p_dspin_m2p.data = flit; 7974 break; 7975 } 7976 //////////////////////////////////// 7977 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: 7978 { 7979 uint64_t flit = 0; 7980 DspinDhccpParam::dspin_set( flit, 7981 r_xram_rsp_to_cc_send_nline.read(), 7982 DspinDhccpParam::BROADCAST_NLINE); 7983 p_dspin_m2p.write = true; 7984 p_dspin_m2p.eop = true; 7985 p_dspin_m2p.data = flit; 7986 break; 7987 } 7988 ////////////////////////////////// 7989 case CC_SEND_CONFIG_BRDCAST_NLINE: 7990 { 7991 uint64_t flit = 0; 7992 DspinDhccpParam::dspin_set( flit, 7993 r_config_to_cc_send_nline.read(), 7994 DspinDhccpParam::BROADCAST_NLINE); 7995 p_dspin_m2p.write = true; 7996 p_dspin_m2p.eop = true; 7997 p_dspin_m2p.data = flit; 7998 break; 7999 } 8000 ///////////////////////////////// 8001 case CC_SEND_WRITE_BRDCAST_NLINE: 8002 { 8003 uint64_t flit = 0; 8004 DspinDhccpParam::dspin_set( flit, 8005 r_write_to_cc_send_nline.read(), 8006 DspinDhccpParam::BROADCAST_NLINE); 8007 p_dspin_m2p.write = true; 8008 p_dspin_m2p.eop = true; 8009 p_dspin_m2p.data = flit; 8010 break; 8011 } 8012 /////////////////////////////// 8013 case CC_SEND_CAS_BRDCAST_NLINE: 8014 { 8015 uint64_t flit = 0; 8016 DspinDhccpParam::dspin_set( flit, 8017 r_cas_to_cc_send_nline.read(), 8018 DspinDhccpParam::BROADCAST_NLINE); 8019 p_dspin_m2p.write = true; 8020 p_dspin_m2p.eop = true; 8021 p_dspin_m2p.data = flit; 8022 break; 8023 } 8024 /////////////////////////////// 8025 case CC_SEND_WRITE_UPDT_HEADER: 8026 { 8027 if(not m_write_to_cc_send_inst_fifo.rok()) break; 8028 8029 uint8_t multi_updt_type; 8030 if(m_write_to_cc_send_inst_fifo.read()) 8031 { 8032 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 8033 } 8034 else 8035 { 8036 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 8037 } 8038 8039 uint64_t flit = 0; 8040 uint64_t dest = 8041 m_write_to_cc_send_srcid_fifo.read() << 8042 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8043 8044 DspinDhccpParam::dspin_set( 8045 flit, 8046 dest, 8047 DspinDhccpParam::MULTI_UPDT_DEST); 8048 8049 DspinDhccpParam::dspin_set( 8050 flit, 8051 m_cc_global_id, 8052 DspinDhccpParam::MULTI_UPDT_SRCID); 8053 8054 DspinDhccpParam::dspin_set( 8055 flit, 8056 r_write_to_cc_send_trdid.read(), 8057 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 8058 8059 DspinDhccpParam::dspin_set( 8060 flit, 8061 multi_updt_type, 8062 DspinDhccpParam::M2P_TYPE); 8063 8064 p_dspin_m2p.write = true; 8065 p_dspin_m2p.data = flit; 8066 8067 break; 8068 } 8069 ////////////////////////////// 8070 case CC_SEND_WRITE_UPDT_NLINE: 8071 { 8072 uint64_t flit = 0; 8073 8074 DspinDhccpParam::dspin_set( 8075 flit, 8076 r_write_to_cc_send_index.read(), 8077 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 8078 8079 DspinDhccpParam::dspin_set( 8080 flit, 8081 r_write_to_cc_send_nline.read(), 8082 DspinDhccpParam::MULTI_UPDT_NLINE); 8083 8084 p_dspin_m2p.write = true; 8085 p_dspin_m2p.data = flit; 8086 8087 break; 8088 } 8089 ///////////////////////////// 8090 case CC_SEND_WRITE_UPDT_DATA: 8091 { 8092 8093 uint8_t multi_updt_cpt = 8094 r_cc_send_cpt.read() + r_write_to_cc_send_index.read(); 8095 8096 uint8_t multi_updt_be = 
r_write_to_cc_send_be[multi_updt_cpt].read(); 8097 uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read(); 8098 8099 uint64_t flit = 0; 8100 8101 DspinDhccpParam::dspin_set( 8102 flit, 8103 multi_updt_be, 8104 DspinDhccpParam::MULTI_UPDT_BE); 8105 8106 DspinDhccpParam::dspin_set( 8107 flit, 8108 multi_updt_data, 8109 DspinDhccpParam::MULTI_UPDT_DATA); 8110 8111 p_dspin_m2p.write = true; 8112 p_dspin_m2p.eop = (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)); 8113 p_dspin_m2p.data = flit; 8114 8115 break; 8116 } 8117 //////////////////////////// 8118 case CC_SEND_CAS_UPDT_HEADER: 8119 { 8120 if (not m_cas_to_cc_send_inst_fifo.rok()) break; 8121 8122 uint8_t multi_updt_type; 8123 if(m_cas_to_cc_send_inst_fifo.read()) 8124 { 8125 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 8126 } 8127 else 8128 { 8129 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 8130 } 8131 8132 uint64_t flit = 0; 8133 uint64_t dest = 8134 m_cas_to_cc_send_srcid_fifo.read() << 8135 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8136 8137 DspinDhccpParam::dspin_set( 8138 flit, 8139 dest, 8140 DspinDhccpParam::MULTI_UPDT_DEST); 8141 8142 DspinDhccpParam::dspin_set( 8143 flit, 8144 m_cc_global_id, 8145 DspinDhccpParam::MULTI_UPDT_SRCID); 8146 8147 DspinDhccpParam::dspin_set( 8148 flit, 8149 r_cas_to_cc_send_trdid.read(), 8150 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 8151 8152 DspinDhccpParam::dspin_set( 8153 flit, 8154 multi_updt_type, 8155 DspinDhccpParam::M2P_TYPE); 8156 8157 p_dspin_m2p.write = true; 8158 p_dspin_m2p.data = flit; 8159 8160 break; 8161 } 8162 //////////////////////////// 8163 case CC_SEND_CAS_UPDT_NLINE: 8164 { 8165 uint64_t flit = 0; 8166 8167 DspinDhccpParam::dspin_set( 8168 flit, 8169 r_cas_to_cc_send_index.read(), 8170 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 8171 8172 DspinDhccpParam::dspin_set( 8173 flit, 8174 r_cas_to_cc_send_nline.read(), 8175 DspinDhccpParam::MULTI_UPDT_NLINE); 8176 8177 p_dspin_m2p.write = true; 8178 p_dspin_m2p.data = flit; 8179 8180 break; 8181 } 8182 /////////////////////////// 8183 case CC_SEND_CAS_UPDT_DATA: 8184 { 8185 uint64_t flit = 0; 8186 8187 DspinDhccpParam::dspin_set( 8188 flit, 8189 0xF, 8190 DspinDhccpParam::MULTI_UPDT_BE); 8191 8192 DspinDhccpParam::dspin_set( 8193 flit, 8194 r_cas_to_cc_send_wdata.read(), 8195 DspinDhccpParam::MULTI_UPDT_DATA); 8196 8197 p_dspin_m2p.write = true; 8198 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 8199 p_dspin_m2p.data = flit; 8200 8201 break; 8202 } 8203 //////////////////////////////// 8204 case CC_SEND_CAS_UPDT_DATA_HIGH: 8205 { 8206 uint64_t flit = 0; 8207 8208 DspinDhccpParam::dspin_set( 8209 flit, 8210 0xF, 8211 DspinDhccpParam::MULTI_UPDT_BE); 8212 8213 DspinDhccpParam::dspin_set( 8214 flit, 8215 r_cas_to_cc_send_wdata_high.read(), 8216 DspinDhccpParam::MULTI_UPDT_DATA); 8217 8218 p_dspin_m2p.write = true; 8219 p_dspin_m2p.eop = true; 8220 p_dspin_m2p.data = flit; 8221 8222 break; 8223 } 8224 } 8225 8226 //////////////////////////////////////////////////////////////////// 8227 // p_dspin_clack port (CLEANUP FSM) 8228 //////////////////////////////////////////////////////////////////// 8229 8230 if ( r_cleanup_fsm.read() == CLEANUP_SEND_CLACK ) 8231 { 8232 uint8_t cleanup_ack_type; 8233 if(r_cleanup_inst.read()) 8234 { 8235 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST; 8236 } 8237 else 8238 { 8239 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA; 8240 } 8241 8242 uint64_t flit = 0; 8243 uint64_t dest = r_cleanup_srcid.read() << 8244 
(DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8245 8246 DspinDhccpParam::dspin_set( 8247 flit, 8248 dest, 8249 DspinDhccpParam::CLACK_DEST); 8250 8251 DspinDhccpParam::dspin_set( 8252 flit, 8253 r_cleanup_nline.read(), 8254 DspinDhccpParam::CLACK_SET); 8255 8256 DspinDhccpParam::dspin_set( 8257 flit, 8258 r_cleanup_way_index.read(), 8259 DspinDhccpParam::CLACK_WAY); 8260 8261 DspinDhccpParam::dspin_set( 8262 flit, 8263 cleanup_ack_type, 8264 DspinDhccpParam::CLACK_TYPE); 8265 8266 p_dspin_clack.eop = true; 8267 p_dspin_clack.write = true; 8268 p_dspin_clack.data = flit; 8269 } 8270 else 8271 { 8272 p_dspin_clack.write = false; 8273 p_dspin_clack.eop = false; 8274 p_dspin_clack.data = 0; 8275 } 8276 8277 /////////////////////////////////////////////////////////////////// 8278 // p_dspin_p2m port (CC_RECEIVE FSM) 8279 /////////////////////////////////////////////////////////////////// 8280 // 8281 switch(r_cc_receive_fsm.read()) 8282 { 8283 case CC_RECEIVE_IDLE: 8284 { 8285 p_dspin_p2m.read = false; 8286 break; 8287 } 8288 case CC_RECEIVE_CLEANUP: 8289 case CC_RECEIVE_CLEANUP_EOP: 8290 { 8291 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok(); 8292 break; 8293 } 8294 case CC_RECEIVE_MULTI_ACK: 8295 { 8296 p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok(); 8297 break; 8298 } 8299 } 8300 // end switch r_cc_send_fsm 8301 } // end genMoore() 8482 // end switch r_cc_send_fsm 8483 } // end genMoore() 8302 8484 8303 8485 } … … 8305 8487 8306 8488 // Local Variables: 8307 // tab-width: 28308 // c-basic-offset: 28489 // tab-width: 4 8490 // c-basic-offset: 4 8309 8491 // c-file-offsets:((innamespace . 0)(inline-open . 0)) 8310 8492 // indent-tabs-mode: nil 8311 8493 // End: 8312 8494 8313 // vim: filetype=cpp:expandtab:shiftwidth= 2:tabstop=2:softtabstop=28495 // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4 -
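The ALLOC_TRT and ALLOC_HEAP state machines above both implement the same round-robin lock-allocation pattern: the allocator stays in the state naming the current owner as long as that user FSM is still in one of its lock-using states, then grants the lock to the next requester in the cyclic priority order. The following stand-alone sketch illustrates that pattern in plain C++; the enum values and the request predicate are simplified placeholders, not the component's actual states.

#include <array>
#include <cstddef>

// Stand-alone sketch of the round-robin allocation pattern used by the
// ALLOC_TRT and ALLOC_HEAP FSMs. The six users and their cyclic priority
// order mirror the comment above (READ > WRITE > CAS > CLEANUP > XRAM_RSP
// > CONFIG); everything else is a simplified placeholder.
enum class User : size_t { READ, WRITE, CAS, CLEANUP, XRAM_RSP, CONFIG, COUNT };

struct RoundRobinAllocator
{
    User owner = User::READ;            // the resource is always allocated

    // req[i] is true when user FSM i is in one of its lock-requesting or
    // lock-using states (e.g. r_read_fsm == READ_HEAP_REQ in the component).
    void update(const std::array<bool, static_cast<size_t>(User::COUNT)>& req)
    {
        const size_t n   = static_cast<size_t>(User::COUNT);
        const size_t cur = static_cast<size_t>(owner);

        if (req[cur]) return;           // current owner still holds the lock

        for (size_t k = 1; k < n; k++)  // scan the others in cyclic order
        {
            const size_t next = (cur + k) % n;
            if (req[next]) { owner = static_cast<User>(next); return; }
        }
        // no requester: the previous owner keeps the (idle) lock
    }
};

In the component itself the request predicates are comparisons on the user FSM state registers (for example r_read_fsm.read() == READ_HEAP_REQ) and the grant is the r_alloc_*_fsm register written in the transition function.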
trunk/platforms/tsar_generic_iob/tsar_iob_cluster/caba/source/src/tsar_iob_cluster.cpp
r498 r504 166 166 IntTab(cluster_id, memc_int_tgtid), // TGTID INT network 167 167 (cluster_id << l_width) + nb_procs, // CC_GLOBAL_ID 168 x_width, // Number of x bits in platform 169 y_width, // Number of y bits in platform 168 170 memc_ways, memc_sets, 16, // CACHE SIZE 169 171 3, // MAX NUMBER OF COPIES -
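The only change to this file is that the VciMemCache constructor now also receives the number of X and Y coordinate bits of the platform mesh. The changeset does not show how the component uses these widths; the sketch below is only a hypothetical illustration of the kind of decoding they enable, namely splitting a global index whose upper bits concatenate the (x, y) cluster coordinates. The bit ordering (y in the low coordinate bits) follows the cluster(x,y) = y + YMAX * x convention used in top.cpp further down; the helper name and the numeric values are assumptions, not part of the changeset.

#include <cstdint>
#include <cstdio>

// Hypothetical helper (not from the changeset): split a global initiator
// index whose upper bits concatenate the (x, y) cluster coordinates.
static void global_id_to_xy(uint32_t global_id, uint32_t local_bits,
                            uint32_t x_width, uint32_t y_width,
                            uint32_t &x, uint32_t &y)
{
    uint32_t cluster = global_id >> local_bits;           // drop the local part
    y = cluster & ((1u << y_width) - 1);                  // low bits: y
    x = (cluster >> y_width) & ((1u << x_width) - 1);     // next bits: x
}

int main()
{
    // CC_GLOBAL_ID is built as (cluster_id << l_width) + nb_procs in the
    // diff above; l_width = 4, cluster_id = 0xB and nb_procs = 4 are assumed.
    uint32_t x, y;
    global_id_to_xy((0xBu << 4) + 4, /*local_bits=*/4, /*x_width=*/2, /*y_width=*/2, x, y);
    std::printf("x=%u y=%u\n", x, y);                     // prints x=2 y=3
    return 0;
}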
trunk/platforms/tsar_generic_xbar/soclib.conf
r486 r504 1 1 2 config.addDescPath("/Users/alain/soc/tsar-trunk-svn-2013/") -
trunk/platforms/tsar_generic_xbar/top.cpp
r493 r504 98 98 /////////////////////////////////////////////////// 99 99 100 //#define USE_ALMOS 101 #define USE_GIET100 #define USE_ALMOS 1 101 //#define USE_GIET 102 102 103 103 #ifdef USE_ALMOS … … 116 116 // Parallelisation 117 117 /////////////////////////////////////////////////// 118 #define USE_OPENMP 118 #define USE_OPENMP 0 119 119 120 120 #if USE_OPENMP … … 123 123 124 124 // cluster index (computed from x,y coordinates) 125 #define cluster(x,y) (y + YMAX *x)125 #define cluster(x,y) (y + YMAX * x) 126 126 127 127 /////////////////////////////////////////////////////////// … … 139 139 #define vci_cell_width_ext 8 140 140 141 #ifdef USE_ALMOS 142 #define vci_address_width 32 143 #endif 144 #ifdef USE_GIET 145 #define vci_address_width 40 146 #endif 141 147 #define vci_plen_width 8 142 #define vci_address_width 40143 148 #define vci_rerror_width 1 144 149 #define vci_clen_width 1 … … 224 229 //////////////////////i///////////////////////////////////// 225 230 226 #define MAX_FROZEN_CYCLES 10000 231 #define MAX_FROZEN_CYCLES 100000 227 232 228 233 ///////////////////////////////////////////////////////// … … 240 245 241 246 #define FBUF_BASE 0x00B2000000 242 #define FBUF_SIZE FBUF_X_SIZE * FBUF_Y_SIZE * 2247 #define FBUF_SIZE (FBUF_X_SIZE * FBUF_Y_SIZE * 2) 243 248 244 249 #define BDEV_BASE 0x00B3000000 … … 260 265 #define MEMC_SIZE 0x0010000000 // 256 Mbytes per cluster 261 266 262 #define XICU_BASE 0x00B0000000 267 #ifdef USE_ALMOS 268 #define XICU_BASE 0x0030000000 269 #endif 270 #ifdef USE_GIET 271 #define XICU_BASE 0x00B0000000 272 #endif 263 273 #define XICU_SIZE 0x0000001000 // 4 Kbytes 264 274 265 #define MDMA_BASE 0x00B1000000 275 #ifdef USE_ALMOS 276 #define MDMA_BASE 0x0031000000 277 #endif 278 #ifdef USE_GIET 279 #define MDMA_BASE 0x00B1000000 280 #endif 266 281 #define MDMA_SIZE 0x0000001000 * NB_DMA_CHANNELS // 4 Kbytes per channel 267 282 … … 281 296 #define CDMA_TGTID 8 282 297 298 bool stop_called = false; 299 283 300 ///////////////////////////////// 284 301 int _main(int argc, char *argv[]) … … 291 308 char soft_name[256] = soft_pathname; // pathname to binary code 292 309 #endif 293 uint64_t ncycles = 1000000000;// simulated cycles310 uint64_t ncycles = 0xFFFFFFFFFFFFFFFF; // simulated cycles 294 311 char disk_name[256] = BDEV_IMAGE_NAME; // pathname to the disk image 295 312 char nic_rx_name[256] = NIC_RX_NAME; // pathname to the rx packets file … … 302 319 uint32_t debug_from = 0; // trace start cycle 303 320 uint32_t frozen_cycles = MAX_FROZEN_CYCLES; // monitoring frozen processor 304 size_t cluster_io_id = 0;// index of cluster containing IOs321 size_t cluster_io_id; // index of cluster containing IOs 305 322 struct timeval t1,t2; 306 323 uint64_t ms1,ms2; … … 311 328 for (int n = 1; n < argc; n = n + 2) 312 329 { 313 if ((strcmp(argv[n], "-NCYCLES") == 0) && (n+1<argc))314 { 315 ncycles = atoi(argv[n +1]);316 } 317 else if ((strcmp(argv[n], "-SOFT") == 0) && (n+1<argc))330 if ((strcmp(argv[n], "-NCYCLES") == 0) && (n + 1 < argc)) 331 { 332 ncycles = atoi(argv[n + 1]); 333 } 334 else if ((strcmp(argv[n], "-SOFT") == 0) && (n + 1 < argc)) 318 335 { 319 336 #ifdef USE_ALMOS … … 321 338 #endif 322 339 #ifdef USE_GIET 323 strcpy(soft_name, argv[n +1]);324 #endif 325 } 326 else if ((strcmp(argv[n],"-DISK") == 0) && (n +1<argc))327 { 328 strcpy(disk_name, argv[n +1]);329 } 330 else if ((strcmp(argv[n],"-DEBUG") == 0) && (n +1<argc))340 strcpy(soft_name, argv[n + 1]); 341 #endif 342 } 343 else if ((strcmp(argv[n],"-DISK") == 0) && (n + 1 < argc)) 344 { 345 
strcpy(disk_name, argv[n + 1]); 346 } 347 else if ((strcmp(argv[n],"-DEBUG") == 0) && (n + 1 < argc)) 331 348 { 332 349 debug_ok = true; 333 debug_from = atoi(argv[n +1]);334 } 335 else if ((strcmp(argv[n], "-MEMCID") == 0) && (n+1<argc))336 { 337 debug_memc_id = atoi(argv[n +1]);338 assert( (debug_memc_id < (XMAX*YMAX)) &&350 debug_from = atoi(argv[n + 1]); 351 } 352 else if ((strcmp(argv[n], "-MEMCID") == 0) && (n + 1 < argc)) 353 { 354 debug_memc_id = atoi(argv[n + 1]); 355 assert((debug_memc_id < (XMAX * YMAX)) && 339 356 "debug_memc_id larger than XMAX * YMAX" ); 340 357 } 341 else if ((strcmp(argv[n], "-PROCID") == 0) && (n+1<argc))342 { 343 debug_proc_id = atoi(argv[n +1]);344 assert( (debug_proc_id < (XMAX * YMAX * NB_PROCS_MAX)) &&345 "debug_proc_id larger than XMAX * YMAX * NB_PROCS" 346 } 347 else if ((strcmp(argv[n], "-THREADS") == 0) && ((n +1) < argc))348 { 349 threads_nr = atoi(argv[n +1]);358 else if ((strcmp(argv[n], "-PROCID") == 0) && (n + 1 < argc)) 359 { 360 debug_proc_id = atoi(argv[n + 1]); 361 assert((debug_proc_id < (XMAX * YMAX * NB_PROCS_MAX)) && 362 "debug_proc_id larger than XMAX * YMAX * NB_PROCS"); 363 } 364 else if ((strcmp(argv[n], "-THREADS") == 0) && ((n + 1) < argc)) 365 { 366 threads_nr = atoi(argv[n + 1]); 350 367 threads_nr = (threads_nr < 1) ? 1 : threads_nr; 351 368 } 352 else if ((strcmp(argv[n], "-FROZEN") == 0) && (n +1 < argc))353 { 354 frozen_cycles = atoi(argv[n +1]);355 } 356 else if ((strcmp(argv[n], "-PERIOD") == 0) && (n +1 < argc))357 { 358 debug_period = atoi(argv[n +1]);369 else if ((strcmp(argv[n], "-FROZEN") == 0) && (n + 1 < argc)) 370 { 371 frozen_cycles = atoi(argv[n + 1]); 372 } 373 else if ((strcmp(argv[n], "-PERIOD") == 0) && (n + 1 < argc)) 374 { 375 debug_period = atoi(argv[n + 1]); 359 376 } 360 377 else … … 401 418 #ifdef USE_GIET 402 419 assert( (vci_address_width == 40) and 403 "VCI address width must be 40 bits" ); 404 #endif 420 "VCI address width with the GIET must be 40 bits" ); 421 #endif 422 423 #ifdef USE_ALMOS 424 assert( (vci_address_width == 32) and 425 "VCI address width with ALMOS must be 32 bits" ); 426 #endif 427 405 428 406 429 std::cout << std::endl; … … 454 477 else if (XMAX <= 4) x_width = 2; 455 478 else if (XMAX <= 8) x_width = 3; 456 else 479 else x_width = 4; 457 480 458 481 if (YMAX == 1) y_width = 0; … … 460 483 else if (YMAX <= 4) y_width = 2; 461 484 else if (YMAX <= 8) y_width = 3; 462 else y_width = 4; 485 else y_width = 4; 486 487 488 #ifdef USE_ALMOS 489 cluster_io_id = 0xbfc00000 >> (vci_address_width - x_width - y_width); // index of cluster containing IOs 490 #else 491 cluster_io_id = 0; 492 #endif 463 493 464 494 ///////////////////// … … 808 838 } 809 839 810 for (uint64_t n = 1; n < ncycles ; n++)840 for (uint64_t n = 1; n < ncycles && !stop_called; n++) 811 841 { 812 842 // Monitor a specific address for L1 & L2 caches … … 823 853 } 824 854 825 ms1 = (uint64_t) t1.tv_sec * 1000ULL + (uint64_t)t1.tv_usec / 1000;826 ms2 = (uint64_t) t2.tv_sec * 1000ULL + (uint64_t)t2.tv_usec / 1000;827 std::cerr << "platform clock frequency " << (double) 5000000 / (double)(ms2 - ms1) << "Khz" << std::endl;855 ms1 = (uint64_t) t1.tv_sec * 1000ULL + (uint64_t) t1.tv_usec / 1000; 856 ms2 = (uint64_t) t2.tv_sec * 1000ULL + (uint64_t) t2.tv_usec / 1000; 857 std::cerr << "platform clock frequency " << (double) 5000000 / (double) (ms2 - ms1) << "Khz" << std::endl; 828 858 829 859 if (gettimeofday(&t1, NULL) != 0) … … 919 949 sc_start(sc_core::sc_time(1, SC_NS)); 920 950 } 951 952 953 for (size_t i = 0; i < (XMAX 
* YMAX); i++) 954 { 955 size_t x = i / YMAX; 956 size_t y = i % YMAX; 957 delete clusters[x][y]; 958 } 959 921 960 return EXIT_SUCCESS; 922 961 } 923 962 963 964 void handler(int dummy = 0) { 965 stop_called = true; 966 sc_stop(); 967 } 968 969 924 970 int sc_main (int argc, char *argv[]) 925 971 { 972 signal(SIGINT, handler); 973 926 974 try { 927 975 return _main(argc, argv); -
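For the ALMOS configuration the index of the I/O cluster is no longer hard-wired to 0 but derived from the MIPS reset address 0xBFC00000: shifting out the local-offset bits leaves the cluster index encoded in the top (x_width + y_width) address bits. The following worked example of that arithmetic uses assumed widths (vci_address_width = 32 as required by USE_ALMOS in this changeset, and x_width = y_width = 2 as top.cpp computes for a 4x4 mesh).

#include <cstdint>
#include <cstdio>

int main()
{
    // Assumed parameters: 32-bit addresses with ALMOS, 4x4 mesh.
    const uint32_t vci_address_width = 32;
    const uint32_t x_width = 2, y_width = 2;
    const uint32_t YMAX = 1u << y_width;                  // 4

    // Same expression as in top.cpp: the cluster index is the value of the
    // top (x_width + y_width) bits of the boot address 0xBFC00000.
    const uint32_t cluster_io_id =
        0xbfc00000u >> (vci_address_width - x_width - y_width);

    // Decoded as in the end-of-simulation loop: x = id / YMAX, y = id % YMAX.
    std::printf("cluster_io_id = 0x%x -> x = %u, y = %u\n",
                cluster_io_id, cluster_io_id / YMAX, cluster_io_id % YMAX);
    // 0xBFC00000 >> 28 = 0xB, i.e. the I/O cluster is cluster (2, 3).
    return 0;
}

With USE_GIET the address width stays at 40 bits and cluster_io_id remains 0, as in the alternative branch shown above.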
trunk/platforms/tsar_generic_xbar/top.desc
r438 r504 7 7 8 8 vci_plen_size = 8 9 vci_addr_size = 409 vci_addr_size = 32 10 10 vci_rerror_size = 1 11 11 vci_clen_size = 1 -
trunk/platforms/tsar_generic_xbar/tsar_xbar_cluster/caba/source/src/tsar_xbar_cluster.cpp
r485 r504 145 145 IntTab(cluster_id, tgtid_memc), // TGTID direct space 146 146 (cluster_id << l_width) + nb_procs, // CC_GLOBAL_ID 147 x_width, // Number of x bits in platform 148 y_width, // Number of y bits in platform 147 149 memc_ways, memc_sets, 16, // CACHE SIZE 148 150 3, // MAX NUMBER OF COPIES