Changeset 543 for branches/ODCCP/modules/vci_mem_cache
- Timestamp: Oct 4, 2013, 11:10:51 AM
- File: 1 edited
branches/ODCCP/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
(diff r541:543 — the removed and added sides of the diff contain the same statements and appear to differ only in indentation, so the modified region is listed once below, with the diff line-number gutters stripped; "…" marks lines elided by the diff viewer)

namespace soclib { namespace caba {

const char *tgt_cmd_fsm_str[] =
{
    "TGT_CMD_IDLE",
    "TGT_CMD_ERROR",
    "TGT_CMD_READ",
    "TGT_CMD_WRITE",
    "TGT_CMD_CAS",
    "TGT_CMD_CONFIG"
};
const char *tgt_rsp_fsm_str[] =
{
    "TGT_RSP_CONFIG_IDLE",
    "TGT_RSP_TGT_CMD_IDLE",
    "TGT_RSP_READ_IDLE",
    "TGT_RSP_WRITE_IDLE",
    "TGT_RSP_CAS_IDLE",
    "TGT_RSP_XRAM_IDLE",
    "TGT_RSP_MULTI_ACK_IDLE",
    "TGT_RSP_CLEANUP_IDLE",
    "TGT_RSP_CONFIG",
    "TGT_RSP_TGT_CMD",
    "TGT_RSP_READ",
    "TGT_RSP_WRITE",
    "TGT_RSP_CAS",
    "TGT_RSP_XRAM",
    "TGT_RSP_MULTI_ACK",
    "TGT_RSP_CLEANUP"
};
const char *cc_receive_fsm_str[] =
{
    "CC_RECEIVE_IDLE",
    "CC_RECEIVE_CLEANUP",
    "CC_RECEIVE_CLEANUP_EOP",
    "CC_RECEIVE_MULTI_ACK"
};
const char *cc_send_fsm_str[] =
{
    "CC_SEND_CONFIG_IDLE",
    "CC_SEND_XRAM_RSP_IDLE",
    "CC_SEND_WRITE_IDLE",
    "CC_SEND_CAS_IDLE",
    "CC_SEND_CONFIG_INVAL_HEADER",
    "CC_SEND_CONFIG_INVAL_NLINE",
    "CC_SEND_CONFIG_BRDCAST_HEADER",
    "CC_SEND_CONFIG_BRDCAST_NLINE",
    "CC_SEND_XRAM_RSP_BRDCAST_HEADER",
    "CC_SEND_XRAM_RSP_BRDCAST_NLINE",
    "CC_SEND_XRAM_RSP_INVAL_HEADER",
    "CC_SEND_XRAM_RSP_INVAL_NLINE",
    "CC_SEND_WRITE_BRDCAST_HEADER",
    "CC_SEND_WRITE_BRDCAST_NLINE",
    "CC_SEND_WRITE_UPDT_HEADER",
    "CC_SEND_WRITE_UPDT_NLINE",
    "CC_SEND_WRITE_UPDT_DATA",
    "CC_SEND_CAS_BRDCAST_HEADER",
    "CC_SEND_CAS_BRDCAST_NLINE",
    "CC_SEND_CAS_UPDT_HEADER",
    "CC_SEND_CAS_UPDT_NLINE",
    "CC_SEND_CAS_UPDT_DATA",
    "CC_SEND_CAS_UPDT_DATA_HIGH"
};
const char *multi_ack_fsm_str[] =
{
    "MULTI_ACK_IDLE",
    "MULTI_ACK_UPT_LOCK",
    "MULTI_ACK_UPT_CLEAR",
    "MULTI_ACK_WRITE_RSP"
};
const char *config_fsm_str[] =
{
    "CONFIG_IDLE",
    "CONFIG_LOOP",
    "CONFIG_WAIT",
    "CONFIG_RSP",
    "CONFIG_DIR_REQ",
    "CONFIG_DIR_ACCESS",
    "CONFIG_IVT_LOCK",
    "CONFIG_BC_SEND",
    "CONFIG_INVAL_SEND",
    "CONFIG_HEAP_REQ",
    "CONFIG_HEAP_SCAN",
    "CONFIG_HEAP_LAST",
    "CONFIG_TRT_LOCK",
    "CONFIG_TRT_SET",
    "CONFIG_PUT_REQ"
};
const char *read_fsm_str[] =
{
    "READ_IDLE",
    "READ_DIR_REQ",
    "READ_DIR_LOCK",
    "READ_DIR_HIT",
    "READ_HEAP_REQ",
    "READ_HEAP_LOCK",
    "READ_HEAP_WRITE",
    "READ_HEAP_ERASE",
    "READ_HEAP_LAST",
    "READ_RSP",
    "READ_TRT_LOCK",
    "READ_TRT_SET",
    "READ_TRT_REQ"
};
const char *write_fsm_str[] =
{
    "WRITE_IDLE",
    "WRITE_NEXT",
    "WRITE_DIR_REQ",
    "WRITE_DIR_LOCK",
    "WRITE_DIR_HIT",
    "WRITE_UPT_LOCK",
    "WRITE_UPT_HEAP_LOCK",
    "WRITE_UPT_REQ",
    "WRITE_UPT_NEXT",
    "WRITE_UPT_DEC",
    "WRITE_RSP",
    "WRITE_MISS_TRT_LOCK",
    "WRITE_MISS_TRT_DATA",
    "WRITE_MISS_TRT_SET",
    "WRITE_MISS_XRAM_REQ",
    "WRITE_BC_DIR_READ",
    "WRITE_BC_TRT_LOCK",
    "WRITE_BC_IVT_LOCK",
    "WRITE_BC_DIR_INVAL",
    "WRITE_BC_CC_SEND",
    "WRITE_BC_XRAM_REQ",
    "WRITE_WAIT"
};
const char *ixr_rsp_fsm_str[] =
{
    "IXR_RSP_IDLE",
    "IXR_RSP_ACK",
    "IXR_RSP_TRT_ERASE",
    "IXR_RSP_TRT_READ"
};
const char *xram_rsp_fsm_str[] =
{
    "XRAM_RSP_IDLE",
    "XRAM_RSP_TRT_COPY",
    "XRAM_RSP_TRT_DIRTY",
    "XRAM_RSP_DIR_LOCK",
    "XRAM_RSP_DIR_UPDT",
    "XRAM_RSP_DIR_RSP",
    "XRAM_RSP_IVT_LOCK",
    "XRAM_RSP_INVAL_WAIT",
    "XRAM_RSP_INVAL",
    "XRAM_RSP_WRITE_DIRTY",
    "XRAM_RSP_HEAP_REQ",
    "XRAM_RSP_HEAP_ERASE",
    "XRAM_RSP_HEAP_LAST",
    "XRAM_RSP_ERROR_ERASE",
    "XRAM_RSP_ERROR_RSP"
};
const char *ixr_cmd_fsm_str[] =
{
    "IXR_CMD_READ_IDLE",
    "IXR_CMD_WRITE_IDLE",
    "IXR_CMD_CAS_IDLE",
    "IXR_CMD_XRAM_IDLE",
    "IXR_CMD_CLEANUP_IDLE",
    "IXR_CMD_CONFIG_IDLE",
    "IXR_CMD_READ_TRT",
    "IXR_CMD_WRITE_TRT",
    "IXR_CMD_CAS_TRT",
    "IXR_CMD_XRAM_TRT",
    "IXR_CMD_CLEANUP_TRT",
    "IXR_CMD_CONFIG_TRT",
    "IXR_CMD_READ_SEND",
    "IXR_CMD_WRITE_SEND",
    "IXR_CMD_CAS_SEND",
    "IXR_CMD_XRAM_SEND",
    "IXR_CMD_CLEANUP_DATA_SEND",
    "IXR_CMD_CONFIG_SEND"
};
const char *cas_fsm_str[] =
{
    "CAS_IDLE",
    "CAS_DIR_REQ",
    "CAS_DIR_LOCK",
    "CAS_DIR_HIT_READ",
    "CAS_DIR_HIT_COMPARE",
    "CAS_DIR_HIT_WRITE",
    "CAS_UPT_LOCK",
    "CAS_UPT_HEAP_LOCK",
    "CAS_UPT_REQ",
    "CAS_UPT_NEXT",
    "CAS_BC_TRT_LOCK",
    "CAS_BC_IVT_LOCK",
    "CAS_BC_DIR_INVAL",
    "CAS_BC_CC_SEND",
    "CAS_BC_XRAM_REQ",
    "CAS_RSP_FAIL",
    "CAS_RSP_SUCCESS",
    "CAS_MISS_TRT_LOCK",
    "CAS_MISS_TRT_SET",
    "CAS_MISS_XRAM_REQ",
    "CAS_WAIT"
};
const char *cleanup_fsm_str[] =
{
    "CLEANUP_IDLE",
    "CLEANUP_GET_NLINE",
    "CLEANUP_GET_DATA",
    "CLEANUP_DIR_REQ",
    "CLEANUP_DIR_LOCK",
    "CLEANUP_DIR_WRITE",
    "CLEANUP_HEAP_REQ",
    "CLEANUP_HEAP_LOCK",
    "CLEANUP_HEAP_SEARCH",
    "CLEANUP_HEAP_CLEAN",
    "CLEANUP_HEAP_FREE",
    "CLEANUP_IVT_LOCK",
    "CLEANUP_IVT_DECREMENT",
    "CLEANUP_IVT_CLEAR",
    "CLEANUP_WRITE_RSP",
    "CLEANUP_IXR_REQ",
    "CLEANUP_WAIT",
    "CLEANUP_SEND_CLACK"
};
const char *alloc_dir_fsm_str[] =
{
    "ALLOC_DIR_RESET",
    "ALLOC_DIR_CONFIG",
    "ALLOC_DIR_READ",
    "ALLOC_DIR_WRITE",
    "ALLOC_DIR_CAS",
    "ALLOC_DIR_CLEANUP",
    "ALLOC_DIR_XRAM_RSP"
};
const char *alloc_trt_fsm_str[] =
{
    "ALLOC_TRT_READ",
    "ALLOC_TRT_WRITE",
    "ALLOC_TRT_CAS",
    "ALLOC_TRT_XRAM_RSP",
    "ALLOC_TRT_IXR_RSP",
    "ALLOC_TRT_CLEANUP",
    "ALLOC_TRT_IXR_CMD",
    "ALLOC_TRT_CONFIG"
};
const char *alloc_upt_fsm_str[] =
{
    "ALLOC_UPT_WRITE",
    "ALLOC_UPT_CAS",
    "ALLOC_UPT_MULTI_ACK"
};
const char *alloc_ivt_fsm_str[] =
{
    "ALLOC_IVT_WRITE",
    "ALLOC_IVT_XRAM_RSP",
    "ALLOC_IVT_CLEANUP",
    "ALLOC_IVT_CAS",
    "ALLOC_IVT_CONFIG"
};
const char *alloc_heap_fsm_str[] =
{
    "ALLOC_HEAP_RESET",
    "ALLOC_HEAP_READ",
    "ALLOC_HEAP_WRITE",
    "ALLOC_HEAP_CAS",
    "ALLOC_HEAP_CLEANUP",
    "ALLOC_HEAP_XRAM_RSP",
    "ALLOC_HEAP_CONFIG"
};

#define tmpl(x) \
    template<typename vci_param_int, \
             typename vci_param_ext, \
             size_t dspin_in_width, \
             size_t dspin_out_width> x \
    VciMemCache<vci_param_int, vci_param_ext, dspin_in_width, dspin_out_width>

using namespace soclib::common;

////////////////////////////////
//    Constructor
////////////////////////////////
tmpl(/**/) ::VciMemCache(
    sc_module_name     name,
    const MappingTable &mtp,               // mapping table for direct network
    const MappingTable &mtx,               // mapping table for external network
    const IntTab       &srcid_x,           // global index on external network
    const IntTab       &tgtid_d,           // global index on direct network
    const size_t       cc_global_id,       // global index on cc network
    const size_t       nways,              // number of ways per set
    const size_t       nsets,              // number of associative sets
    const size_t       nwords,             // number of words in cache line
    const size_t       max_copies,         // max number of copies in heap
    const size_t       heap_size,          // number of heap entries
    const size_t       trt_lines,          // number of TRT entries
    const size_t       upt_lines,          // number of UPT entries
    const size_t       ivt_lines,          // number of IVT entries
    const size_t       debug_start_cycle,
    const bool         debug_ok)

    : soclib::caba::BaseModule(name),

      //m_monitor_ok(false),

      p_clk("p_clk"),
      p_resetn("p_resetn"),
      p_vci_tgt("p_vci_tgt"),
      p_vci_ixr("p_vci_ixr"),
      p_dspin_p2m("p_dspin_p2m"),
      p_dspin_m2p("p_dspin_m2p"),
      p_dspin_clack("p_dspin_clack"),

      m_seglist(mtp.getSegmentList(tgtid_d)),
      m_nseg(0),
      m_srcid_x(mtx.indexForId(srcid_x)),
      m_initiators(1 << vci_param_int::S),
      m_heap_size(heap_size),
      m_ways(nways),
      m_sets(nsets),
      m_words(nwords),
      m_cc_global_id(cc_global_id),
      m_debug_start_cycle(debug_start_cycle),
      m_debug_ok(debug_ok),
      m_trt_lines(trt_lines),
      m_trt(this->name(), trt_lines, nwords),
      m_upt_lines(upt_lines),
      m_upt(upt_lines),
      m_ivt(ivt_lines),
      m_cache_directory(nways, nsets, nwords, vci_param_int::N),
      m_cache_data(nways, nsets, nwords),
      m_heap(m_heap_size),
      m_max_copies(max_copies),
      m_llsc_table(),

#define L2 soclib::common::uint32_log2
      m_x(L2(m_words), 2),
      m_y(L2(m_sets), L2(m_words) + 2),
      m_z(vci_param_int::N - L2(m_sets) - L2(m_words) - 2, L2(m_sets) + L2(m_words) + 2),
      m_nline(vci_param_int::N - L2(m_words) - 2, L2(m_words) + 2),
#undef L2

      // XMIN(5 bits) / XMAX(5 bits) / YMIN(5 bits) / YMAX(5 bits)
      //   0b00000    /   0b11111    /   0b00000    /   0b11111
      m_broadcast_boundaries(0x7C1F),

      // FIFOs
      m_cmd_read_addr_fifo("m_cmd_read_addr_fifo", 4),
      m_cmd_read_length_fifo("m_cmd_read_length_fifo", 4),
      m_cmd_read_srcid_fifo("m_cmd_read_srcid_fifo", 4),
      m_cmd_read_trdid_fifo("m_cmd_read_trdid_fifo", 4),
      m_cmd_read_pktid_fifo("m_cmd_read_pktid_fifo", 4),

      m_cmd_write_addr_fifo("m_cmd_write_addr_fifo", 8),
      m_cmd_write_eop_fifo("m_cmd_write_eop_fifo", 8),
      m_cmd_write_srcid_fifo("m_cmd_write_srcid_fifo", 8),
      m_cmd_write_trdid_fifo("m_cmd_write_trdid_fifo", 8),
      m_cmd_write_pktid_fifo("m_cmd_write_pktid_fifo", 8),
      m_cmd_write_data_fifo("m_cmd_write_data_fifo", 8),
      m_cmd_write_be_fifo("m_cmd_write_be_fifo", 8),

      m_cmd_cas_addr_fifo("m_cmd_cas_addr_fifo", 4),
      m_cmd_cas_eop_fifo("m_cmd_cas_eop_fifo", 4),
      m_cmd_cas_srcid_fifo("m_cmd_cas_srcid_fifo", 4),
      m_cmd_cas_trdid_fifo("m_cmd_cas_trdid_fifo", 4),
      m_cmd_cas_pktid_fifo("m_cmd_cas_pktid_fifo", 4),
      m_cmd_cas_wdata_fifo("m_cmd_cas_wdata_fifo", 4),

      m_cc_receive_to_cleanup_fifo("m_cc_receive_to_cleanup_fifo", 4),
      m_cc_receive_to_multi_ack_fifo("m_cc_receive_to_multi_ack_fifo", 4),

      r_tgt_cmd_fsm("r_tgt_cmd_fsm"),

      r_config_fsm("r_config_fsm"),

      m_config_to_cc_send_inst_fifo("m_config_to_cc_send_inst_fifo", 8),
      m_config_to_cc_send_srcid_fifo("m_config_to_cc_send_srcid_fifo", 8),

      r_read_fsm("r_read_fsm"),

      r_write_fsm("r_write_fsm"),

      m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo", 8),
      m_write_to_cc_send_srcid_fifo("m_write_to_cc_send_srcid_fifo", 8),

      r_multi_ack_fsm("r_multi_ack_fsm"),

      r_cleanup_fsm("r_cleanup_fsm"),

      r_cas_fsm("r_cas_fsm"),

      m_cas_to_cc_send_inst_fifo("m_cas_to_cc_send_inst_fifo", 8),
      m_cas_to_cc_send_srcid_fifo("m_cas_to_cc_send_srcid_fifo", 8),

      r_ixr_rsp_fsm("r_ixr_rsp_fsm"),
      r_xram_rsp_fsm("r_xram_rsp_fsm"),

      m_xram_rsp_to_cc_send_inst_fifo("m_xram_rsp_to_cc_send_inst_fifo", 8),
      m_xram_rsp_to_cc_send_srcid_fifo("m_xram_rsp_to_cc_send_srcid_fifo", 8),

      r_ixr_cmd_fsm("r_ixr_cmd_fsm"),

      r_tgt_rsp_fsm("r_tgt_rsp_fsm"),

      r_cc_send_fsm("r_cc_send_fsm"),
      r_cc_receive_fsm("r_cc_receive_fsm"),

      r_alloc_dir_fsm("r_alloc_dir_fsm"),
      r_alloc_dir_reset_cpt("r_alloc_dir_reset_cpt"),
      r_alloc_trt_fsm("r_alloc_trt_fsm"),
      r_alloc_upt_fsm("r_alloc_upt_fsm"),
      r_alloc_ivt_fsm("r_alloc_ivt_fsm"),
      r_alloc_heap_fsm("r_alloc_heap_fsm"),
      r_alloc_heap_reset_cpt("r_alloc_heap_reset_cpt")
{
    std::cout << " - Building VciMemCache : " << name << std::endl;

    assert(IS_POW_OF_2(nsets));
    assert(IS_POW_OF_2(nwords));
    assert(IS_POW_OF_2(nways));
    assert(nsets);
    assert(nwords);
    assert(nways);

    // check Transaction table size
    assert((uint32_log2(trt_lines) <= vci_param_ext::T) and
           "MEMC ERROR : Need more bits for VCI TRDID field");

    // check internal and external data width
    assert((vci_param_int::B == 4) and
           "MEMC ERROR : VCI internal data width must be 32 bits");

    assert((vci_param_ext::B == 8) and
           "MEMC ERROR : VCI external data width must be 64 bits");

    // Check coherence between internal & external addresses
    assert((vci_param_int::N == vci_param_ext::N) and
           "MEMC ERROR : VCI internal & external addresses must have the same width");

    // Get the segments associated to the MemCache
    std::list<soclib::common::Segment>::iterator seg;
    size_t i = 0;

    for(seg = m_seglist.begin(); seg != m_seglist.end(); seg++)
    {
        std::cout << " => segment " << seg->name()
                  << " / base = " << std::hex << seg->baseAddress()
                  << " / size = " << seg->size() << std::endl;
        m_nseg++;
    }

    m_seg = new soclib::common::Segment*[m_nseg];

    for(seg = m_seglist.begin(); seg != m_seglist.end(); seg++)
    {
        if (seg->special()) m_seg_config = i;
        m_seg[i] = &(*seg);
        i++;
    }

    // Allocation for IXR_RSP FSM
    r_ixr_rsp_to_xram_rsp_rok = new sc_signal<bool>[m_trt_lines];
    //r_ixr_rsp_to_xram_rsp_no_coherent = new sc_signal<bool>[m_trt_lines];

    // Allocation for XRAM_RSP FSM
    r_xram_rsp_victim_data = new sc_signal<data_t>[nwords];
    r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords];

    // Allocation for READ FSM
    r_read_data = new sc_signal<data_t>[nwords];
    r_read_to_tgt_rsp_data = new sc_signal<data_t>[nwords];

    // Allocation for WRITE FSM
    r_write_data = new sc_signal<data_t>[nwords];
    r_write_be = new sc_signal<be_t>[nwords];
    r_write_to_cc_send_data = new sc_signal<data_t>[nwords];
    r_write_to_cc_send_be = new sc_signal<be_t>[nwords];

    // Allocation for CAS FSM
    r_cas_data = new sc_signal<data_t>[nwords];
    r_cas_rdata = new sc_signal<data_t>[2];

    // Allocation for ODCCP
    r_cleanup_data = new sc_signal<data_t>[nwords];
    r_cleanup_to_ixr_cmd_data = new sc_signal<data_t>[nwords];

    // Allocation for IXR_CMD FSM
    r_ixr_cmd_wdata = new sc_signal<data_t>[nwords];

    // Allocation for debug
    m_debug_previous_data = new data_t[nwords];
    m_debug_data = new data_t[nwords];

    SC_METHOD(transition);
    dont_initialize();
    sensitive << p_clk.pos();

    SC_METHOD(genMoore);
    dont_initialize();
    sensitive << p_clk.neg();
} // end constructor


/////////////////////////////////////////////////////
tmpl(void) ::cache_monitor(addr_t addr)
/////////////////////////////////////////////////////
{
    size_t way = 0;
    size_t set = 0;
    DirectoryEntry entry = m_cache_directory.read_neutral(addr, &way, &set);

    // read data and compute data_change
    bool data_change = false;
    if (entry.valid)
    {
        for (size_t word = 0; word < m_words; word++)
        {
            m_debug_data[word] = m_cache_data.read(way, set, word);
            if (m_debug_previous_valid and
                (m_debug_data[word] != m_debug_previous_data[word]))
            {
                data_change = true;
            }
        }
    }

    // print values if any change
    if ((entry.valid != m_debug_previous_valid) or
        (entry.valid and (entry.count != m_debug_previous_count)) or
        (entry.valid and (entry.dirty != m_debug_previous_dirty)) or data_change)
    {
        std::cout << "Monitor MEMC " << name()
                  << " at cycle " << std::dec << m_cpt_cycles
                  << " for address " << std::hex << addr
                  << " / VAL = " << std::dec << entry.valid
                  << " / WAY = " << way
                  << " / COUNT = " << entry.count
                  << " / DIRTY = " << entry.dirty
                  << " / DATA_CHANGE = " << data_change
                  << std::endl;
        std::cout << std::hex << " /0:" << m_debug_data[0]
                  << "/1:" << m_debug_data[1]
                  << "/2:" << m_debug_data[2]
                  << "/3:" << m_debug_data[3]
                  << "/4:" << m_debug_data[4]
                  << "/5:" << m_debug_data[5]
                  << "/6:" << m_debug_data[6]
                  << "/7:" << m_debug_data[7]
                  << "/8:" << m_debug_data[8]
                  << "/9:" << m_debug_data[9]
                  << "/A:" << m_debug_data[10]
                  << "/B:" << m_debug_data[11]
                  << "/C:" << m_debug_data[12]
                  << "/D:" << m_debug_data[13]
                  << "/E:" << m_debug_data[14]
                  << "/F:" << m_debug_data[15]
                  << std::endl;
    }

    // register values
    m_debug_previous_count = entry.count;
    m_debug_previous_valid = entry.valid;
    m_debug_previous_dirty = entry.dirty;
    for (size_t word = 0; word < m_words; word++)
        m_debug_previous_data[word] = m_debug_data[word];
}

//////////////////////////////////////////////////
tmpl(void) ::print_trace()
//////////////////////////////////////////////////
{
    std::cout << "MEMC " << name() << std::endl;
    std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()]
              << " | " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()]
              << " | " << read_fsm_str[r_read_fsm.read()]
              …
              << " | " << config_fsm_str[r_config_fsm.read()]
              << " | " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl;
    std::cout << " " << cc_send_fsm_str[r_cc_send_fsm.read()]
              << " | " << cc_receive_fsm_str[r_cc_receive_fsm.read()]
              << " | " << multi_ack_fsm_str[r_multi_ack_fsm.read()]
              …
              << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()]
              << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl;
    std::cout << " " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()]
              << " | " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()]
              << " | " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()]
              << " | " << alloc_ivt_fsm_str[r_alloc_ivt_fsm.read()]
              << " | " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl;
}

/////////////////////////////////////////
tmpl(void) ::clear_stats()
/////////////////////////////////////////
{
    m_cpt_cycles = 0;
    m_cpt_read = 0;
    m_cpt_read_miss = 0;
    m_cpt_write = 0;
    m_cpt_write_miss = 0;
    m_cpt_write_cells = 0;
    m_cpt_write_dirty = 0;
    m_cpt_update = 0;
    m_cpt_update_mult = 0;
    m_cpt_inval_brdcast = 0;
    m_cpt_inval = 0;
    m_cpt_inval_mult = 0;
    m_cpt_cleanup = 0;
    m_cpt_cleanup_data = 0;
    m_cpt_ll = 0;
    m_cpt_sc = 0;
    m_cpt_cas = 0;
    m_cpt_trt_full = 0;
    m_cpt_trt_rb = 0;
    m_cpt_dir_unused = 0;
    m_cpt_ivt_unused = 0;
    m_cpt_heap_unused = 0;
    m_cpt_trt_unused = 0;
    m_cpt_read_fsm_n_dir_lock = 0;
    m_cpt_read_fsm_dir_lock = 0;
    m_cpt_read_fsm_dir_used = 0;
    m_cpt_read_fsm_trt_lock = 0;
    m_cpt_read_fsm_heap_lock = 0;
    m_cpt_write_fsm_dir_lock = 0;
    m_cpt_write_fsm_n_dir_lock = 0;
    m_cpt_write_fsm_upt_lock = 0;
    m_cpt_write_fsm_heap_lock = 0;
    m_cpt_write_fsm_dir_used = 0;
    m_cpt_write_fsm_trt_lock = 0;
    m_cpt_cas_fsm_n_dir_lock = 0;
    m_cpt_cas_fsm_dir_lock = 0;
    m_cpt_cas_fsm_upt_lock = 0;
    m_cpt_cas_fsm_heap_lock = 0;
    m_cpt_cas_fsm_trt_lock = 0;
    m_cpt_cas_fsm_dir_used = 0;
    m_cpt_xram_rsp_fsm_n_dir_lock = 0;
    m_cpt_xram_rsp_fsm_dir_lock = 0;
    m_cpt_xram_rsp_fsm_trt_lock = 0;
    m_cpt_xram_rsp_fsm_upt_lock = 0;
    m_cpt_xram_rsp_fsm_heap_lock = 0;
    m_cpt_xram_rsp_fsm_dir_used = 0;
    m_cpt_cleanup_fsm_dir_lock = 0;
    m_cpt_cleanup_fsm_n_dir_lock = 0;
    m_cpt_cleanup_fsm_heap_lock = 0;
    m_cpt_cleanup_fsm_upt_lock = 0;
    m_cpt_cleanup_fsm_dir_used = 0;
    m_cpt_ixr_fsm_trt_lock = 0;
    m_cpt_multi_ack_fsm_upt_lock = 0;
}

/////////////////////////////////////////
tmpl(void) ::print_stats()
/////////////////////////////////////////
{
    std::cout << "----------------------------------" << std::dec << std::endl;
    std::cout
        << "MEM_CACHE " << name() << " / Time = " << m_cpt_cycles << std::endl
        << "- READ RATE = " << (double) m_cpt_read/m_cpt_cycles << std::endl
        << "- READ TOTAL = " << m_cpt_read << std::endl
        << "- READ MISS RATE = " << (double) m_cpt_read_miss/m_cpt_read << std::endl
        << "- WRITE RATE = " << (double) m_cpt_write/m_cpt_cycles << std::endl
        << "- WRITE TOTAL = " << m_cpt_write << std::endl
        << "- WRITE MISS RATE = " << (double) m_cpt_write_miss/m_cpt_write << std::endl
        << "- WRITE BURST LENGTH = " << (double) m_cpt_write_cells/m_cpt_write << std::endl
        << "- WRITE BURST TOTAL = " << m_cpt_write_cells << std::endl
        << "- REQUESTS TRT FULL = " << m_cpt_trt_full << std::endl
        << "- READ TRT BLOKED HIT = " << m_cpt_trt_rb << std::endl
        << "- UPDATE RATE = " << (double) m_cpt_update/m_cpt_cycles << std::endl
        << "- UPDATE ARITY = " << (double) m_cpt_update_mult/m_cpt_update << std::endl
        << "- INVAL MULTICAST RATE = " << (double) (m_cpt_inval-m_cpt_inval_brdcast)/m_cpt_cycles << std::endl
        << "- INVAL MULTICAST ARITY = " << (double) m_cpt_inval_mult/(m_cpt_inval-m_cpt_inval_brdcast) << std::endl
        << "- INVAL BROADCAST RATE = " << (double) m_cpt_inval_brdcast/m_cpt_cycles << std::endl
        << "- SAVE DIRTY RATE = " << (double) m_cpt_write_dirty/m_cpt_cycles << std::endl
        << "- CLEANUP RATE = " << (double) m_cpt_cleanup/m_cpt_cycles << std::endl
        << "- CLEANUP TOTAL = " << (double) m_cpt_cleanup << std::endl
        << "- CLEANUP WITH DATA RATE = " << (double) m_cpt_cleanup_data/m_cpt_cycles << std::endl
        << "- CLEANUP WITH DATA TOTAL = " << (double) m_cpt_cleanup_data << std::endl
        << "- LL RATE = " << (double) m_cpt_ll/m_cpt_cycles << std::endl
        << "- SC RATE = " << (double) m_cpt_sc/m_cpt_cycles << std::endl
        << "- CAS RATE = " << (double) m_cpt_cas/m_cpt_cycles << std::endl << std::endl;

    /* << "- WAIT DIR LOCK in READ_FSM = " << (double) m_cpt_read_fsm_dir_lock/m_cpt_read_fsm_n_dir_lock << std::endl
       << "- NB CYCLES IN DIR LOCK in READ_FSM = " << (double) m_cpt_read_fsm_dir_used/m_cpt_read_fsm_n_dir_lock << std::endl
       << "- WAIT DIR LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_dir_lock/m_cpt_write_fsm_n_dir_lock << std::endl
       << "- NB CYCLES IN DIR LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_dir_used/m_cpt_write_fsm_n_dir_lock << std::endl
       << "- WAIT DIR LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_dir_lock/m_cpt_xram_rsp_fsm_n_dir_lock << std::endl
       << "- NB CYCLES IN DIR LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_dir_used/m_cpt_xram_rsp_fsm_n_dir_lock << std::endl
       << "- WAIT DIR LOCK in CLEANUP_FSM = " << (double) m_cpt_cleanup_fsm_dir_lock/m_cpt_cleanup_fsm_n_dir_lock << std::endl
       << "- NB CYCLES IN DIR LOCK in CLEANUP_FSM = " << (double) m_cpt_cleanup_fsm_dir_used/m_cpt_cleanup_fsm_n_dir_lock << std::endl
       << "- WAIT DIR LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_dir_lock/m_cpt_cas_fsm_n_dir_lock << std::endl
       << "- NB CYCLES IN LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_dir_used/m_cpt_cas_fsm_n_dir_lock << std::endl
       << "- DIR UNUSED RATE = " << (double) m_cpt_dir_unused/m_cpt_cycles << std::endl << std::endl

       << "- WAIT TRT LOCK in READ_FSM = " << (double) m_cpt_read_fsm_trt_lock/m_cpt_read_fsm_n_trt_lock << std::endl
       << "- NB CYCLES IN TRT LOCK in READ_FSM = " << (double) m_cpt_read_fsm_trt_used/m_cpt_read_fsm_n_trt_lock << std::endl
       << "- WAIT TRT LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_trt_lock/m_cpt_write_fsm_n_trt_lock << std::endl
       << "- NB CYCLES IN TRT LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_trt_used/m_cpt_write_fsm_n_trt_lock << std::endl
       << "- WAIT TRT LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_trt_lock/m_cpt_cas_fsm_n_trt_lock << std::endl
       << "- NB CYCLES IN TRT LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_trt_used/m_cpt_cas_fsm_n_trt_lock << std::endl
       << "- WAIT TRT LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_trt_lock/m_cpt_xram_rsp_fsm_n_trt_lock << std::endl
       << "- NB CYCLES IN TRT LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_trt_used/m_cpt_xram_rsp_fsm_n_trt_lock << std::endl
       << "- WAIT TRT LOCK in IXR_FSM = " << (double) m_cpt_ixr_fsm_trt_lock/m_cpt_ixr_fsm_n_trt_lock << std::endl
       << "- NB CYCLES IN TRT LOCK in IXR_FSM = " << (double) m_cpt_ixr_fsm_trt_used/m_cpt_ixr_fsm_n_trt_lock << std::endl
       << "- TRT UNUSED RATE = " << (double) m_cpt_trt_unused/m_cpt_cycles << std::endl << std::endl

       << "- WAIT UPT LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_upt_lock/m_cpt_write_fsm_n_upt_lock << std::endl
       << "- NB CYCLES IN UPT LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_upt_used/m_cpt_write_fsm_n_upt_lock << std::endl
       << "- WAIT UPT LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_upt_lock/m_cpt_xram_rsp_fsm_n_upt_lock << std::endl
       << "- NB CYCLES IN UPT LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_upt_used/m_cpt_xram_rsp_fsm_n_upt_lock << std::endl
       << "- WAIT UPT LOCK in MULTIACK_FSM = " << (double) m_cpt_multi_ack_fsm_upt_lock/m_cpt_multi_ack_fsm_n_upt_lock << std::endl
       << "- NB CYCLES IN UPT LOCK in MULTIACK_FSM = " << (double) m_cpt_multi_ack_fsm_upt_used/m_cpt_multi_ack_fsm_n_upt_lock << std::endl
       << "- WAIT UPT LOCK in CLEANUP_FSM = " << (double) m_cpt_cleanup_fsm_upt_lock/m_cpt_cleanup_fsm_n_upt_lock << std::endl
       << "- NB CYCLES IN UPT LOCK in CLEANUP_FSM = " << (double) m_cpt_cleanup_fsm_upt_used/m_cpt_cleanup_fsm_n_upt_lock << std::endl
       << "- WAIT UPT LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_upt_lock/m_cpt_cas_fsm_n_upt_lock << std::endl
       << "- NB CYCLES IN UPT LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_upt_used/m_cpt_cas_fsm_n_upt_lock << std::endl
       << "- IVT UNUSED RATE = " << (double) m_cpt_ivt_unused/m_cpt_cycles << std::endl << std::endl

       << "- WAIT HEAP LOCK in READ_FSM = " << (double) m_cpt_read_fsm_heap_lock/m_cpt_read_fsm_n_heap_lock << std::endl
       << "- NB CYCLES IN HEAP LOCK in READ_FSM = " << (double) m_cpt_read_fsm_heap_used/m_cpt_read_fsm_n_heap_lock << std::endl
       << "- WAIT HEAP LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_heap_lock/m_cpt_write_fsm_n_heap_lock << std::endl
       << "- NB CYCLES IN HEAP LOCK in WRITE_FSM = " << (double) m_cpt_write_fsm_heap_used/m_cpt_write_fsm_n_heap_lock << std::endl
       << "- WAIT HEAP LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_heap_lock/m_cpt_xram_rsp_fsm_n_heap_lock << std::endl
       << "- NB CYCLES IN HEAP LOCK in XRAM_FSM = " << (double) m_cpt_xram_rsp_fsm_heap_used/m_cpt_xram_rsp_fsm_n_heap_lock << std::endl
       << "- WAIT HEAP LOCK in CLEANUP_FSM = " << (double) m_cpt_cleanup_fsm_heap_lock/m_cpt_cleanup_fsm_n_heap_lock << std::endl
       << "- NB CYCLES IN HEAP LOCK in CLEANUP_FSM = " << (double) m_cpt_cleanup_fsm_heap_used/m_cpt_cleanup_fsm_n_heap_lock << std::endl
       << "- WAIT HEAP LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_heap_lock/m_cpt_cas_fsm_n_heap_lock << std::endl
       << "- NB CYCLES IN HEAP LOCK in CAS_FSM = " << (double) m_cpt_cas_fsm_heap_used/m_cpt_cas_fsm_n_heap_lock << std::endl
       << "- HEAP UNUSED RATE = " << (double) m_cpt_heap_unused/m_cpt_cycles << std::endl; */
}

/////////////////////////////////
tmpl(/**/) ::~VciMemCache()
/////////////////////////////////
{
    delete [] r_ixr_rsp_to_xram_rsp_rok;
    //delete [] r_ixr_rsp_to_xram_rsp_no_coherent;

    delete [] r_xram_rsp_victim_data;
    delete [] r_xram_rsp_to_tgt_rsp_data;

    delete [] r_read_data;
    delete [] r_read_to_tgt_rsp_data;

    delete [] r_write_data;
    delete [] r_write_be;
    delete [] r_write_to_cc_send_data;

    delete [] r_cleanup_data;
    delete [] r_ixr_cmd_data;
    delete [] r_cleanup_to_ixr_cmd_data;
}

//////////////////////////////////
tmpl(void) ::transition()
//////////////////////////////////
{
    using soclib::common::uint32_log2;

    // RESET
    if(! p_resetn.read())
    {

        // Initializing FSMs
        r_tgt_cmd_fsm = TGT_CMD_IDLE;
        r_config_fsm = CONFIG_IDLE;
        r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE;
        r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE;
        r_cc_receive_fsm = CC_RECEIVE_IDLE;
        r_multi_ack_fsm = MULTI_ACK_IDLE;
        r_read_fsm = READ_IDLE;
        r_write_fsm = WRITE_IDLE;
        r_cas_fsm = CAS_IDLE;
        r_cleanup_fsm = CLEANUP_IDLE;
        r_alloc_dir_fsm = ALLOC_DIR_RESET;
        r_alloc_heap_fsm = ALLOC_HEAP_RESET;
        r_alloc_trt_fsm = ALLOC_TRT_READ;
        r_alloc_upt_fsm = ALLOC_UPT_WRITE;
        r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP;
        r_ixr_rsp_fsm = IXR_RSP_IDLE;
        r_xram_rsp_fsm = XRAM_RSP_IDLE;
        r_ixr_cmd_fsm = IXR_CMD_READ_IDLE;

        m_debug = false;
        m_debug_previous_valid = false;
        m_debug_previous_dirty = false;
        m_debug_previous_count = 0;

        // Initializing Tables
        m_trt.init();
        m_upt.init();
        m_ivt.init();
        m_llsc_table.init();

        // initializing FIFOs and communication Buffers

        m_cmd_read_addr_fifo.init();
        m_cmd_read_length_fifo.init();
        m_cmd_read_srcid_fifo.init();
        m_cmd_read_trdid_fifo.init();
        m_cmd_read_pktid_fifo.init();

        m_cmd_write_addr_fifo.init();
        m_cmd_write_eop_fifo.init();
        m_cmd_write_srcid_fifo.init();
        m_cmd_write_trdid_fifo.init();
        m_cmd_write_pktid_fifo.init();
        m_cmd_write_data_fifo.init();

        m_cmd_cas_addr_fifo.init();
        m_cmd_cas_srcid_fifo.init();
        m_cmd_cas_trdid_fifo.init();
        m_cmd_cas_pktid_fifo.init();
        m_cmd_cas_wdata_fifo.init();
        m_cmd_cas_eop_fifo.init();

        r_config_cmd = MEMC_CMD_NOP;
        r_config_lock = false;

        m_config_to_cc_send_inst_fifo.init();
        m_config_to_cc_send_srcid_fifo.init();

        r_tgt_cmd_to_tgt_rsp_req = false;

        r_read_to_tgt_rsp_req = false;
        r_read_to_ixr_cmd_req = false;

        r_write_to_tgt_rsp_req = false;
        r_write_to_ixr_cmd_req = false;
        r_write_to_cc_send_multi_req = false;
        r_write_to_cc_send_brdcast_req = false;
        r_write_to_multi_ack_req = false;

        m_write_to_cc_send_inst_fifo.init();
        m_write_to_cc_send_srcid_fifo.init();

        r_cleanup_to_tgt_rsp_req = false;

        m_cc_receive_to_cleanup_fifo.init();

        r_multi_ack_to_tgt_rsp_req = false;

        m_cc_receive_to_multi_ack_fifo.init();

        r_cas_to_tgt_rsp_req = false;
        r_cas_cpt = 0;
        r_cas_lfsr = -1;
        r_cas_to_ixr_cmd_req = false;
        r_cas_to_cc_send_multi_req = false;
        r_cas_to_cc_send_brdcast_req = false;

        m_cas_to_cc_send_inst_fifo.init();
        m_cas_to_cc_send_srcid_fifo.init();

        for(size_t i = 0; i < m_trt_lines; i++)
        {
            r_ixr_rsp_to_xram_rsp_rok[i] = false;
            //r_ixr_rsp_to_xram_rsp_no_coherent[i] = false;
        }

        r_xram_rsp_to_tgt_rsp_req = false;
        r_xram_rsp_to_cc_send_multi_req = false;
        r_xram_rsp_to_cc_send_brdcast_req = false;
        r_xram_rsp_to_ixr_cmd_req = false;
        r_xram_rsp_trt_index = 0;

        m_xram_rsp_to_cc_send_inst_fifo.init();
        m_xram_rsp_to_cc_send_srcid_fifo.init();

        r_alloc_dir_reset_cpt = 0;
        r_alloc_heap_reset_cpt = 0;

        r_tgt_rsp_key_sent = false;

        // ODCCP
        r_cleanup_data_index = 0;
        r_cleanup_trdid = 0;
        r_cleanup_pktid = 0;
        r_cleanup_contains_data = false;
        r_cleanup_to_ixr_cmd_req = false;
        //r_cleanup_to_ixr_cmd_l1_dirty_ncc = false;
        //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false;
        r_cleanup_to_ixr_cmd_srcid = 0;
        r_cleanup_to_ixr_cmd_index = 0;
        r_cleanup_to_ixr_cmd_pktid = 0;
        r_cleanup_to_ixr_cmd_nline = 0;
        for (size_t word = 0; word < m_words; word ++)
        {
            r_cleanup_to_ixr_cmd_data[word] = 0;
            r_cleanup_data[word] = 0;
            r_ixr_cmd_wdata[word] = 0;
        }


        // Activity counters
        m_cpt_cycles = 0;
        m_cpt_read = 0;
        m_cpt_read_miss = 0;
        m_cpt_write = 0;
        m_cpt_write_miss = 0;
        m_cpt_write_cells = 0;
        m_cpt_write_dirty = 0;
        m_cpt_update = 0;
        m_cpt_update_mult = 0;
        m_cpt_inval_brdcast = 0;
        m_cpt_inval = 0;
        m_cpt_inval_mult = 0;
        m_cpt_cleanup = 0;
        m_cpt_cleanup_data = 0;
        m_cpt_ll = 0;
        m_cpt_sc = 0;
        m_cpt_cas = 0;
        m_cpt_trt_full = 0;
        m_cpt_trt_rb = 0;
        m_cpt_dir_unused = 0;
        m_cpt_ivt_unused = 0;
        m_cpt_heap_unused = 0;
        m_cpt_trt_unused = 0;
        m_cpt_read_fsm_n_dir_lock = 0;
        m_cpt_read_fsm_dir_lock = 0;
        m_cpt_read_fsm_dir_used = 0;
        m_cpt_read_fsm_trt_lock = 0;
        m_cpt_read_fsm_heap_lock = 0;
        m_cpt_write_fsm_dir_lock = 0;
        m_cpt_write_fsm_n_dir_lock = 0;
        m_cpt_write_fsm_upt_lock = 0;
        m_cpt_write_fsm_heap_lock = 0;
        m_cpt_write_fsm_dir_used = 0;
        m_cpt_write_fsm_trt_lock = 0;
        m_cpt_cas_fsm_n_dir_lock = 0;
        m_cpt_cas_fsm_dir_lock = 0;
        m_cpt_cas_fsm_upt_lock = 0;
        m_cpt_cas_fsm_heap_lock = 0;
        m_cpt_cas_fsm_trt_lock = 0;
        m_cpt_cas_fsm_dir_used = 0;
        m_cpt_xram_rsp_fsm_n_dir_lock = 0;
        m_cpt_xram_rsp_fsm_dir_lock = 0;
        m_cpt_xram_rsp_fsm_trt_lock = 0;
        m_cpt_xram_rsp_fsm_upt_lock = 0;
        m_cpt_xram_rsp_fsm_heap_lock = 0;
        m_cpt_xram_rsp_fsm_dir_used = 0;
        m_cpt_cleanup_fsm_dir_lock = 0;
        m_cpt_cleanup_fsm_n_dir_lock = 0;
        m_cpt_cleanup_fsm_heap_lock = 0;
        m_cpt_cleanup_fsm_upt_lock = 0;
        m_cpt_cleanup_fsm_dir_used = 0;
        m_cpt_ixr_fsm_trt_lock = 0;
        m_cpt_multi_ack_fsm_upt_lock = 0;

        return;
    }

    bool cmd_read_fifo_put = false;
    bool cmd_read_fifo_get = false;

    bool cmd_write_fifo_put = false;
    bool cmd_write_fifo_get = false;

    bool cmd_cas_fifo_put = false;
    bool cmd_cas_fifo_get = false;

    bool cc_receive_to_cleanup_fifo_get = false;
    bool cc_receive_to_cleanup_fifo_put = false;

    bool cc_receive_to_multi_ack_fifo_get = false;
    bool cc_receive_to_multi_ack_fifo_put = false;

    bool write_to_cc_send_fifo_put = false;
    bool write_to_cc_send_fifo_get = false;
    bool write_to_cc_send_fifo_inst = false;
    size_t write_to_cc_send_fifo_srcid = 0;

    bool xram_rsp_to_cc_send_fifo_put = false;
    bool xram_rsp_to_cc_send_fifo_get = false;
    bool xram_rsp_to_cc_send_fifo_inst = false;
    size_t xram_rsp_to_cc_send_fifo_srcid = 0;

    bool config_to_cc_send_fifo_put = false;
    bool config_to_cc_send_fifo_get = false;
    bool config_to_cc_send_fifo_inst = false;
    size_t config_to_cc_send_fifo_srcid = 0;

    bool cas_to_cc_send_fifo_put = false;
    bool cas_to_cc_send_fifo_get = false;
    bool cas_to_cc_send_fifo_inst = false;
    size_t cas_to_cc_send_fifo_srcid = 0;

    m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;

#if DEBUG_MEMC_GLOBAL
    if(m_debug)
    {
        std::cout
            << "---------------------------------------------" << std::dec << std::endl
            << "MEM_CACHE " << name()
            << " ; Time = " << m_cpt_cycles << std::endl
            << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] << std::endl
            << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] << std::endl
            << " - CC_SEND
FSM = " << cc_send_fsm_str[r_cc_send_fsm.read()] << std::endl1056 << " - CC_RECEIVE FSM = " << cc_receive_fsm_str[r_cc_receive_fsm.read()] << std::endl1057 << " - MULTI_ACK FSM = " << multi_ack_fsm_str[r_multi_ack_fsm.read()] << std::endl1058 << " - READ FSM = " << read_fsm_str[r_read_fsm.read()] << std::endl1059 << " - WRITE FSM = " << write_fsm_str[r_write_fsm.read()] << std::endl1060 << " - CAS FSM = " << cas_fsm_str[r_cas_fsm.read()] << std::endl1061 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl1062 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] << std::endl1063 << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] << std::endl1064 << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl1065 << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] << std::endl1066 << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] << std::endl1067 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] << std::endl1068 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl;1069 }1070 #endif 1071 1072 ////////////////////////////////////////////////////////////////////////////////////1073 // TGT_CMD FSM1074 ////////////////////////////////////////////////////////////////////////////////////1075 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors,1076 // and dispatch these commands to the proper FSM through dedicated FIFOs.1077 //1078 // There are 5 types of commands accepted in the XRAM segment:1079 // - READ : A READ request has a length of 1 VCI flit. It can be a single word1080 // or an entire cache line, depending on the PLEN value => READ FSM1081 // - WRITE : A WRITE request has a maximum length of 16 flits, and can only1082 // concern words in a same line => WRITE FSM1083 // - CAS : A CAS request has a length of 2 flits or 4 flits => CAS FSM1084 // - LL : An LL request has a length of 1 flit => READ FSM1085 // - SC : An SC request has a length of 2 flits. First flit contains the1086 // acces key, second flit the data to write => WRITE FSM.1087 //1088 // The READ/WRITE commands accepted in the configuration segment are targeting1089 // configuration or status registers. 
They must contain one single flit.1090 // - For almost all addressable registers, the response is returned immediately.1091 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed.1092 ////////////////////////////////////////////////////////////////////////////////////1093 1094 //std::cout << std::endl << "tgt_cmd_fsm" << std::endl;1095 1096 switch(r_tgt_cmd_fsm.read())1097 {1098 //////////////////1099 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG)1100 if(p_vci_tgt.cmdval)1101 {1047 if(m_debug) 1048 { 1049 std::cout 1050 << "---------------------------------------------" << std::dec << std::endl 1051 << "MEM_CACHE " << name() 1052 << " ; Time = " << m_cpt_cycles << std::endl 1053 << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm.read()] << std::endl 1054 << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm.read()] << std::endl 1055 << " - CC_SEND FSM = " << cc_send_fsm_str[r_cc_send_fsm.read()] << std::endl 1056 << " - CC_RECEIVE FSM = " << cc_receive_fsm_str[r_cc_receive_fsm.read()] << std::endl 1057 << " - MULTI_ACK FSM = " << multi_ack_fsm_str[r_multi_ack_fsm.read()] << std::endl 1058 << " - READ FSM = " << read_fsm_str[r_read_fsm.read()] << std::endl 1059 << " - WRITE FSM = " << write_fsm_str[r_write_fsm.read()] << std::endl 1060 << " - CAS FSM = " << cas_fsm_str[r_cas_fsm.read()] << std::endl 1061 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm.read()] << std::endl 1062 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm.read()] << std::endl 1063 << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm.read()] << std::endl 1064 << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm.read()] << std::endl 1065 << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm.read()] << std::endl 1066 << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm.read()] << std::endl 1067 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm.read()] << std::endl 1068 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 1069 } 1070 #endif 1071 1072 //////////////////////////////////////////////////////////////////////////////////// 1073 // TGT_CMD FSM 1074 //////////////////////////////////////////////////////////////////////////////////// 1075 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors, 1076 // and dispatch these commands to the proper FSM through dedicated FIFOs. 1077 // 1078 // There are 5 types of commands accepted in the XRAM segment: 1079 // - READ : A READ request has a length of 1 VCI flit. It can be a single word 1080 // or an entire cache line, depending on the PLEN value => READ FSM 1081 // - WRITE : A WRITE request has a maximum length of 16 flits, and can only 1082 // concern words in a same line => WRITE FSM 1083 // - CAS : A CAS request has a length of 2 flits or 4 flits => CAS FSM 1084 // - LL : An LL request has a length of 1 flit => READ FSM 1085 // - SC : An SC request has a length of 2 flits. First flit contains the 1086 // acces key, second flit the data to write => WRITE FSM. 1087 // 1088 // The READ/WRITE commands accepted in the configuration segment are targeting 1089 // configuration or status registers. They must contain one single flit. 1090 // - For almost all addressable registers, the response is returned immediately. 1091 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 
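//
// Summary of the pktid TYPE decoding performed in the TGT_CMD_IDLE state below
// (TSAR encoding), as enforced by the assertions in that state:
//   CMD_READ        : (pktid & 0x4) == 0x0  => READ FSM  (data/ins, uncached or miss)
//   CMD_WRITE       : (pktid & 0x7) == 0x4  => WRITE FSM (TYPE_WRITE = X100)
//   CMD_LOCKED_READ : (pktid & 0x7) == 0x6  => READ FSM  (TYPE_LL    = X110)
//   CMD_NOP         : (pktid & 0x7) == 0x5  => CAS FSM   (TYPE_CAS   = X101)
//                     (pktid & 0x7) == 0x7  => WRITE FSM (TYPE_SC    = X111)
// Any other cmd value leads to TGT_CMD_ERROR; a pktid that does not match the
// expected type for its cmd fails the corresponding assertion.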
1092 //////////////////////////////////////////////////////////////////////////////////// 1093 1094 //std::cout << std::endl << "tgt_cmd_fsm" << std::endl; 1095 1096 switch(r_tgt_cmd_fsm.read()) 1097 { 1098 ////////////////// 1099 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG) 1100 if(p_vci_tgt.cmdval) 1101 { 1102 1102 1103 1103 #if DEBUG_MEMC_TGT_CMD 1104 if(m_debug) 1105 std::cout << " <MEMC " << name() 1106 << " TGT_CMD_IDLE> Receive command from srcid " 1107 << std::hex << p_vci_tgt.srcid.read() 1108 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 1109 #endif 1110 // checking segmentation violation 1111 addr_t address = p_vci_tgt.address.read(); 1112 uint32_t plen = p_vci_tgt.plen.read(); 1113 bool found = false; 1114 bool config = false; 1115 1116 // register arguments for response (segmentation violation or config) 1117 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1118 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1119 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1120 1121 for(size_t seg_id = 0 ; (seg_id < m_nseg) and not found ; seg_id++) 1104 if(m_debug) 1105 std::cout << " <MEMC " << name() 1106 << " TGT_CMD_IDLE> Receive command from srcid " 1107 << std::hex << p_vci_tgt.srcid.read() 1108 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 1109 #endif 1110 // checking segmentation violation 1111 addr_t address = p_vci_tgt.address.read(); 1112 uint32_t plen = p_vci_tgt.plen.read(); 1113 bool found = false; 1114 bool config = false; 1115 1116 // register arguments for response (segmentation violation or config) 1117 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1118 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1119 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1120 1121 for(size_t seg_id = 0 ; (seg_id < m_nseg) and not found ; seg_id++) 1122 { 1123 if( m_seg[seg_id]->contains(address) and 1124 m_seg[seg_id]->contains(address + plen - vci_param_int::B) ) 1125 { 1126 found = true; 1127 if ( m_seg[seg_id]->special() ) config = true; 1128 } 1129 } 1130 1131 if ( not found ) /////////// out of segment error 1132 { 1133 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1134 } 1135 else if ( config ) /////////// configuration command 1136 { 1137 if ( not p_vci_tgt.eop.read() ) r_tgt_cmd_fsm = TGT_CMD_ERROR; 1138 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 1139 } 1140 else //////////// memory access 1141 { 1142 if ( p_vci_tgt.cmd.read() == vci_param_int::CMD_READ ) 1143 { 1144 // check that the pktid is either : 1145 // TYPE_READ_DATA_UNC 1146 // TYPE_READ_DATA_MISS 1147 // TYPE_READ_INS_UNC 1148 // TYPE_READ_INS_MISS 1149 // ==> bit2 must be zero with the TSAR encoding 1150 // ==> mask = 0b0100 = 0x4 1151 assert( ((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 1152 "The type specified in the pktid field is incompatible with the READ CMD"); 1153 r_tgt_cmd_fsm = TGT_CMD_READ; 1154 } 1155 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1156 { 1157 // check that the pktid is TYPE_WRITE 1158 // ==> TYPE_WRITE = X100 with the TSAR encoding 1159 // ==> mask = 0b0111 = 0x7 1160 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x4) and 1161 "The type specified in the pktid field is incompatible with the WRITE CMD"); 1162 r_tgt_cmd_fsm = TGT_CMD_WRITE; 1163 } 1164 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1165 { 1166 // check that the pktid is TYPE_LL 1167 // ==> TYPE_LL = X110 with the TSAR encoding 1168 // ==> mask = 0b0111 = 0x7 1169 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x6) and 1170 "The type 
specified in the pktid field is incompatible with the LL CMD"); 1171 r_tgt_cmd_fsm = TGT_CMD_READ; 1172 } 1173 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1174 { 1175 // check that the pktid is either : 1176 // TYPE_CAS 1177 // TYPE_SC 1178 // ==> TYPE_CAS = X101 with the TSAR encoding 1179 // ==> TYPE_SC = X111 with the TSAR encoding 1180 // ==> mask = 0b0101 = 0x5 1181 assert(((p_vci_tgt.pktid.read() & 0x5) == 0x5) and 1182 "The type specified in the pktid field is incompatible with the NOP CMD"); 1183 1184 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 1185 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 1186 } 1187 else 1188 { 1189 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1190 } 1191 } 1192 } 1193 break; 1194 1195 /////////////////// 1196 case TGT_CMD_ERROR: // response error must be sent 1197 1198 // wait if pending request 1199 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1200 1201 // consume all the command packet flits before sending response error 1202 if ( p_vci_tgt.cmdval and p_vci_tgt.eop ) 1203 { 1204 r_tgt_cmd_to_tgt_rsp_req = true; 1205 r_tgt_cmd_to_tgt_rsp_error = 1; 1206 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1207 1208 #if DEBUG_MEMC_TGT_CMD 1209 if(m_debug) 1210 std::cout << " <MEMC " << name() 1211 << " TGT_CMD_ERROR> Segmentation violation:" 1212 << " address = " << std::hex << p_vci_tgt.address.read() 1213 << " / srcid = " << p_vci_tgt.srcid.read() 1214 << " / trdid = " << p_vci_tgt.trdid.read() 1215 << " / pktid = " << p_vci_tgt.pktid.read() 1216 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1217 #endif 1218 1219 } 1220 break; 1221 1222 //////////////////// 1223 case TGT_CMD_CONFIG: // execute config request and return response 1224 { 1225 addr_t seg_base = m_seg[m_seg_config]->baseAddress(); 1226 addr_t address = p_vci_tgt.address.read(); 1227 size_t cell = (address - seg_base)/vci_param_int::B; 1228 1229 bool need_rsp; 1230 size_t error; 1231 uint32_t rdata = 0; // default value 1232 uint32_t wdata = p_vci_tgt.wdata.read(); 1233 1234 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1235 and (cell == MEMC_LOCK) ) 1236 { 1237 rdata = (uint32_t)r_config_lock.read(); 1238 need_rsp = true; 1239 error = 0; 1240 r_config_lock = true; 1241 if ( rdata == 0 ) 1242 { 1243 r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1244 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1245 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1246 } 1247 } 1248 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1249 and (cell == MEMC_LOCK) 1250 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1251 { 1252 need_rsp = true; 1253 error = 0; 1254 r_config_lock = false; 1255 } 1256 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1257 and (cell == MEMC_ADDR_LO) 1258 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1259 { 1260 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1261 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1262 1263 need_rsp = true; 1264 error = 0; 1265 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1266 (addr_t)p_vci_tgt.wdata.read(); 1267 } 1268 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1269 and (cell == MEMC_ADDR_HI) 1270 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1271 1272 { 1273 need_rsp = true; 1274 error = 0; 1275 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1276 ((addr_t)p_vci_tgt.wdata.read())<<32; 1277 } 1278 else if ( (p_vci_tgt.cmd.read() == 
vci_param_int::CMD_WRITE) // set buf_lines 1279 and (cell == MEMC_BUF_LENGTH) 1280 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1281 { 1282 need_rsp = true; 1283 error = 0; 1284 size_t lines = (size_t)(p_vci_tgt.wdata.read()/(m_words<<2)); 1285 if ( r_config_address.read()%(m_words*4) ) lines++; 1286 r_config_cmd_lines = lines; 1287 r_config_rsp_lines = lines; 1288 } 1289 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1290 and (cell == MEMC_CMD_TYPE) 1291 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1292 { 1293 need_rsp = false; 1294 error = 0; 1295 r_config_cmd = p_vci_tgt.wdata.read(); 1296 1297 // prepare delayed response from CONFIG FSM 1298 r_config_srcid = p_vci_tgt.srcid.read(); 1299 r_config_trdid = p_vci_tgt.trdid.read(); 1300 r_config_pktid = p_vci_tgt.pktid.read(); 1301 } 1302 else 1303 { 1304 need_rsp = true; 1305 error = 1; 1306 } 1307 1308 if ( need_rsp ) 1309 { 1310 // blocked if previous pending request to TGT_RSP FSM 1311 if ( r_tgt_cmd_to_tgt_rsp_req.read() ) break; 1312 1313 r_tgt_cmd_to_tgt_rsp_req = true; 1314 r_tgt_cmd_to_tgt_rsp_error = error; 1315 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1316 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1317 } 1318 else 1319 { 1320 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1321 } 1322 1323 #if DEBUG_MEMC_TGT_CMD 1324 if(m_debug) 1325 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1326 << " address = " << std::hex << p_vci_tgt.address.read() 1327 << " / wdata = " << p_vci_tgt.wdata.read() 1328 << " / need_rsp = " << need_rsp 1329 << " / error = " << error << std::endl; 1330 #endif 1331 break; 1332 } 1333 ////////////////// 1334 case TGT_CMD_READ: // Push a read request into read fifo 1335 1336 // check that the read does not cross a cache line limit. 
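// m_x[address] is the word index of the first requested word in the line,
// and plen>>2 the number of requested 32-bit words: their sum must not
// exceed the 16 words of a cache line. For example, a 4-word read (plen = 16)
// starting at word index 14 gives 14 + 4 = 18 > 16 and is rejected.
// LL commands are exempted here, as their plen is checked separately below.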
1337 if ( ((m_x[(addr_t) p_vci_tgt.address.read()]+ (p_vci_tgt.plen.read() >>2)) > 16) and 1338 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1339 { 1340 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1341 << " illegal address/plen for VCI read command" << std::endl; 1342 exit(0); 1343 } 1344 // check single flit 1345 if(!p_vci_tgt.eop.read()) 1346 { 1347 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1348 << " read command packet must contain one single flit" << std::endl; 1349 exit(0); 1350 } 1351 // check plen for LL 1352 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1353 (p_vci_tgt.plen.read() != 8) ) 1354 { 1355 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1356 << " ll command packets must have a plen of 8" << std::endl; 1357 exit(0); 1358 } 1359 1360 if ( p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok() ) 1361 { 1362 1363 #if DEBUG_MEMC_TGT_CMD 1364 if(m_debug) 1365 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1366 << " address = " << std::hex << p_vci_tgt.address.read() 1367 << " / srcid = " << p_vci_tgt.srcid.read() 1368 << " / trdid = " << p_vci_tgt.trdid.read() 1369 << " / pktid = " << p_vci_tgt.pktid.read() 1370 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1371 #endif 1372 cmd_read_fifo_put = true; 1373 if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) m_cpt_ll++; 1374 else m_cpt_read++; 1375 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1376 } 1377 break; 1378 1379 /////////////////// 1380 case TGT_CMD_WRITE: 1381 if(p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1382 { 1383 1384 #if DEBUG_MEMC_TGT_CMD 1385 if(m_debug) 1386 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1387 << " address = " << std::hex << p_vci_tgt.address.read() 1388 << " / srcid = " << p_vci_tgt.srcid.read() 1389 << " / trdid = " << p_vci_tgt.trdid.read() 1390 << " / pktid = " << p_vci_tgt.pktid.read() 1391 << " / wdata = " << p_vci_tgt.wdata.read() 1392 << " / be = " << p_vci_tgt.be.read() 1393 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1394 #endif 1395 cmd_write_fifo_put = true; 1396 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1397 } 1398 break; 1399 1400 ///////////////// 1401 case TGT_CMD_CAS: 1402 if((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1403 { 1404 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" 1405 << "illegal format for CAS command " << std::endl; 1406 exit(0); 1407 } 1408 1409 if(p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1410 { 1411 1412 #if DEBUG_MEMC_TGT_CMD 1413 if(m_debug) 1414 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1415 << " address = " << std::hex << p_vci_tgt.address.read() 1416 << " srcid = " << p_vci_tgt.srcid.read() 1417 << " trdid = " << p_vci_tgt.trdid.read() 1418 << " pktid = " << p_vci_tgt.pktid.read() 1419 << " wdata = " << p_vci_tgt.wdata.read() 1420 << " be = " << p_vci_tgt.be.read() 1421 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1422 #endif 1423 cmd_cas_fifo_put = true; 1424 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1425 } 1426 break; 1427 } // end switch tgt_cmd_fsm 1428 1429 ///////////////////////////////////////////////////////////////////////// 1430 // MULTI_ACK FSM 1431 ///////////////////////////////////////////////////////////////////////// 1432 // This FSM controls the response to the multicast update requests sent 1433 // by the memory 
cache to the L1 caches and update the UPT. 1434 // 1435 // - The FSM decrements the proper entry in UPT, 1436 // and clear the UPT entry when all responses have been received. 1437 // - If required, it sends a request to the TGT_RSP FSM to complete 1438 // a pending write transaction. 1439 // 1440 // All those multi-ack packets are one flit packet. 1441 // The index in the UPT is defined in the TRDID field. 1442 //////////////////////////////////////////////////////////////////////// 1443 1444 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1445 1446 switch(r_multi_ack_fsm.read()) 1122 1447 { 1123 if( m_seg[seg_id]->contains(address) and 1124 m_seg[seg_id]->contains(address + plen - vci_param_int::B) ) 1125 { 1126 found = true; 1127 if ( m_seg[seg_id]->special() ) config = true; 1128 } 1129 } 1130 1131 if ( not found ) /////////// out of segment error 1448 //////////////////// 1449 case MULTI_ACK_IDLE: 1450 { 1451 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1452 1453 // No CC_RECEIVE FSM request and no WRITE FSM request 1454 if( not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1455 break; 1456 1457 uint8_t updt_index; 1458 1459 // handling WRITE FSM request to decrement update table response 1460 // counter if no CC_RECEIVE FSM request 1461 if(not multi_ack_fifo_rok) 1462 { 1463 updt_index = r_write_to_multi_ack_upt_index.read(); 1464 r_write_to_multi_ack_req = false; 1465 } 1466 // Handling CC_RECEIVE FSM request 1467 else 1468 { 1469 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1470 updt_index = DspinDhccpParam::dspin_get(flit, 1471 DspinDhccpParam::MULTI_ACK_UPDT_INDEX); 1472 1473 cc_receive_to_multi_ack_fifo_get = true; 1474 } 1475 1476 assert((updt_index < m_upt.size()) and 1477 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1478 "index too large for UPT"); 1479 1480 r_multi_ack_upt_index = updt_index; 1481 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1482 1483 #if DEBUG_MEMC_MULTI_ACK 1484 if(m_debug) 1485 { 1486 if (multi_ack_fifo_rok) 1487 { 1488 std::cout << " <MEMC " << name() 1489 << " MULTI_ACK_IDLE> Response for UPT entry " 1490 << (size_t)updt_index << std::endl; 1491 } 1492 else 1493 { 1494 std::cout << " <MEMC " << name() 1495 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1496 << updt_index << std::endl; 1497 } 1498 } 1499 #endif 1500 break; 1501 } 1502 1503 //////////////////////// 1504 case MULTI_ACK_UPT_LOCK: 1505 { 1506 m_cpt_multi_ack_fsm_upt_lock++; 1507 // get lock to the UPDATE table 1508 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1509 1510 // decrement the number of expected responses 1511 size_t count = 0; 1512 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 1513 1514 1515 if(not valid) 1516 { 1517 std::cout << "VCI_MEM_CACHE ERROR " << name() 1518 << " MULTI_ACK_UPT_LOCK state" << std::endl 1519 << "unsuccessful access to decrement the UPT" << std::endl; 1520 exit(0); 1521 } 1522 1523 if(count == 0) 1524 { 1525 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1526 } 1527 else 1528 { 1529 r_multi_ack_fsm = MULTI_ACK_IDLE; 1530 } 1531 1532 #if DEBUG_MEMC_MULTI_ACK 1533 if(m_debug) 1534 std::cout << " <MEMC " << name() 1535 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1536 << " entry = " << r_multi_ack_upt_index.read() 1537 << " / rsp_count = " << std::dec << count << std::endl; 1538 m_cpt_multi_ack_fsm_n_upt_lock++; 1539 #endif 1540 break; 1541 } 1542 1543 ///////////////////////// 1544 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 
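// The response parameters (srcid / trdid / pktid / nline) and the need_rsp
// flag are copied out of the UPT entry before m_upt.clear() is called,
// since the entry is no longer valid once it has been cleared.
// A response is posted to the TGT_RSP FSM (MULTI_ACK_WRITE_RSP) only if
// the need_rsp flag of the entry is set.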
1545 { 1546 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1547 { 1548 std::cout << "VCI_MEM_CACHE ERROR " << name() 1549 << " MULTI_ACK_UPT_CLEAR state" 1550 << " bad UPT allocation" << std::endl; 1551 exit(0); 1552 } 1553 1554 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 1555 r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read()); 1556 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 1557 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1558 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1559 1560 // clear the UPT entry 1561 m_upt.clear(r_multi_ack_upt_index.read()); 1562 1563 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1564 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1565 1566 #if DEBUG_MEMC_MULTI_ACK 1567 if(m_debug) 1568 std::cout << " <MEMC " << name() 1569 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 1570 << std::dec << r_multi_ack_upt_index.read() << std::endl; 1571 #endif 1572 break; 1573 } 1574 ///////////////////////// 1575 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 1576 // Wait if pending request 1577 { 1578 if ( r_multi_ack_to_tgt_rsp_req.read() ) break; 1579 1580 r_multi_ack_to_tgt_rsp_req = true; 1581 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 1582 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 1583 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 1584 r_multi_ack_fsm = MULTI_ACK_IDLE; 1585 1586 #if DEBUG_MEMC_MULTI_ACK 1587 if(m_debug) 1588 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 1589 << " Request TGT_RSP FSM to send a response to srcid " 1590 << std::hex << r_multi_ack_srcid.read() << std::endl; 1591 #endif 1592 break; 1593 } 1594 } // end switch r_multi_ack_fsm 1595 1596 //////////////////////////////////////////////////////////////////////////////////// 1597 // CONFIG FSM 1598 //////////////////////////////////////////////////////////////////////////////////// 1599 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1600 // The target buffer can have any size, and there is one single command for 1601 // all cache lines covered by the target buffer. 1602 // 1603 // An INVAL or SYNC configuration operation is defined by the following registers: 1604 // - bool r_config_cmd : INVAL / SYNC / NOP 1605 // - uint64_t r_config_address : buffer base address 1606 // - uint32_t r_config_cmd_lines : number of lines to be handled 1607 // - uint32_t r_config_rsp_lines : number of lines not completed 1608 // 1609 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1610 // all cache lines covered by the buffer. The various lines of a given buffer 1611 // can be pipelined: the CONFIG FSM does not wait the response for line (n) to send 1612 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1613 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1614 // 1615 // - INVAL request: 1616 // For each line, it access to the DIR. 1617 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1618 // In case of hit, with no copies in L1 caches, the line is invalidated and 1619 // a response is requested to TGT_RSP FSM. 1620 // If there is copies, a multi-inval, or a broadcast-inval coherence transaction 1621 // is launched and registered in UPT. The multi-inval transaction completion 1622 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 
1623 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1624 // TODO : The target buffer address must be aligned on a cache line boundary. 1625 // This constraint can be released, but it requires to make 2 PUT transactions 1626 // for the first and the last line... 1627 // 1628 // - SYNC request: 1629 // For each line, it access to the DIR. 1630 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1631 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1632 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1633 // when a PUT response is received. 1634 // The CONFIG SYNC response is sent only when the last PUT response is received. 1635 // 1636 // From the software point of view, a configuration request is a sequence 1637 // of 6 atomic accesses in an uncached segment. A dedicated lock is used 1638 // to handle only one configuration command at a given time: 1639 // - Read MEMC_LOCK : Get the lock 1640 // - Write MEMC_ADDR_LO : Set the buffer address LSB 1641 // - Write MEMC_ADDR_HI : Set the buffer address MSB 1642 // - Write MEMC_BUF_LENGTH : set buffer length (bytes) 1643 // - Write MEMC_CMD_TYPE : launch the actual operation 1644 // - WRITE MEMC_LOCK : release the lock 1645 //////////////////////////////////////////////////////////////////////////////////// 1646 1647 //std::cout << std::endl << "config_fsm" << std::endl; 1648 1649 switch( r_config_fsm.read() ) 1132 1650 { 1133 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1134 } 1135 else if ( config ) /////////// configuration command 1651 ///////////////// 1652 case CONFIG_IDLE: // waiting a config request 1653 { 1654 if ( r_config_cmd.read() != MEMC_CMD_NOP ) 1655 { 1656 r_config_fsm = CONFIG_LOOP; 1657 1658 #if DEBUG_MEMC_CONFIG 1659 if(m_debug) 1660 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1661 << " / address = " << std::hex << r_config_address.read() 1662 << " / lines = " << std::dec << r_config_cmd_lines.read() 1663 << " / type = " << r_config_cmd.read() << std::endl; 1664 #endif 1665 } 1666 break; 1667 } 1668 ///////////////// 1669 case CONFIG_LOOP: // test if last line to be handled 1670 { 1671 if ( r_config_cmd_lines.read() == 0 ) 1672 { 1673 r_config_cmd = MEMC_CMD_NOP; 1674 r_config_fsm = CONFIG_WAIT; 1675 } 1676 else 1677 { 1678 r_config_fsm = CONFIG_DIR_REQ; 1679 } 1680 1681 #if DEBUG_MEMC_CONFIG 1682 if(m_debug) 1683 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1684 << " / address = " << std::hex << r_config_address.read() 1685 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 1686 << " / command = " << r_config_cmd.read() << std::endl; 1687 #endif 1688 break; 1689 } 1690 ///////////////// 1691 case CONFIG_WAIT: // wait completion (last response) 1692 { 1693 if ( r_config_rsp_lines.read() == 0 ) // last response received 1694 { 1695 r_config_fsm = CONFIG_RSP; 1696 } 1697 1698 #if DEBUG_MEMC_CONFIG 1699 if(m_debug) 1700 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1701 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1702 #endif 1703 break; 1704 } 1705 //////////////// 1706 case CONFIG_RSP: // request TGT_RSP FSM to return response 1707 { 1708 if ( not r_config_to_tgt_rsp_req.read() ) 1709 { 1710 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1711 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1712 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1713 r_config_to_tgt_rsp_error = false; 1714 r_config_to_tgt_rsp_req = 
true; 1715 r_config_fsm = CONFIG_IDLE; 1716 1717 #if DEBUG_MEMC_CONFIG 1718 if(m_debug) 1719 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1720 << " error = " << r_config_to_tgt_rsp_error.read() 1721 << " / rsrcid = " << std::hex << r_config_srcid.read() 1722 << " / rtrdid = " << std::hex << r_config_trdid.read() 1723 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1724 #endif 1725 } 1726 break; 1727 1728 } 1729 //////////////////// 1730 case CONFIG_DIR_REQ: // Request directory lock 1731 { 1732 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG ) 1733 { 1734 r_config_fsm = CONFIG_DIR_ACCESS; 1735 } 1736 1737 #if DEBUG_MEMC_CONFIG 1738 if(m_debug) 1739 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 1740 << " Request DIR access" << std::endl; 1741 #endif 1742 break; 1743 } 1744 /////////////////////// 1745 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1746 { 1747 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1748 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1749 1750 size_t way = 0; 1751 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 1752 1753 if ( entry.valid and // hit & inval command 1754 (r_config_cmd.read() == MEMC_CMD_INVAL) ) 1755 { 1756 r_config_dir_way = way; 1757 r_config_dir_copy_inst = entry.owner.inst; 1758 r_config_dir_copy_srcid = entry.owner.srcid; 1759 r_config_dir_is_cnt = entry.is_cnt; 1760 r_config_dir_lock = entry.lock; 1761 r_config_dir_count = entry.count; 1762 r_config_dir_ptr = entry.ptr; 1763 1764 r_config_fsm = CONFIG_IVT_LOCK; 1765 } 1766 else if ( entry.valid and // hit & sync command 1767 entry.dirty and 1768 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1769 { 1770 r_config_fsm = CONFIG_TRT_LOCK; 1771 } 1772 else // miss : return to LOOP 1773 { 1774 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1775 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1776 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1777 r_config_address = r_config_address.read() + (m_words<<2); 1778 r_config_fsm = CONFIG_LOOP; 1779 } 1780 1781 #if DEBUG_MEMC_CONFIG 1782 if(m_debug) 1783 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 1784 << " address = " << std::hex << r_config_address.read() 1785 << " / hit = " << std::dec << entry.valid 1786 << " / dirty = " << entry.dirty 1787 << " / count = " << entry.count 1788 << " / is_cnt = " << entry.is_cnt << std::endl; 1789 #endif 1790 break; 1791 } 1792 ///////////////////// 1793 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1794 // to a dirty cache line 1795 // keep DIR lock, and try to get TRT lock 1796 // return to LOOP state if TRT full 1797 // reset dirty bit in DIR and register a PUT 1798 // trabsaction in TRT if not full. 
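// The line data is then read from the data cache and the PUT transaction
// is registered in TRT (CONFIG_TRT_SET) and posted to the IXR_CMD FSM
// (CONFIG_PUT_REQ) before returning to CONFIG_LOOP for the next line.
// The r_config_rsp_lines counter is decremented by the IXR_RSP FSM when
// the corresponding PUT response is received from the XRAM.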
1799 { 1800 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1801 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1802 1803 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1804 { 1805 size_t index = 0; 1806 bool wok = not m_trt.full(index); 1807 1808 if ( not wok ) 1809 { 1810 r_config_fsm = CONFIG_LOOP; 1811 } 1812 else 1813 { 1814 size_t way = r_config_dir_way.read(); 1815 size_t set = m_y[r_config_address.read()]; 1816 1817 // reset dirty bit in DIR 1818 DirectoryEntry entry; 1819 entry.valid = true; 1820 entry.dirty = false; 1821 entry.tag = m_z[r_config_address.read()]; 1822 entry.is_cnt = r_config_dir_is_cnt.read(); 1823 entry.lock = r_config_dir_lock.read(); 1824 entry.ptr = r_config_dir_ptr.read(); 1825 entry.count = r_config_dir_count.read(); 1826 entry.owner.inst = r_config_dir_copy_inst.read(); 1827 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1828 m_cache_directory.write( set, 1829 way, 1830 entry ); 1831 1832 r_config_trt_index = index; 1833 r_config_fsm = CONFIG_TRT_SET; 1834 } 1835 1836 #if DEBUG_MEMC_CONFIG 1837 if(m_debug) 1838 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1839 << " wok = " << std::dec << wok 1840 << " index = " << index << std::endl; 1841 #endif 1842 } 1843 break; 1844 } 1845 //////////////////// 1846 case CONFIG_TRT_SET: // read data in cache 1847 // and post a PUT request in TRT 1848 { 1849 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1850 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1851 1852 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1853 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1854 1855 // read data into cache 1856 size_t way = r_config_dir_way.read(); 1857 size_t set = m_y[r_config_address.read()]; 1858 1859 sc_signal<data_t> config_data[16]; 1860 m_cache_data.read_line( way, 1861 set, 1862 config_data ); 1863 1864 // post a PUT request in TRT 1865 std::vector<data_t> data_vector; 1866 data_vector.clear(); 1867 for(size_t i=0; i<m_words; i++) data_vector.push_back(config_data[i].read()); 1868 m_trt.set( r_config_trt_index.read(), 1869 false, // PUT 1870 m_nline[r_config_address.read()], // nline 1871 0, // srcid: unused 1872 0, // trdid: unused 1873 0, // pktid: unused 1874 false, // not proc_read 1875 0, // read_length: unused 1876 0, // word_index: unused 1877 std::vector<be_t>(m_words,0xF), 1878 data_vector); 1879 1880 #if DEBUG_MEMC_CONFIG 1881 if(m_debug) 1882 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1883 << " address = " << std::hex << r_config_address.read() 1884 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1885 #endif 1886 break; 1887 } 1888 //////////////////// 1889 case CONFIG_PUT_REQ: // PUT request to IXR_CMD_FSM 1890 { 1891 if ( not r_config_to_ixr_cmd_req.read() ) 1892 { 1893 r_config_to_ixr_cmd_req = true; 1894 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1895 1896 // prepare next iteration 1897 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1898 r_config_address = r_config_address.read() + (m_words<<2); 1899 r_config_fsm = CONFIG_LOOP; 1900 1901 #if DEBUG_MEMC_CONFIG 1902 if(m_debug) 1903 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> PUT request to IXR_CMD_FSM" 1904 << " / address = " << std::hex << r_config_address.read() << std::endl; 1905 #endif 1906 } 1907 break; 1908 } 1909 ///////////////////// 1910 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1911 // Keep DIR lock and Try to get IVT lock. 
1912 // Return to LOOP state if IVT full. 1913 // Register inval in IVT, and invalidate the 1914 // directory if IVT not full. 1915 { 1916 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1917 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1918 1919 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1920 { 1921 size_t set = m_y[(addr_t)(r_config_address.read())]; 1922 size_t way = r_config_dir_way.read(); 1923 1924 if ( r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 1925 { 1926 m_cache_directory.inval( way, set ); 1927 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1928 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1929 r_config_address = r_config_address.read() + (m_words<<2); 1930 r_config_fsm = CONFIG_LOOP; 1931 1932 #if DEBUG_MEMC_CONFIG 1933 if(m_debug) 1934 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1935 << " No copies in L1 : inval DIR entry" << std::endl; 1936 #endif 1937 } 1938 else // try to register inval in IVT 1939 { 1940 bool wok = false; 1941 size_t index = 0; 1942 bool broadcast = r_config_dir_is_cnt.read(); 1943 size_t srcid = r_config_srcid.read(); 1944 size_t trdid = r_config_trdid.read(); 1945 size_t pktid = r_config_pktid.read(); 1946 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 1947 size_t nb_copies = r_config_dir_count.read(); 1948 1949 wok = m_ivt.set(false, // it's an inval transaction 1950 broadcast, 1951 false, // no response required 1952 true, // acknowledge required 1953 srcid, 1954 trdid, 1955 pktid, 1956 nline, 1957 nb_copies, 1958 index); 1959 1960 if ( wok ) // IVT success => inval DIR slot 1961 { 1962 m_cache_directory.inval( way, set ); 1963 r_config_ivt_index = index; 1964 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1965 else r_config_fsm = CONFIG_INVAL_SEND; 1966 1967 #if DEBUG_MEMC_CONFIG 1968 if(m_debug) 1969 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1970 << " Inval DIR entry and register inval in IVT" 1971 << " / index = " << std::dec << index 1972 << " / broadcast = " << broadcast << std::endl; 1973 #endif 1974 } 1975 else // IVT full => release both DIR and IVT locks 1976 { 1977 r_config_fsm = CONFIG_LOOP; 1978 1979 #if DEBUG_MEMC_CONFIG 1980 if(m_debug) 1981 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1982 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1983 #endif 1984 } 1985 } 1986 } 1987 break; 1988 } 1989 //////////////////// 1990 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 1991 { 1992 if( not r_config_to_cc_send_multi_req.read() and 1993 not r_config_to_cc_send_brdcast_req.read() ) 1994 { 1995 // post bc inval request 1996 r_config_to_cc_send_multi_req = false; 1997 r_config_to_cc_send_brdcast_req = true; 1998 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1999 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2000 2001 // prepare next iteration 2002 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2003 r_config_address = r_config_address.read() + (m_words<<2); 2004 r_config_fsm = CONFIG_LOOP; 2005 2006 #if DEBUG_MEMC_CONFIG 2007 if(m_debug) 2008 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 2009 << " Post a broadcast inval request to CC_SEND FSM" 2010 << " / address = " << r_config_address.read() <<std::endl; 2011 #endif 2012 } 2013 break; 2014 } 2015 /////////////////////// 2016 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 2017 { 2018 if( not r_config_to_cc_send_multi_req.read() and 2019 not 
r_config_to_cc_send_brdcast_req.read() ) 2020 { 2021 // post multi inval request 2022 r_config_to_cc_send_multi_req = true; 2023 r_config_to_cc_send_brdcast_req = false; 2024 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2025 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2026 2027 // post data into FIFO 2028 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2029 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2030 config_to_cc_send_fifo_put = true; 2031 2032 if ( r_config_dir_count.read() == 1 ) // one copy 2033 { 2034 // prepare next iteration 2035 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2036 r_config_address = r_config_address.read() + (m_words<<2); 2037 r_config_fsm = CONFIG_LOOP; 2038 } 2039 else // several copies 2040 { 2041 r_config_fsm = CONFIG_HEAP_REQ; 2042 } 2043 2044 #if DEBUG_MEMC_CONFIG 2045 if(m_debug) 2046 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2047 << " Post multi inval request to CC_SEND FSM" 2048 << " / address = " << std::hex << r_config_address.read() 2049 << " / copy = " << r_config_dir_copy_srcid.read() 2050 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2051 #endif 2052 } 2053 break; 2054 } 2055 ///////////////////// 2056 case CONFIG_HEAP_REQ: // Try to get access to Heap 2057 { 2058 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 2059 { 2060 r_config_fsm = CONFIG_HEAP_SCAN; 2061 r_config_heap_next = r_config_dir_ptr.read(); 2062 } 2063 2064 #if DEBUG_MEMC_CONFIG 2065 if(m_debug) 2066 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2067 << " Requesting HEAP lock" << std::endl; 2068 #endif 2069 break; 2070 } 2071 ////////////////////// 2072 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2073 { 2074 HeapEntry entry = m_heap.read( r_config_heap_next.read() ); 2075 bool last_copy = (entry.next == r_config_heap_next.read()); 2076 2077 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2078 config_to_cc_send_fifo_inst = entry.owner.inst; 2079 // config_to_cc_send_fifo_last = last_copy; 2080 config_to_cc_send_fifo_put = true; 2081 2082 if ( m_config_to_cc_send_inst_fifo.wok() ) // inval request accepted 2083 { 2084 r_config_heap_next = entry.next; 2085 if ( last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 2086 } 2087 2088 #if DEBUG_MEMC_CONFIG 2089 if(m_debug) 2090 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2091 << " Post multi inval request to CC_SEND FSM" 2092 << " / address = " << std::hex << r_config_address.read() 2093 << " / copy = " << entry.owner.srcid 2094 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2095 #endif 2096 break; 2097 } 2098 ////////////////////// 2099 case CONFIG_HEAP_LAST: // HEAP housekeeping 2100 { 2101 size_t free_pointer = m_heap.next_free_ptr(); 2102 HeapEntry last_entry; 2103 last_entry.owner.srcid = 0; 2104 last_entry.owner.inst = false; 2105 2106 if ( m_heap.is_full() ) 2107 { 2108 last_entry.next = r_config_dir_ptr.read(); 2109 m_heap.unset_full(); 2110 } 2111 else 2112 { 2113 last_entry.next = free_pointer; 2114 } 2115 2116 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 2117 m_heap.write( r_config_heap_next.read(), last_entry ); 2118 2119 // prepare next iteration 2120 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2121 r_config_address = r_config_address.read() + (m_words<<2); 2122 r_config_fsm = CONFIG_LOOP; 2123 2124 #if DEBUG_MEMC_CONFIG 2125 if(m_debug) 2126 std::cout << " <MEMC " << name() << " CONFIG_HEAP_LAST>" 2127 << " Heap housekeeping" << std::endl; 
2128 #endif 2129 break; 2130 } 2131 } // end switch r_config_fsm 2132 2133 //////////////////////////////////////////////////////////////////////////////////// 2134 // READ FSM 2135 //////////////////////////////////////////////////////////////////////////////////// 2136 // The READ FSM controls the VCI read and ll requests. 2137 // It takes the lock protecting the cache directory to check the cache line status: 2138 // - In case of HIT 2139 // The fsm copies the data (one line, or one single word) 2140 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 2141 // The requesting initiator is registered in the cache directory. 2142 // If the number of copy is larger than 1, the new copy is registered 2143 // in the HEAP. 2144 // If the number of copy is larger than the threshold, the HEAP is cleared, 2145 // and the corresponding line switches to the counter mode. 2146 // - In case of MISS 2147 // The READ fsm takes the lock protecting the transaction tab. 2148 // If a read transaction to the XRAM for this line already exists, 2149 // or if the transaction tab is full, the fsm is stalled. 2150 // If a TRT entry is free, the READ request is registered in TRT, 2151 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 2152 // The READ FSM returns in the IDLE state as the read transaction will be 2153 // completed when the missing line will be received. 2154 //////////////////////////////////////////////////////////////////////////////////// 2155 2156 //std::cout << std::endl << "read_fsm" << std::endl; 2157 2158 switch(r_read_fsm.read()) 1136 2159 { 1137 if ( not p_vci_tgt.eop.read() ) r_tgt_cmd_fsm = TGT_CMD_ERROR; 1138 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 1139 } 1140 else //////////// memory access 1141 { 1142 if ( p_vci_tgt.cmd.read() == vci_param_int::CMD_READ ) 1143 { 1144 // check that the pktid is either : 1145 // TYPE_READ_DATA_UNC 1146 // TYPE_READ_DATA_MISS 1147 // TYPE_READ_INS_UNC 1148 // TYPE_READ_INS_MISS 1149 // ==> bit2 must be zero with the TSAR encoding 1150 // ==> mask = 0b0100 = 0x4 1151 assert( ((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 1152 "The type specified in the pktid field is incompatible with the READ CMD"); 1153 r_tgt_cmd_fsm = TGT_CMD_READ; 1154 } 1155 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1156 { 1157 // check that the pktid is TYPE_WRITE 1158 // ==> TYPE_WRITE = X100 with the TSAR encoding 1159 // ==> mask = 0b0111 = 0x7 1160 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x4) and 1161 "The type specified in the pktid field is incompatible with the WRITE CMD"); 1162 r_tgt_cmd_fsm = TGT_CMD_WRITE; 1163 } 1164 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1165 { 1166 // check that the pktid is TYPE_LL 1167 // ==> TYPE_LL = X110 with the TSAR encoding 1168 // ==> mask = 0b0111 = 0x7 1169 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x6) and 1170 "The type specified in the pktid field is incompatible with the LL CMD"); 1171 r_tgt_cmd_fsm = TGT_CMD_READ; 1172 } 1173 else if(p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1174 { 1175 // check that the pktid is either : 1176 // TYPE_CAS 1177 // TYPE_SC 1178 // ==> TYPE_CAS = X101 with the TSAR encoding 1179 // ==> TYPE_SC = X111 with the TSAR encoding 1180 // ==> mask = 0b0101 = 0x5 1181 assert(((p_vci_tgt.pktid.read() & 0x5) == 0x5) and 1182 "The type specified in the pktid field is incompatible with the NOP CMD"); 1183 1184 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 1185 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 1186 } 
1187 else 1188 { 1189 r_tgt_cmd_fsm = TGT_CMD_ERROR; 1190 } 1191 } 1192 } 1193 break; 1194 1195 /////////////////// 1196 case TGT_CMD_ERROR: // response error must be sent 1197 1198 // wait if pending request 1199 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1200 1201 // consume all the command packet flits before sending response error 1202 if ( p_vci_tgt.cmdval and p_vci_tgt.eop ) 1203 { 1204 r_tgt_cmd_to_tgt_rsp_req = true; 1205 r_tgt_cmd_to_tgt_rsp_error = 1; 1206 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1207 1208 #if DEBUG_MEMC_TGT_CMD 1209 if(m_debug) 1210 std::cout << " <MEMC " << name() 1211 << " TGT_CMD_ERROR> Segmentation violation:" 1212 << " address = " << std::hex << p_vci_tgt.address.read() 1213 << " / srcid = " << p_vci_tgt.srcid.read() 1214 << " / trdid = " << p_vci_tgt.trdid.read() 1215 << " / pktid = " << p_vci_tgt.pktid.read() 1216 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1217 #endif 1218 1219 } 1220 break; 1221 1222 //////////////////// 1223 case TGT_CMD_CONFIG: // execute config request and return response 1224 { 1225 addr_t seg_base = m_seg[m_seg_config]->baseAddress(); 1226 addr_t address = p_vci_tgt.address.read(); 1227 size_t cell = (address - seg_base)/vci_param_int::B; 1228 1229 bool need_rsp; 1230 size_t error; 1231 uint32_t rdata = 0; // default value 1232 uint32_t wdata = p_vci_tgt.wdata.read(); 1233 1234 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1235 and (cell == MEMC_LOCK) ) 1236 { 1237 rdata = (uint32_t)r_config_lock.read(); 1238 need_rsp = true; 1239 error = 0; 1240 r_config_lock = true; 1241 if ( rdata == 0 ) 1242 { 1243 r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1244 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1245 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1246 } 1247 } 1248 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1249 and (cell == MEMC_LOCK) 1250 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1251 { 1252 need_rsp = true; 1253 error = 0; 1254 r_config_lock = false; 1255 } 1256 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1257 and (cell == MEMC_ADDR_LO) 1258 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1259 { 1260 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1261 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1262 1263 need_rsp = true; 1264 error = 0; 1265 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1266 (addr_t)p_vci_tgt.wdata.read(); 1267 } 1268 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1269 and (cell == MEMC_ADDR_HI) 1270 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1271 1272 { 1273 need_rsp = true; 1274 error = 0; 1275 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1276 ((addr_t)p_vci_tgt.wdata.read())<<32; 1277 } 1278 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1279 and (cell == MEMC_BUF_LENGTH) 1280 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1281 { 1282 need_rsp = true; 1283 error = 0; 1284 size_t lines = (size_t)(p_vci_tgt.wdata.read()/(m_words<<2)); 1285 if ( r_config_address.read()%(m_words*4) ) lines++; 1286 r_config_cmd_lines = lines; 1287 r_config_rsp_lines = lines; 1288 } 1289 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1290 and (cell == MEMC_CMD_TYPE) 1291 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1292 { 1293 need_rsp = false; 1294 error = 0; 1295 r_config_cmd = p_vci_tgt.wdata.read(); 1296 
1297 // prepare delayed response from CONFIG FSM 1298 r_config_srcid = p_vci_tgt.srcid.read(); 1299 r_config_trdid = p_vci_tgt.trdid.read(); 1300 r_config_pktid = p_vci_tgt.pktid.read(); 1301 } 1302 else 1303 { 1304 need_rsp = true; 1305 error = 1; 1306 } 1307 1308 if ( need_rsp ) 1309 { 1310 // blocked if previous pending request to TGT_RSP FSM 1311 if ( r_tgt_cmd_to_tgt_rsp_req.read() ) break; 1312 1313 r_tgt_cmd_to_tgt_rsp_req = true; 1314 r_tgt_cmd_to_tgt_rsp_error = error; 1315 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1316 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1317 } 1318 else 1319 { 1320 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1321 } 1322 1323 #if DEBUG_MEMC_TGT_CMD 1324 if(m_debug) 1325 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1326 << " address = " << std::hex << p_vci_tgt.address.read() 1327 << " / wdata = " << p_vci_tgt.wdata.read() 1328 << " / need_rsp = " << need_rsp 1329 << " / error = " << error << std::endl; 1330 #endif 1331 break; 1332 } 1333 ////////////////// 1334 case TGT_CMD_READ: // Push a read request into read fifo 1335 1336 // check that the read does not cross a cache line limit. 1337 if ( ((m_x[(addr_t) p_vci_tgt.address.read()]+ (p_vci_tgt.plen.read() >>2)) > 16) and 1338 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1339 { 1340 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1341 << " illegal address/plen for VCI read command" << std::endl; 1342 exit(0); 1343 } 1344 // check single flit 1345 if(!p_vci_tgt.eop.read()) 1346 { 1347 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1348 << " read command packet must contain one single flit" << std::endl; 1349 exit(0); 1350 } 1351 // check plen for LL 1352 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1353 (p_vci_tgt.plen.read() != 8) ) 1354 { 1355 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 1356 << " ll command packets must have a plen of 8" << std::endl; 1357 exit(0); 1358 } 1359 1360 if ( p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok() ) 1361 { 1362 1363 #if DEBUG_MEMC_TGT_CMD 1364 if(m_debug) 1365 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1366 << " address = " << std::hex << p_vci_tgt.address.read() 1367 << " / srcid = " << p_vci_tgt.srcid.read() 1368 << " / trdid = " << p_vci_tgt.trdid.read() 1369 << " / pktid = " << p_vci_tgt.pktid.read() 1370 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1371 #endif 1372 cmd_read_fifo_put = true; 1373 if(p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) m_cpt_ll++; 1374 else m_cpt_read++; 1375 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1376 } 1377 break; 1378 1379 /////////////////// 1380 case TGT_CMD_WRITE: 1381 if(p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1382 { 1383 1384 #if DEBUG_MEMC_TGT_CMD 1385 if(m_debug) 1386 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1387 << " address = " << std::hex << p_vci_tgt.address.read() 1388 << " / srcid = " << p_vci_tgt.srcid.read() 1389 << " / trdid = " << p_vci_tgt.trdid.read() 1390 << " / pktid = " << p_vci_tgt.pktid.read() 1391 << " / wdata = " << p_vci_tgt.wdata.read() 1392 << " / be = " << p_vci_tgt.be.read() 1393 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1394 #endif 1395 cmd_write_fifo_put = true; 1396 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1397 } 1398 break; 1399 1400 ///////////////// 1401 case TGT_CMD_CAS: 1402 if((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1403 { 1404 std::cout 
<< "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" 1405 << "illegal format for CAS command " << std::endl; 1406 exit(0); 1407 } 1408 1409 if(p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1410 { 1411 1412 #if DEBUG_MEMC_TGT_CMD 1413 if(m_debug) 1414 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1415 << " address = " << std::hex << p_vci_tgt.address.read() 1416 << " srcid = " << p_vci_tgt.srcid.read() 1417 << " trdid = " << p_vci_tgt.trdid.read() 1418 << " pktid = " << p_vci_tgt.pktid.read() 1419 << " wdata = " << p_vci_tgt.wdata.read() 1420 << " be = " << p_vci_tgt.be.read() 1421 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1422 #endif 1423 cmd_cas_fifo_put = true; 1424 if(p_vci_tgt.eop) r_tgt_cmd_fsm = TGT_CMD_IDLE; 1425 } 1426 break; 1427 } // end switch tgt_cmd_fsm 1428 1429 ///////////////////////////////////////////////////////////////////////// 1430 // MULTI_ACK FSM 1431 ///////////////////////////////////////////////////////////////////////// 1432 // This FSM controls the response to the multicast update requests sent 1433 // by the memory cache to the L1 caches and update the UPT. 1434 // 1435 // - The FSM decrements the proper entry in UPT, 1436 // and clear the UPT entry when all responses have been received. 1437 // - If required, it sends a request to the TGT_RSP FSM to complete 1438 // a pending write transaction. 1439 // 1440 // All those multi-ack packets are one flit packet. 1441 // The index in the UPT is defined in the TRDID field. 1442 //////////////////////////////////////////////////////////////////////// 1443 1444 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1445 1446 switch(r_multi_ack_fsm.read()) 1447 { 1448 //////////////////// 1449 case MULTI_ACK_IDLE: 1450 { 1451 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1452 1453 // No CC_RECEIVE FSM request and no WRITE FSM request 1454 if( not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1455 break; 1456 1457 uint8_t updt_index; 1458 1459 // handling WRITE FSM request to decrement update table response 1460 // counter if no CC_RECEIVE FSM request 1461 if(not multi_ack_fifo_rok) 1462 { 1463 updt_index = r_write_to_multi_ack_upt_index.read(); 1464 r_write_to_multi_ack_req = false; 1465 } 1466 // Handling CC_RECEIVE FSM request 1467 else 1468 { 1469 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1470 updt_index = DspinDhccpParam::dspin_get(flit, 1471 DspinDhccpParam::MULTI_ACK_UPDT_INDEX); 1472 1473 cc_receive_to_multi_ack_fifo_get = true; 1474 } 1475 1476 assert((updt_index < m_upt.size()) and 1477 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1478 "index too large for UPT"); 1479 1480 r_multi_ack_upt_index = updt_index; 1481 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1482 1483 #if DEBUG_MEMC_MULTI_ACK 1484 if(m_debug) 1485 { 1486 if (multi_ack_fifo_rok) 1487 { 1488 std::cout << " <MEMC " << name() 1489 << " MULTI_ACK_IDLE> Response for UPT entry " 1490 << (size_t)updt_index << std::endl; 1491 } 1492 else 1493 { 1494 std::cout << " <MEMC " << name() 1495 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1496 << updt_index << std::endl; 1497 } 1498 } 1499 #endif 1500 break; 1501 } 1502 1503 //////////////////////// 1504 case MULTI_ACK_UPT_LOCK: 1505 { 1506 m_cpt_multi_ack_fsm_upt_lock++; 1507 // get lock to the UPDATE table 1508 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1509 1510 // decrement the number of expected responses 1511 size_t count = 0; 1512 bool valid = 
m_upt.decrement(r_multi_ack_upt_index.read(), count); 1513 1514 1515 if(not valid) 1516 { 1517 std::cout << "VCI_MEM_CACHE ERROR " << name() 1518 << " MULTI_ACK_UPT_LOCK state" << std::endl 1519 << "unsuccessful access to decrement the UPT" << std::endl; 1520 exit(0); 1521 } 1522 1523 if(count == 0) 1524 { 1525 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1526 } 1527 else 1528 { 1529 r_multi_ack_fsm = MULTI_ACK_IDLE; 1530 } 1531 1532 #if DEBUG_MEMC_MULTI_ACK 1533 if(m_debug) 1534 std::cout << " <MEMC " << name() 1535 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1536 << " entry = " << r_multi_ack_upt_index.read() 1537 << " / rsp_count = " << std::dec << count << std::endl; 1538 m_cpt_multi_ack_fsm_n_upt_lock++; 1539 #endif 1540 break; 1541 } 1542 1543 ///////////////////////// 1544 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 1545 { 1546 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1547 { 1548 std::cout << "VCI_MEM_CACHE ERROR " << name() 1549 << " MULTI_ACK_UPT_CLEAR state" 1550 << " bad UPT allocation" << std::endl; 1551 exit(0); 1552 } 1553 1554 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 1555 r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read()); 1556 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 1557 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1558 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1559 1560 // clear the UPT entry 1561 m_upt.clear(r_multi_ack_upt_index.read()); 1562 1563 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1564 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1565 1566 #if DEBUG_MEMC_MULTI_ACK 1567 if(m_debug) 1568 std::cout << " <MEMC " << name() 1569 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 1570 << std::dec << r_multi_ack_upt_index.read() << std::endl; 1571 #endif 1572 break; 1573 } 1574 ///////////////////////// 1575 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 1576 // Wait if pending request 1577 { 1578 if ( r_multi_ack_to_tgt_rsp_req.read() ) break; 1579 1580 r_multi_ack_to_tgt_rsp_req = true; 1581 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 1582 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 1583 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 1584 r_multi_ack_fsm = MULTI_ACK_IDLE; 1585 1586 #if DEBUG_MEMC_MULTI_ACK 1587 if(m_debug) 1588 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 1589 << " Request TGT_RSP FSM to send a response to srcid " 1590 << std::hex << r_multi_ack_srcid.read() << std::endl; 1591 #endif 1592 break; 1593 } 1594 } // end switch r_multi_ack_fsm 1595 1596 //////////////////////////////////////////////////////////////////////////////////// 1597 // CONFIG FSM 1598 //////////////////////////////////////////////////////////////////////////////////// 1599 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1600 // The target buffer can have any size, and there is one single command for 1601 // all cache lines covered by the target buffer. 1602 // 1603 // An INVAL or SYNC configuration operation is defined by the following registers: 1604 // - bool r_config_cmd : INVAL / SYNC / NOP 1605 // - uint64_t r_config_address : buffer base address 1606 // - uint32_t r_config_cmd_lines : number of lines to be handled 1607 // - uint32_t r_config_rsp_lines : number of lines not completed 1608 // 1609 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1610 // all cache lines covered by the buffer. 
The various lines of a given buffer 1611 // can be pipelined: the CONFIG FSM does not wait for the response to line (n) before sending 1612 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1613 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1614 // 1615 // - INVAL request: 1616 // For each line, it accesses the DIR. 1617 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1618 // In case of hit, with no copies in L1 caches, the line is invalidated and 1619 // a response is requested to TGT_RSP FSM. 1620 // If there are copies, a multi-inval or a broadcast-inval coherence transaction 1621 // is launched and registered in UPT. The multi-inval transaction completion 1622 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1623 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1624 // TODO : The target buffer address must be aligned on a cache line boundary. 1625 // This constraint could be relaxed, but it would require 2 PUT transactions 1626 // for the first and the last line... 1627 // 1628 // - SYNC request: 1629 // For each line, it accesses the DIR. 1630 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1631 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1632 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1633 // when a PUT response is received. 1634 // The CONFIG SYNC response is sent only when the last PUT response is received. 1635 // 1636 // From the software point of view, a configuration request is a sequence 1637 // of 6 atomic accesses in an uncached segment. A dedicated lock is used 1638 // to handle only one configuration command at a given time: 1639 // - Read MEMC_LOCK : Get the lock 1640 // - Write MEMC_ADDR_LO : Set the buffer address LSB 1641 // - Write MEMC_ADDR_HI : Set the buffer address MSB 1642 // - Write MEMC_BUF_LENGTH : Set the buffer length (bytes) 1643 // - Write MEMC_CMD_TYPE : Launch the actual operation 1644 // - Write MEMC_LOCK : Release the lock (a driver-side sketch of this sequence is given below, after the CONFIG_LOOP case) 1645 //////////////////////////////////////////////////////////////////////////////////// 1646 1647 //std::cout << std::endl << "config_fsm" << std::endl; 1648 1649 switch( r_config_fsm.read() ) 1650 { 1651 ///////////////// 1652 case CONFIG_IDLE: // waiting for a config request 1653 { 1654 if ( r_config_cmd.read() != MEMC_CMD_NOP ) 1655 { 1656 r_config_fsm = CONFIG_LOOP; 1657 1658 #if DEBUG_MEMC_CONFIG 1659 if(m_debug) 1660 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1661 << " / address = " << std::hex << r_config_address.read() 1662 << " / lines = " << std::dec << r_config_cmd_lines.read() 1663 << " / type = " << r_config_cmd.read() << std::endl; 1664 #endif 1665 } 1666 break; 1667 } 1668 ///////////////// 1669 case CONFIG_LOOP: // test if last line to be handled 1670 { 1671 if ( r_config_cmd_lines.read() == 0 ) 1672 { 1673 r_config_cmd = MEMC_CMD_NOP; 1674 r_config_fsm = CONFIG_WAIT; 1675 } 1676 else 1677 { 1678 r_config_fsm = CONFIG_DIR_REQ; 1679 } 1680 1681 #if DEBUG_MEMC_CONFIG 1682 if(m_debug) 1683 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1684 << " / address = " << std::hex << r_config_address.read() 1685 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 1686 << " / command = " << r_config_cmd.read() << std::endl; 1687 #endif 1688 break; 1689 } 1690 ///////////////// 1691 case CONFIG_WAIT: // wait completion
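// ------------------------------------------------------------------------------------
// Illustration (editor sketch, not part of vci_mem_cache.cpp): a possible driver-side
// view of the 6-access configuration sequence described in the CONFIG FSM comment above.
// The word offsets and command codes below are placeholders for this example only; the
// real values come from the TSAR memory-cache segment map. The access that launches the
// operation presumably gets its VCI response only when the CONFIG FSM reaches CONFIG_RSP,
// so the sequence blocks until the INVAL / SYNC is complete.
#include <stdint.h>

enum : uint32_t { MEMC_CMD_NOP = 0, MEMC_CMD_INVAL = 1, MEMC_CMD_SYNC = 2 };   // assumed encoding
enum : uint32_t { MEMC_LOCK = 0, MEMC_ADDR_LO = 1, MEMC_ADDR_HI = 2,           // hypothetical word
                  MEMC_BUF_LENGTH = 3, MEMC_CMD_TYPE = 4 };                    // offsets in the segment

static void memc_config_op( volatile uint32_t *memc,      // base of the uncached config segment
                            uint64_t           buf_paddr, // buffer base (cache-line aligned)
                            uint32_t           buf_bytes, // buffer length in bytes
                            uint32_t           cmd )      // MEMC_CMD_INVAL or MEMC_CMD_SYNC
{
    while ( memc[MEMC_LOCK] != 0 ) {}                  // Read MEMC_LOCK: spin until the read
                                                       // returns 0 (assumed "lock granted" value)
    memc[MEMC_ADDR_LO]    = (uint32_t) buf_paddr;          // buffer address LSB
    memc[MEMC_ADDR_HI]    = (uint32_t)(buf_paddr >> 32);   // buffer address MSB
    memc[MEMC_BUF_LENGTH] = buf_bytes;                     // buffer length (bytes)
    memc[MEMC_CMD_TYPE]   = cmd;                           // launch the actual operation
    memc[MEMC_LOCK]       = 0;                             // release the lock
}
// ------------------------------------------------------------------------------------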
(last response) 1692 { 1693 if ( r_config_rsp_lines.read() == 0 ) // last response received 1694 { 1695 r_config_fsm = CONFIG_RSP; 1696 } 1697 1698 #if DEBUG_MEMC_CONFIG 1699 if(m_debug) 1700 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1701 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1702 #endif 1703 break; 1704 } 1705 //////////////// 1706 case CONFIG_RSP: // request TGT_RSP FSM to return response 1707 { 1708 if ( not r_config_to_tgt_rsp_req.read() ) 1709 { 1710 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1711 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1712 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1713 r_config_to_tgt_rsp_error = false; 1714 r_config_to_tgt_rsp_req = true; 1715 r_config_fsm = CONFIG_IDLE; 1716 1717 #if DEBUG_MEMC_CONFIG 1718 if(m_debug) 1719 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1720 << " error = " << r_config_to_tgt_rsp_error.read() 1721 << " / rsrcid = " << std::hex << r_config_srcid.read() 1722 << " / rtrdid = " << std::hex << r_config_trdid.read() 1723 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1724 #endif 1725 } 1726 break; 1727 1728 } 1729 //////////////////// 1730 case CONFIG_DIR_REQ: // Request directory lock 1731 { 1732 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG ) 1733 { 1734 r_config_fsm = CONFIG_DIR_ACCESS; 1735 } 1736 1737 #if DEBUG_MEMC_CONFIG 1738 if(m_debug) 1739 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 1740 << " Request DIR access" << std::endl; 1741 #endif 1742 break; 1743 } 1744 /////////////////////// 1745 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1746 { 1747 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1748 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1749 1750 size_t way = 0; 1751 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 1752 1753 if ( entry.valid and // hit & inval command 1754 (r_config_cmd.read() == MEMC_CMD_INVAL) ) 1755 { 1756 r_config_dir_way = way; 1757 r_config_dir_copy_inst = entry.owner.inst; 1758 r_config_dir_copy_srcid = entry.owner.srcid; 1759 r_config_dir_is_cnt = entry.is_cnt; 1760 r_config_dir_lock = entry.lock; 1761 r_config_dir_count = entry.count; 1762 r_config_dir_ptr = entry.ptr; 1763 1764 r_config_fsm = CONFIG_IVT_LOCK; 1765 } 1766 else if ( entry.valid and // hit & sync command 1767 entry.dirty and 1768 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1769 { 1770 r_config_fsm = CONFIG_TRT_LOCK; 1771 } 1772 else // miss : return to LOOP 1773 { 1774 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1775 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1776 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1777 r_config_address = r_config_address.read() + (m_words<<2); 1778 r_config_fsm = CONFIG_LOOP; 1779 } 1780 1781 #if DEBUG_MEMC_CONFIG 1782 if(m_debug) 1783 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 1784 << " address = " << std::hex << r_config_address.read() 1785 << " / hit = " << std::dec << entry.valid 1786 << " / dirty = " << entry.dirty 1787 << " / count = " << entry.count 1788 << " / is_cnt = " << entry.is_cnt << std::endl; 1789 #endif 1790 break; 1791 } 1792 ///////////////////// 1793 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1794 // to a dirty cache line 1795 // keep DIR lock, and try to get TRT lock 1796 // return to LOOP state if TRT full 1797 // reset dirty bit in DIR and register a PUT 1798 // 
trabsaction in TRT if not full. 1799 { 1800 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1801 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1802 1803 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1804 { 1805 size_t index = 0; 1806 bool wok = not m_trt.full(index); 1807 1808 if ( not wok ) 1809 { 1810 r_config_fsm = CONFIG_LOOP; 1811 } 1812 else 1813 { 1814 size_t way = r_config_dir_way.read(); 1815 size_t set = m_y[r_config_address.read()]; 1816 1817 // reset dirty bit in DIR 1818 DirectoryEntry entry; 1819 entry.valid = true; 1820 entry.dirty = false; 1821 entry.tag = m_z[r_config_address.read()]; 1822 entry.is_cnt = r_config_dir_is_cnt.read(); 1823 entry.lock = r_config_dir_lock.read(); 1824 entry.ptr = r_config_dir_ptr.read(); 1825 entry.count = r_config_dir_count.read(); 1826 entry.owner.inst = r_config_dir_copy_inst.read(); 1827 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1828 m_cache_directory.write( set, 1829 way, 1830 entry ); 1831 1832 r_config_trt_index = index; 1833 r_config_fsm = CONFIG_TRT_SET; 1834 } 1835 1836 #if DEBUG_MEMC_CONFIG 1837 if(m_debug) 1838 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1839 << " wok = " << std::dec << wok 1840 << " index = " << index << std::endl; 1841 #endif 1842 } 1843 break; 1844 } 1845 //////////////////// 1846 case CONFIG_TRT_SET: // read data in cache 1847 // and post a PUT request in TRT 1848 { 1849 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1850 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1851 1852 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1853 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1854 1855 // read data into cache 1856 size_t way = r_config_dir_way.read(); 1857 size_t set = m_y[r_config_address.read()]; 1858 1859 sc_signal<data_t> config_data[16]; 1860 m_cache_data.read_line( way, 1861 set, 1862 config_data ); 1863 1864 // post a PUT request in TRT 1865 std::vector<data_t> data_vector; 1866 data_vector.clear(); 1867 for(size_t i=0; i<m_words; i++) data_vector.push_back(config_data[i].read()); 1868 m_trt.set( r_config_trt_index.read(), 1869 false, // PUT 1870 m_nline[r_config_address.read()], // nline 1871 0, // srcid: unused 1872 0, // trdid: unused 1873 0, // pktid: unused 1874 false, // not proc_read 1875 0, // read_length: unused 1876 0, // word_index: unused 1877 std::vector<be_t>(m_words,0xF), 1878 data_vector); 1879 1880 #if DEBUG_MEMC_CONFIG 1881 if(m_debug) 1882 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1883 << " address = " << std::hex << r_config_address.read() 1884 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1885 #endif 1886 break; 1887 } 1888 //////////////////// 1889 case CONFIG_PUT_REQ: // PUT request to IXR_CMD_FSM 1890 { 1891 if ( not r_config_to_ixr_cmd_req.read() ) 1892 { 1893 r_config_to_ixr_cmd_req = true; 1894 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1895 1896 // prepare next iteration 1897 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1898 r_config_address = r_config_address.read() + (m_words<<2); 1899 r_config_fsm = CONFIG_LOOP; 1900 1901 #if DEBUG_MEMC_CONFIG 1902 if(m_debug) 1903 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> PUT request to IXR_CMD_FSM" 1904 << " / address = " << std::hex << r_config_address.read() << std::endl; 1905 #endif 1906 } 1907 break; 1908 } 1909 ///////////////////// 1910 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1911 // Keep DIR lock and Try to get IVT 
lock. 1912 // Return to LOOP state if IVT full. 1913 // Register inval in IVT, and invalidate the 1914 // directory if IVT not full. 1915 { 1916 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1917 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1918 1919 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1920 { 1921 size_t set = m_y[(addr_t)(r_config_address.read())]; 1922 size_t way = r_config_dir_way.read(); 1923 1924 if ( r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 1925 { 1926 m_cache_directory.inval( way, set ); 1927 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1928 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1929 r_config_address = r_config_address.read() + (m_words<<2); 1930 r_config_fsm = CONFIG_LOOP; 1931 1932 #if DEBUG_MEMC_CONFIG 1933 if(m_debug) 1934 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1935 << " No copies in L1 : inval DIR entry" << std::endl; 1936 #endif 1937 } 1938 else // try to register inval in IVT 1939 { 1940 bool wok = false; 1941 size_t index = 0; 1942 bool broadcast = r_config_dir_is_cnt.read(); 1943 size_t srcid = r_config_srcid.read(); 1944 size_t trdid = r_config_trdid.read(); 1945 size_t pktid = r_config_pktid.read(); 1946 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 1947 size_t nb_copies = r_config_dir_count.read(); 1948 1949 wok = m_ivt.set(false, // it's an inval transaction 1950 broadcast, 1951 false, // no response required 1952 true, // acknowledge required 1953 srcid, 1954 trdid, 1955 pktid, 1956 nline, 1957 nb_copies, 1958 index); 1959 1960 if ( wok ) // IVT success => inval DIR slot 1961 { 1962 m_cache_directory.inval( way, set ); 1963 r_config_ivt_index = index; 1964 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1965 else r_config_fsm = CONFIG_INVAL_SEND; 1966 1967 #if DEBUG_MEMC_CONFIG 1968 if(m_debug) 1969 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1970 << " Inval DIR entry and register inval in IVT" 1971 << " / index = " << std::dec << index 1972 << " / broadcast = " << broadcast << std::endl; 1973 #endif 1974 } 1975 else // IVT full => release both DIR and IVT locks 1976 { 1977 r_config_fsm = CONFIG_LOOP; 1978 1979 #if DEBUG_MEMC_CONFIG 1980 if(m_debug) 1981 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1982 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1983 #endif 1984 } 1985 } 1986 } 1987 break; 1988 } 1989 //////////////////// 1990 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 1991 { 1992 if( not r_config_to_cc_send_multi_req.read() and 1993 not r_config_to_cc_send_brdcast_req.read() ) 1994 { 1995 // post bc inval request 1996 r_config_to_cc_send_multi_req = false; 1997 r_config_to_cc_send_brdcast_req = true; 1998 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1999 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2000 2001 // prepare next iteration 2002 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2003 r_config_address = r_config_address.read() + (m_words<<2); 2004 r_config_fsm = CONFIG_LOOP; 2005 2006 #if DEBUG_MEMC_CONFIG 2007 if(m_debug) 2008 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 2009 << " Post a broadcast inval request to CC_SEND FSM" 2010 << " / address = " << r_config_address.read() <<std::endl; 2011 #endif 2012 } 2013 break; 2014 } 2015 /////////////////////// 2016 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 2017 { 2018 if( not r_config_to_cc_send_multi_req.read() and 2019 not 
r_config_to_cc_send_brdcast_req.read() ) 2020 { 2021 // post multi inval request 2022 r_config_to_cc_send_multi_req = true; 2023 r_config_to_cc_send_brdcast_req = false; 2024 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2025 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2026 2027 // post data into FIFO 2028 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2029 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2030 config_to_cc_send_fifo_put = true; 2031 2032 if ( r_config_dir_count.read() == 1 ) // one copy 2033 { 2034 // prepare next iteration 2035 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2036 r_config_address = r_config_address.read() + (m_words<<2); 2037 r_config_fsm = CONFIG_LOOP; 2038 } 2039 else // several copies 2040 { 2041 r_config_fsm = CONFIG_HEAP_REQ; 2042 } 2043 2044 #if DEBUG_MEMC_CONFIG 2045 if(m_debug) 2046 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2047 << " Post multi inval request to CC_SEND FSM" 2048 << " / address = " << std::hex << r_config_address.read() 2049 << " / copy = " << r_config_dir_copy_srcid.read() 2050 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2051 #endif 2052 } 2053 break; 2054 } 2055 ///////////////////// 2056 case CONFIG_HEAP_REQ: // Try to get access to Heap 2057 { 2058 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 2059 { 2060 r_config_fsm = CONFIG_HEAP_SCAN; 2061 r_config_heap_next = r_config_dir_ptr.read(); 2062 } 2063 2064 #if DEBUG_MEMC_CONFIG 2065 if(m_debug) 2066 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2067 << " Requesting HEAP lock" << std::endl; 2068 #endif 2069 break; 2070 } 2071 ////////////////////// 2072 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2073 { 2074 HeapEntry entry = m_heap.read( r_config_heap_next.read() ); 2075 bool last_copy = (entry.next == r_config_heap_next.read()); 2076 2077 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2078 config_to_cc_send_fifo_inst = entry.owner.inst; 2079 // config_to_cc_send_fifo_last = last_copy; 2080 config_to_cc_send_fifo_put = true; 2081 2082 if ( m_config_to_cc_send_inst_fifo.wok() ) // inval request accepted 2083 { 2084 r_config_heap_next = entry.next; 2085 if ( last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 2086 } 2087 2088 #if DEBUG_MEMC_CONFIG 2089 if(m_debug) 2090 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2091 << " Post multi inval request to CC_SEND FSM" 2092 << " / address = " << std::hex << r_config_address.read() 2093 << " / copy = " << entry.owner.srcid 2094 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2095 #endif 2096 break; 2097 } 2098 ////////////////////// 2099 case CONFIG_HEAP_LAST: // HEAP housekeeping 2100 { 2101 size_t free_pointer = m_heap.next_free_ptr(); 2102 HeapEntry last_entry; 2103 last_entry.owner.srcid = 0; 2104 last_entry.owner.inst = false; 2105 2106 if ( m_heap.is_full() ) 2107 { 2108 last_entry.next = r_config_dir_ptr.read(); 2109 m_heap.unset_full(); 2110 } 2111 else 2112 { 2113 last_entry.next = free_pointer; 2114 } 2115 2116 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 2117 m_heap.write( r_config_heap_next.read(), last_entry ); 2118 2119 // prepare next iteration 2120 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2121 r_config_address = r_config_address.read() + (m_words<<2); 2122 r_config_fsm = CONFIG_LOOP; 2123 2124 #if DEBUG_MEMC_CONFIG 2125 if(m_debug) 2126 std::cout << " <MEMC " << name() << " CONFIG_HEAP_LAST>" 2127 << " Heap housekeeping" << std::endl; 
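// ------------------------------------------------------------------------------------
// Illustration (editor sketch, not part of vci_mem_cache.cpp): a much-simplified,
// self-contained model of the HEAP manipulated by CONFIG_HEAP_SCAN / CONFIG_HEAP_LAST
// above (and by the READ_HEAP_* states further down). The extra copies of a line are
// chained through 'next', the last element of a chain points to itself, and the free
// entries are threaded through the same array. The real HeapDirectory carries more
// state; this sketch only shows the list handling.
#include <cstddef>
#include <cstdint>
#include <vector>

struct SketchOwner     { uint32_t srcid; bool inst; };
struct SketchHeapEntry { SketchOwner owner; size_t next; };   // next == own index => last element

class SketchCopyHeap
{
    std::vector<SketchHeapEntry> m_tab;
    size_t                       m_free;   // head of the free list
    bool                         m_full;
public:
    explicit SketchCopyHeap( size_t n ) : m_tab(n), m_free(0), m_full(false)
    {
        for ( size_t i = 0 ; i < n ; i++ ) m_tab[i].next = (i + 1 < n) ? i + 1 : i;
    }

    // Add one copy in front of the chain starting at 'head' and return the new head
    // (this is what READ_HEAP_LOCK / READ_HEAP_WRITE do when a line gains a copy).
    size_t push_front( size_t head, SketchOwner owner )
    {
        size_t idx       = m_free;
        size_t next_free = m_tab[m_free].next;
        m_tab[idx].owner = owner;
        m_tab[idx].next  = head;
        if ( next_free == m_free ) m_full = true;    // that was the last free slot
        else                       m_free = next_free;
        return idx;
    }

    // Walk a whole chain, calling 'visit' on each owner (e.g. to post a multi-inval),
    // then give the chain back to the free list (CONFIG_HEAP_SCAN + CONFIG_HEAP_LAST).
    template <typename F> void release_chain( size_t head, F visit )
    {
        size_t cur = head;
        while ( true )
        {
            visit( m_tab[cur].owner );
            if ( m_tab[cur].next == cur ) break;     // self-pointing => last copy
            cur = m_tab[cur].next;
        }
        m_tab[cur].next = m_full ? cur : m_free;     // terminate, or prepend to free list
        m_free          = head;
        m_full          = false;
    }
};
// ------------------------------------------------------------------------------------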
2128 #endif 2129 break; 2130 } 2131 } // end switch r_config_fsm 2132 2133 //////////////////////////////////////////////////////////////////////////////////// 2134 // READ FSM 2135 //////////////////////////////////////////////////////////////////////////////////// 2136 // The READ FSM controls the VCI read and ll requests. 2137 // It takes the lock protecting the cache directory to check the cache line status: 2138 // - In case of HIT 2139 // The fsm copies the data (one line, or one single word) 2140 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 2141 // The requesting initiator is registered in the cache directory. 2142 // If the number of copy is larger than 1, the new copy is registered 2143 // in the HEAP. 2144 // If the number of copy is larger than the threshold, the HEAP is cleared, 2145 // and the corresponding line switches to the counter mode. 2146 // - In case of MISS 2147 // The READ fsm takes the lock protecting the transaction tab. 2148 // If a read transaction to the XRAM for this line already exists, 2149 // or if the transaction tab is full, the fsm is stalled. 2150 // If a TRT entry is free, the READ request is registered in TRT, 2151 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 2152 // The READ FSM returns in the IDLE state as the read transaction will be 2153 // completed when the missing line will be received. 2154 //////////////////////////////////////////////////////////////////////////////////// 2155 2156 //std::cout << std::endl << "read_fsm" << std::endl; 2157 2158 switch(r_read_fsm.read()) 2159 { 2160 /////////////// 2161 case READ_IDLE: // waiting a read request 2162 { 2163 if(m_cmd_read_addr_fifo.rok()) 2164 { 2160 /////////////// 2161 case READ_IDLE: // waiting a read request 2162 { 2163 if(m_cmd_read_addr_fifo.rok()) 2164 { 2165 2165 2166 2166 #if DEBUG_MEMC_READ 2167 if(m_debug)2168 std::cout << " <MEMC " << name() << " READ_IDLE> Read request"2169 << " : address = " << std::hex << m_cmd_read_addr_fifo.read()2170 << " / srcid = " << m_cmd_read_srcid_fifo.read()2171 << " / trdid = " << m_cmd_read_trdid_fifo.read()2172 << " / pktid = " << m_cmd_read_pktid_fifo.read()2173 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl;2174 #endif 2175 r_read_fsm = READ_DIR_REQ;2176 }2177 break;2178 }2179 //////////////////2180 case READ_DIR_REQ: // Get the lock to the directory2181 {2182 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ)2183 {2184 r_read_fsm = READ_DIR_LOCK;2185 m_cpt_read_fsm_n_dir_lock++;2186 }2167 if(m_debug) 2168 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2169 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2170 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2171 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2172 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2173 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2174 #endif 2175 r_read_fsm = READ_DIR_REQ; 2176 } 2177 break; 2178 } 2179 ////////////////// 2180 case READ_DIR_REQ: // Get the lock to the directory 2181 { 2182 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2183 { 2184 r_read_fsm = READ_DIR_LOCK; 2185 m_cpt_read_fsm_n_dir_lock++; 2186 } 2187 2187 2188 2188 #if DEBUG_MEMC_READ 2189 if(m_debug)2190 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl;2191 #endif 2192 2193 m_cpt_read_fsm_dir_lock++;2194 2195 break;2196 }2197 2198 ///////////////////2199 case READ_DIR_LOCK: // check directory for hit / miss2200 {2201 
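// ------------------------------------------------------------------------------------
// Illustration (editor sketch, not part of vci_mem_cache.cpp): the pktid bit tests used
// by READ_DIR_LOCK / READ_DIR_HIT, written out as stand-alone helpers. Only the tests
// that appear in this file are restated; the symbolic TYPE_* values themselves are
// defined in the TSAR headers and are not reproduced here.
#include <cstdint>

inline bool pktid_is_cached_read( uint32_t pktid )            // bit 0: the L1 keeps a copy
{ return (pktid & 0x1) != 0; }                                //        (XXX_MISS types)

inline bool pktid_is_inst_read( uint32_t pktid )              // bit 1: instruction access
{ return (pktid & 0x2) != 0; }                                //        (TYPE_READ_INS_XXX)

inline bool pktid_is_ll( uint32_t pktid, uint32_t type_ll )   // low 3 bits identify the access
{ return (pktid & 0x7) == type_ll; }                          // type (pass TYPE_LL here)

inline bool pktid_is_no_coherent_read( uint32_t pktid )       // ODCCP: pktid == 0x9 means a
{ return pktid == 0x9; }                                      // read on a non-coherent line
// ------------------------------------------------------------------------------------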
assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and2202 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation");2203 2204 size_t way = 0;2205 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way);2206 2207 // access the global table ONLY when we have an LL cmd2208 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL)2209 {2210 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read());2211 }2212 r_read_is_cnt = entry.is_cnt;2213 r_read_dirty = entry.dirty;2214 r_read_lock = entry.lock;2215 r_read_tag = entry.tag;2216 r_read_way = way;2217 r_read_count = entry.count;2218 r_read_copy = entry.owner.srcid;2219 r_read_copy_inst = entry.owner.inst;2220 r_read_ptr = entry.ptr; // pointer to the heap2221 2222 // check if this is a cached read, this means pktid is either2223 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding2224 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding2225 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1);2226 if(entry.valid) // hit2227 {2228 // test if we need to register a new copy in the heap2229 if(entry.is_cnt or (entry.count == 0) or !cached_read)2230 {2231 r_read_fsm = READ_DIR_HIT;2232 }2233 else2234 {2235 r_read_fsm = READ_HEAP_REQ;2236 }2237 }2238 else // miss2239 {2240 r_read_fsm = READ_TRT_LOCK;2241 }2189 if(m_debug) 2190 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 2191 #endif 2192 2193 m_cpt_read_fsm_dir_lock++; 2194 2195 break; 2196 } 2197 2198 /////////////////// 2199 case READ_DIR_LOCK: // check directory for hit / miss 2200 { 2201 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2202 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2203 2204 size_t way = 0; 2205 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2206 2207 // access the global table ONLY when we have an LL cmd 2208 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 2209 { 2210 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2211 } 2212 r_read_is_cnt = entry.is_cnt; 2213 r_read_dirty = entry.dirty; 2214 r_read_lock = entry.lock; 2215 r_read_tag = entry.tag; 2216 r_read_way = way; 2217 r_read_count = entry.count; 2218 r_read_copy = entry.owner.srcid; 2219 r_read_copy_inst = entry.owner.inst; 2220 r_read_ptr = entry.ptr; // pointer to the heap 2221 2222 // check if this is a cached read, this means pktid is either 2223 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2224 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2225 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2226 if(entry.valid) // hit 2227 { 2228 // test if we need to register a new copy in the heap 2229 if(entry.is_cnt or (entry.count == 0) or !cached_read) 2230 { 2231 r_read_fsm = READ_DIR_HIT; 2232 } 2233 else 2234 { 2235 r_read_fsm = READ_HEAP_REQ; 2236 } 2237 } 2238 else // miss 2239 { 2240 r_read_fsm = READ_TRT_LOCK; 2241 } 2242 2242 2243 2243 #if DEBUG_MEMC_READ 2244 if(m_debug)2245 {2246 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: "2247 << " address = " << std::hex << m_cmd_read_addr_fifo.read()2248 << " / hit = " << std::dec << entry.valid2249 << " / count = " <<std::dec << entry.count2250 << " / is_cnt = " << entry.is_cnt;2251 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl;2252 else std::cout << std::endl;2253 }2254 #endif 2255 break;2256 }2257 //////////////////2258 case READ_DIR_HIT: // read data in cache & update the directory2259 2260 2261 2262 2263 2264 {2265 assert( (r_alloc_dir_fsm.read() == 
ALLOC_DIR_READ) and2266 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation");2267 2268 // check if this is an instruction read, this means pktid is either2269 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding2270 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding2271 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0);2272 // check if this is a cached read, this means pktid is either2273 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding2274 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding2275 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1);2276 bool is_cnt = r_read_is_cnt.read();2277 2278 // read data in the cache2279 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())];2280 size_t way = r_read_way.read();2281 2282 m_cache_data.read_line(way, set, r_read_data);2283 2284 // update the cache directory2285 DirectoryEntry entry;2286 entry.valid = true;2287 entry.is_cnt = is_cnt;2288 entry.dirty = r_read_dirty.read();2289 entry.tag = r_read_tag.read();2290 entry.lock = r_read_lock.read();2291 entry.ptr = r_read_ptr.read();2292 2293 /*ODCCP*/ // if pktid = 0x9 that means read on no coherent line2294 if(m_cmd_read_pktid_fifo.read() == 0x9){2295 entry.coherent = false;2296 }2297 else{2298 entry.coherent = true;2299 }2300 2301 if(cached_read) // Cached read => we must update the copies2302 {2303 if(!is_cnt) // Not counter mode2304 {2305 entry.owner.srcid = m_cmd_read_srcid_fifo.read();2306 entry.owner.inst = inst_read;2307 entry.count = r_read_count.read() + 1;2308 }2309 else // Counter mode2310 {2311 entry.owner.srcid = 0;2312 entry.owner.inst = false;2313 entry.count = r_read_count.read() + 1;2314 }2315 }2316 else // Uncached read2317 {2318 entry.owner.srcid = r_read_copy.read();2319 entry.owner.inst = r_read_copy_inst.read();2320 entry.count = r_read_count.read();2321 }2244 if(m_debug) 2245 { 2246 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2247 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2248 << " / hit = " << std::dec << entry.valid 2249 << " / count = " <<std::dec << entry.count 2250 << " / is_cnt = " << entry.is_cnt; 2251 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2252 else std::cout << std::endl; 2253 } 2254 #endif 2255 break; 2256 } 2257 ////////////////// 2258 case READ_DIR_HIT: // read data in cache & update the directory 2259 // we enter this state in 3 cases: 2260 // - the read request is uncachable 2261 // - the cache line is in counter mode 2262 // - the cache line is valid but not replicated 2263 2264 { 2265 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2266 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2267 2268 // check if this is an instruction read, this means pktid is either 2269 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2270 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2271 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2272 // check if this is a cached read, this means pktid is either 2273 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2274 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2275 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2276 bool is_cnt = r_read_is_cnt.read(); 2277 2278 // read data in the cache 2279 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2280 size_t way = r_read_way.read(); 2281 2282 m_cache_data.read_line(way, set, r_read_data); 2283 2284 // update the cache directory 2285 DirectoryEntry entry; 2286 entry.valid = true; 2287 entry.is_cnt = is_cnt; 2288 entry.dirty = 
r_read_dirty.read(); 2289 entry.tag = r_read_tag.read(); 2290 entry.lock = r_read_lock.read(); 2291 entry.ptr = r_read_ptr.read(); 2292 2293 /*ODCCP*/ // if pktid = 0x9 that means read on no coherent line 2294 if(m_cmd_read_pktid_fifo.read() == 0x9){ 2295 entry.coherent = false; 2296 } 2297 else{ 2298 entry.coherent = true; 2299 } 2300 2301 if(cached_read) // Cached read => we must update the copies 2302 { 2303 if(!is_cnt) // Not counter mode 2304 { 2305 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2306 entry.owner.inst = inst_read; 2307 entry.count = r_read_count.read() + 1; 2308 } 2309 else // Counter mode 2310 { 2311 entry.owner.srcid = 0; 2312 entry.owner.inst = false; 2313 entry.count = r_read_count.read() + 1; 2314 } 2315 } 2316 else // Uncached read 2317 { 2318 entry.owner.srcid = r_read_copy.read(); 2319 entry.owner.inst = r_read_copy_inst.read(); 2320 entry.count = r_read_count.read(); 2321 } 2322 2322 2323 2323 #if DEBUG_MEMC_READ 2324 if(m_debug)2325 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:"2326 << " addr = " << std::hex << m_cmd_read_addr_fifo.read()2327 << " / set = " << std::dec << set2328 << " / way = " << way2329 << " / owner_id = " << std::hex << entry.owner.srcid2330 << " / owner_ins = " << std::dec << entry.owner.inst2331 << " / count = " << entry.count2332 << " / is_cnt = " << entry.is_cnt << std::endl;2333 #endif 2334 /*if(m_monitor_ok)2335 {2336 char buf[80];2337 snprintf(buf, 80, "READ_DIR_HIT srcid %d, ins %d",2338 (int)m_cmd_read_srcid_fifo.read(),2339 (int)((m_cmd_read_pktid_fifo.read()&0x2)!=0));2340 check_monitor(m_cmd_read_addr_fifo.read(), r_read_data[0], true);2341 }*/2342 m_cache_directory.write(set, way, entry);2343 r_read_fsm = READ_RSP;2344 break;2345 }2346 ///////////////////2347 case READ_HEAP_REQ: // Get the lock to the HEAP directory2348 {2349 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ)2350 {2351 r_read_fsm = READ_HEAP_LOCK;2352 m_cpt_read_fsm_n_heap_lock++;2353 }2324 if(m_debug) 2325 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2326 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2327 << " / set = " << std::dec << set 2328 << " / way = " << way 2329 << " / owner_id = " << std::hex << entry.owner.srcid 2330 << " / owner_ins = " << std::dec << entry.owner.inst 2331 << " / count = " << entry.count 2332 << " / is_cnt = " << entry.is_cnt << std::endl; 2333 #endif 2334 /*if(m_monitor_ok) 2335 { 2336 char buf[80]; 2337 snprintf(buf, 80, "READ_DIR_HIT srcid %d, ins %d", 2338 (int)m_cmd_read_srcid_fifo.read(), 2339 (int)((m_cmd_read_pktid_fifo.read()&0x2)!=0)); 2340 check_monitor(m_cmd_read_addr_fifo.read(), r_read_data[0], true); 2341 }*/ 2342 m_cache_directory.write(set, way, entry); 2343 r_read_fsm = READ_RSP; 2344 break; 2345 } 2346 /////////////////// 2347 case READ_HEAP_REQ: // Get the lock to the HEAP directory 2348 { 2349 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2350 { 2351 r_read_fsm = READ_HEAP_LOCK; 2352 m_cpt_read_fsm_n_heap_lock++; 2353 } 2354 2354 2355 2355 #if DEBUG_MEMC_READ 2356 if(m_debug)2357 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>"2358 << " Requesting HEAP lock " << std::endl;2359 #endif 2360 2361 m_cpt_read_fsm_heap_lock++;2362 2363 break;2364 }2365 2366 ////////////////////2367 case READ_HEAP_LOCK: // read data in cache, update the directory2368 2369 {2370 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ)2371 {2372 // enter counter mode when we reach the limit of copies or the heap is full2373 bool go_cnt = (r_read_count.read() >= 
m_max_copies) or m_heap.is_full();2374 2375 // read data in the cache2376 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())];2377 size_t way = r_read_way.read();2378 2379 m_cache_data.read_line(way, set, r_read_data);2380 2381 // update the cache directory2382 DirectoryEntry entry;2383 entry.valid = true;2384 entry.is_cnt = go_cnt;2385 entry.dirty = r_read_dirty.read();2386 entry.tag = r_read_tag.read();2387 entry.lock = r_read_lock.read();2388 entry.count = r_read_count.read() + 1;2389 2390 if(not go_cnt) // Not entering counter mode2391 {2392 entry.owner.srcid = r_read_copy.read();2393 entry.owner.inst = r_read_copy_inst.read();2394 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap2395 }2396 else // Entering Counter mode2397 {2398 entry.owner.srcid = 0;2399 entry.owner.inst = false;2400 entry.ptr = 0;2401 }2402 2403 m_cache_directory.write(set, way, entry);2404 2405 // prepare the heap update (add an entry, or clear the linked list)2406 if(not go_cnt) // not switching to counter mode2407 {2408 // We test if the next free entry in the heap is the last2409 HeapEntry heap_entry = m_heap.next_free_entry();2410 r_read_next_ptr = heap_entry.next;2411 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr());2412 2413 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP2414 }2415 else // switching to counter mode2416 {2417 if(r_read_count.read() >1) // heap must be cleared2418 {2419 HeapEntry next_entry = m_heap.read(r_read_ptr.read());2420 r_read_next_ptr = m_heap.next_free_ptr();2421 m_heap.write_free_ptr(r_read_ptr.read());2422 2423 if(next_entry.next == r_read_ptr.read()) // last entry2424 {2425 r_read_fsm = READ_HEAP_LAST; // erase the entry2426 }2427 else // not the last entry2428 {2429 r_read_ptr = next_entry.next;2430 r_read_fsm = READ_HEAP_ERASE; // erase the list2431 }2432 }2433 else // the heap is not used / nothing to do2434 {2435 r_read_fsm = READ_RSP;2436 }2437 }2356 if(m_debug) 2357 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 2358 << " Requesting HEAP lock " << std::endl; 2359 #endif 2360 2361 m_cpt_read_fsm_heap_lock++; 2362 2363 break; 2364 } 2365 2366 //////////////////// 2367 case READ_HEAP_LOCK: // read data in cache, update the directory 2368 // and prepare the HEAP update 2369 { 2370 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2371 { 2372 // enter counter mode when we reach the limit of copies or the heap is full 2373 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 2374 2375 // read data in the cache 2376 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2377 size_t way = r_read_way.read(); 2378 2379 m_cache_data.read_line(way, set, r_read_data); 2380 2381 // update the cache directory 2382 DirectoryEntry entry; 2383 entry.valid = true; 2384 entry.is_cnt = go_cnt; 2385 entry.dirty = r_read_dirty.read(); 2386 entry.tag = r_read_tag.read(); 2387 entry.lock = r_read_lock.read(); 2388 entry.count = r_read_count.read() + 1; 2389 2390 if(not go_cnt) // Not entering counter mode 2391 { 2392 entry.owner.srcid = r_read_copy.read(); 2393 entry.owner.inst = r_read_copy_inst.read(); 2394 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 2395 } 2396 else // Entering Counter mode 2397 { 2398 entry.owner.srcid = 0; 2399 entry.owner.inst = false; 2400 entry.ptr = 0; 2401 } 2402 2403 m_cache_directory.write(set, way, entry); 2404 2405 // prepare the heap update (add an entry, or clear the linked list) 2406 if(not go_cnt) // not switching to counter mode 2407 { 2408 // We test if the next free entry in the 
heap is the last 2409 HeapEntry heap_entry = m_heap.next_free_entry(); 2410 r_read_next_ptr = heap_entry.next; 2411 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 2412 2413 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 2414 } 2415 else // switching to counter mode 2416 { 2417 if(r_read_count.read() >1) // heap must be cleared 2418 { 2419 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2420 r_read_next_ptr = m_heap.next_free_ptr(); 2421 m_heap.write_free_ptr(r_read_ptr.read()); 2422 2423 if(next_entry.next == r_read_ptr.read()) // last entry 2424 { 2425 r_read_fsm = READ_HEAP_LAST; // erase the entry 2426 } 2427 else // not the last entry 2428 { 2429 r_read_ptr = next_entry.next; 2430 r_read_fsm = READ_HEAP_ERASE; // erase the list 2431 } 2432 } 2433 else // the heap is not used / nothing to do 2434 { 2435 r_read_fsm = READ_RSP; 2436 } 2437 } 2438 2438 2439 2439 #if DEBUG_MEMC_READ 2440 if(m_debug)2441 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:"2442 << " tag = " << std::hex << entry.tag2443 << " set = " << std::dec << set2444 << " way = " << way2445 << " count = " << entry.count2446 << " is_cnt = " << entry.is_cnt << std::endl;2447 #endif 2448 }2449 else2450 {2451 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK"2452 << "Bad HEAP allocation" << std::endl;2453 exit(0);2454 }2455 break;2456 }2457 /////////////////////2458 case READ_HEAP_WRITE: // add an entry in the heap2459 {2460 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ)2461 {2462 HeapEntry heap_entry;2463 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read();2464 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0);2465 2466 if(r_read_count.read() == 1) // creation of a new linked list2467 {2468 heap_entry.next = m_heap.next_free_ptr();2469 }2470 else // head insertion in existing list2471 {2472 heap_entry.next = r_read_ptr.read();2473 }2474 m_heap.write_free_entry(heap_entry);2475 m_heap.write_free_ptr(r_read_next_ptr.read());2476 if(r_read_last_free.read()) m_heap.set_full();2477 2478 r_read_fsm = READ_RSP;2440 if(m_debug) 2441 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 2442 << " tag = " << std::hex << entry.tag 2443 << " set = " << std::dec << set 2444 << " way = " << way 2445 << " count = " << entry.count 2446 << " is_cnt = " << entry.is_cnt << std::endl; 2447 #endif 2448 } 2449 else 2450 { 2451 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK" 2452 << "Bad HEAP allocation" << std::endl; 2453 exit(0); 2454 } 2455 break; 2456 } 2457 ///////////////////// 2458 case READ_HEAP_WRITE: // add an entry in the heap 2459 { 2460 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2461 { 2462 HeapEntry heap_entry; 2463 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2464 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2465 2466 if(r_read_count.read() == 1) // creation of a new linked list 2467 { 2468 heap_entry.next = m_heap.next_free_ptr(); 2469 } 2470 else // head insertion in existing list 2471 { 2472 heap_entry.next = r_read_ptr.read(); 2473 } 2474 m_heap.write_free_entry(heap_entry); 2475 m_heap.write_free_ptr(r_read_next_ptr.read()); 2476 if(r_read_last_free.read()) m_heap.set_full(); 2477 2478 r_read_fsm = READ_RSP; 2479 2479 2480 2480 #if DEBUG_MEMC_READ 2481 if(m_debug)2482 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:"2483 << " owner_id = " << std::hex << heap_entry.owner.srcid2484 << " owner_ins = " << std::dec << 
heap_entry.owner.inst << std::endl;2485 #endif 2486 }2487 else2488 {2489 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE"2490 << "Bad HEAP allocation" << std::endl;2491 exit(0);2492 }2493 break;2494 }2495 /////////////////////2496 case READ_HEAP_ERASE:2497 {2498 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ)2499 {2500 HeapEntry next_entry = m_heap.read(r_read_ptr.read());2501 if(next_entry.next == r_read_ptr.read())2502 {2503 r_read_fsm = READ_HEAP_LAST;2504 }2505 else2506 {2507 r_read_ptr = next_entry.next;2508 r_read_fsm = READ_HEAP_ERASE;2509 }2510 }2511 else2512 {2513 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE"2514 << "Bad HEAP allocation" << std::endl;2515 exit(0);2516 }2517 break;2518 }2519 2520 ////////////////////2521 case READ_HEAP_LAST:2522 {2523 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ)2524 {2525 HeapEntry last_entry;2526 last_entry.owner.srcid = 0;2527 last_entry.owner.inst = false;2528 2529 if(m_heap.is_full())2530 {2531 last_entry.next = r_read_ptr.read();2532 m_heap.unset_full();2533 }2534 else2535 {2536 last_entry.next = r_read_next_ptr.read();2537 }2538 m_heap.write(r_read_ptr.read(),last_entry);2539 r_read_fsm = READ_RSP;2540 }2541 else2542 {2543 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST"2544 << "Bad HEAP allocation" << std::endl;2545 exit(0);2546 }2547 break;2548 }2549 //////////////2550 case READ_RSP: // request the TGT_RSP FSM to return data2551 {2552 if(!r_read_to_tgt_rsp_req)2553 {2554 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i];2555 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()];2556 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read();2557 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read();2558 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read();2559 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read();2560 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read();2561 cmd_read_fifo_get = true;2562 r_read_to_tgt_rsp_req = true;2563 r_read_fsm = READ_IDLE;2481 if(m_debug) 2482 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 2483 << " owner_id = " << std::hex << heap_entry.owner.srcid 2484 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 2485 #endif 2486 } 2487 else 2488 { 2489 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 2490 << "Bad HEAP allocation" << std::endl; 2491 exit(0); 2492 } 2493 break; 2494 } 2495 ///////////////////// 2496 case READ_HEAP_ERASE: 2497 { 2498 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2499 { 2500 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 2501 if(next_entry.next == r_read_ptr.read()) 2502 { 2503 r_read_fsm = READ_HEAP_LAST; 2504 } 2505 else 2506 { 2507 r_read_ptr = next_entry.next; 2508 r_read_fsm = READ_HEAP_ERASE; 2509 } 2510 } 2511 else 2512 { 2513 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 2514 << "Bad HEAP allocation" << std::endl; 2515 exit(0); 2516 } 2517 break; 2518 } 2519 2520 //////////////////// 2521 case READ_HEAP_LAST: 2522 { 2523 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2524 { 2525 HeapEntry last_entry; 2526 last_entry.owner.srcid = 0; 2527 last_entry.owner.inst = false; 2528 2529 if(m_heap.is_full()) 2530 { 2531 last_entry.next = r_read_ptr.read(); 2532 m_heap.unset_full(); 2533 } 2534 else 2535 { 2536 last_entry.next = r_read_next_ptr.read(); 2537 } 2538 m_heap.write(r_read_ptr.read(),last_entry); 2539 r_read_fsm = READ_RSP; 2540 } 2541 else 2542 { 2543 std::cout << 
"VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 2544 << "Bad HEAP allocation" << std::endl; 2545 exit(0); 2546 } 2547 break; 2548 } 2549 ////////////// 2550 case READ_RSP: // request the TGT_RSP FSM to return data 2551 { 2552 if(!r_read_to_tgt_rsp_req) 2553 { 2554 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2555 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2556 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2557 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2558 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2559 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 2560 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2561 cmd_read_fifo_get = true; 2562 r_read_to_tgt_rsp_req = true; 2563 r_read_fsm = READ_IDLE; 2564 2564 2565 2565 #if DEBUG_MEMC_READ 2566 if(m_debug)2567 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:"2568 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read()2569 << " / address = " << std::hex << m_cmd_read_addr_fifo.read()2570 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl;2571 #endif 2572 }2573 break;2574 }2575 ///////////////////2576 case READ_TRT_LOCK: // read miss : check the Transaction Table2577 {2578 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ)2579 {2580 size_t index = 0;2581 size_t index_write = 0;2582 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read();2583 bool hit_read = m_trt.hit_read(m_nline[addr], index);2566 if(m_debug) 2567 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 2568 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 2569 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 2570 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2571 #endif 2572 } 2573 break; 2574 } 2575 /////////////////// 2576 case READ_TRT_LOCK: // read miss : check the Transaction Table 2577 { 2578 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2579 { 2580 size_t index = 0; 2581 size_t index_write = 0; 2582 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2583 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2584 2584 #if ODCCP_NON_INCLUSIVE 2585 bool hit_write = (m_trt.hit_write(m_nline[addr], &index_write) or2585 bool hit_write = (m_trt.hit_write(m_nline[addr], &index_write) or 2586 2586 ((r_cleanup_to_ixr_cmd_nline.read() == m_nline[addr]) and r_cleanup_to_ixr_cmd_req.read())); 2587 2587 #else 2588 bool hit_write = m_trt.hit_write(m_nline[addr], &index_write); 2589 #endif 2590 bool wok = !m_trt.full(index); 2591 2592 if(hit_read or !wok or hit_write) // missing line already requested or no space 2588 bool hit_write = m_trt.hit_write(m_nline[addr], &index_write); 2589 #endif 2590 bool wok = !m_trt.full(index); 2591 2592 if(hit_read or !wok or hit_write) // missing line already requested or no space 2593 { 2594 if(!wok) m_cpt_trt_full++; 2595 if(hit_read or hit_write) m_cpt_trt_rb++; 2596 r_read_fsm = READ_IDLE; 2597 } 2598 else // missing line is requested to the XRAM 2599 { 2600 m_cpt_read_miss++; 2601 r_read_trt_index = index; 2602 r_read_fsm = READ_TRT_SET; 2603 } 2604 2605 #if DEBUG_MEMC_READ 2606 if(m_debug) 2607 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 2608 << " hit_read = " << hit_read 2609 << " / hit_write = " << hit_write 2610 << " / Index = " << index_write 2611 << " / full = " << !wok << std::endl; 2612 m_cpt_read_fsm_n_trt_lock++; 2613 #endif 2614 } 2615 2616 m_cpt_read_fsm_trt_lock++; 2617 
2618 break; 2619 } 2620 ////////////////// 2621 case READ_TRT_SET: // register get transaction in TRT 2622 { 2623 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2624 { 2625 m_trt.set( r_read_trt_index.read(), 2626 true, // GET 2627 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2628 m_cmd_read_srcid_fifo.read(), 2629 m_cmd_read_trdid_fifo.read(), 2630 m_cmd_read_pktid_fifo.read(), 2631 true, // proc read 2632 m_cmd_read_length_fifo.read(), 2633 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2634 std::vector<be_t> (m_words,0), 2635 std::vector<data_t> (m_words,0), 2636 r_read_ll_key.read() ); 2637 #if DEBUG_MEMC_READ 2638 if(m_debug) 2639 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2640 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2641 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2642 #endif 2643 r_read_fsm = READ_TRT_REQ; 2644 } 2645 break; 2646 } 2647 2648 ////////////////// 2649 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2650 { 2651 if(not r_read_to_ixr_cmd_req) 2652 { 2653 cmd_read_fifo_get = true; 2654 r_read_to_ixr_cmd_req = true; 2655 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2656 r_read_fsm = READ_IDLE; 2657 2658 #if DEBUG_MEMC_READ 2659 if(m_debug) 2660 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 2661 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2662 #endif 2663 } 2664 break; 2665 } 2666 } // end switch read_fsm 2667 2668 /////////////////////////////////////////////////////////////////////////////////// 2669 // WRITE FSM 2670 /////////////////////////////////////////////////////////////////////////////////// 2671 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 2672 // All addresses in a burst must be in the same cache line. 2673 // A complete write burst is consumed in the FIFO & copied to a local buffer. 2674 // Then the FSM takes the lock protecting the cache directory, to check 2675 // if the line is in the cache. 2676 // 2677 // - In case of HIT, the cache is updated. 2678 // If there is no other copy, an acknowledge response is immediately 2679 // returned to the writing processor. 2680 // If the data is cached by other processors, a coherence transaction must 2681 // be launched (sc requests always require a coherence transaction): 2682 // It is a multicast update if the line is not in counter mode: the processor 2683 // takes the lock protecting the Update Table (UPT) to register this transaction. 2684 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2685 // a multi-update request to all owners of the line (but the writer), 2686 // through the CC_SEND FSM. In case of coherence transaction, the WRITE FSM 2687 // does not respond to the writing processor, as this response will be sent by 2688 // the MULTI_ACK FSM when all update responses have been received. 2689 // It is a broadcast invalidate if the line is in counter mode: The line 2690 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2691 // after registration in TRT. 2692 // 2693 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2694 // table (TRT). If a read transaction to the XRAM for this line already exists, 2695 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2696 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2697 // to the XRAM. 
If the TRT is full, it releases the lock, and waits. 2698 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2699 ///////////////////////////////////////////////////////////////////////////////////// 2700 2701 //std::cout << std::endl << "write_fsm" << std::endl; 2702 2703 switch(r_write_fsm.read()) 2593 2704 { 2594 if(!wok) m_cpt_trt_full++; 2595 if(hit_read or hit_write) m_cpt_trt_rb++; 2596 r_read_fsm = READ_IDLE; 2597 } 2598 else // missing line is requested to the XRAM 2599 { 2600 m_cpt_read_miss++; 2601 r_read_trt_index = index; 2602 r_read_fsm = READ_TRT_SET; 2603 } 2604 2605 #if DEBUG_MEMC_READ 2606 if(m_debug) 2607 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 2608 << " hit_read = " << hit_read 2609 << " / hit_write = " << hit_write 2610 << " / Index = " << index_write 2611 << " / full = " << !wok << std::endl; 2612 m_cpt_read_fsm_n_trt_lock++; 2613 #endif 2614 } 2615 2616 m_cpt_read_fsm_trt_lock++; 2617 2618 break; 2619 } 2620 ////////////////// 2621 case READ_TRT_SET: // register get transaction in TRT 2622 { 2623 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2624 { 2625 m_trt.set( r_read_trt_index.read(), 2626 true, // GET 2627 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2628 m_cmd_read_srcid_fifo.read(), 2629 m_cmd_read_trdid_fifo.read(), 2630 m_cmd_read_pktid_fifo.read(), 2631 true, // proc read 2632 m_cmd_read_length_fifo.read(), 2633 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2634 std::vector<be_t> (m_words,0), 2635 std::vector<data_t> (m_words,0), 2636 r_read_ll_key.read() ); 2637 #if DEBUG_MEMC_READ 2638 if(m_debug) 2639 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2640 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2641 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2642 #endif 2643 r_read_fsm = READ_TRT_REQ; 2644 } 2645 break; 2646 } 2647 2648 ////////////////// 2649 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2650 { 2651 if(not r_read_to_ixr_cmd_req) 2652 { 2653 cmd_read_fifo_get = true; 2654 r_read_to_ixr_cmd_req = true; 2655 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2656 r_read_fsm = READ_IDLE; 2657 2658 #if DEBUG_MEMC_READ 2659 if(m_debug) 2660 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 2661 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2662 #endif 2663 } 2664 break; 2665 } 2666 } // end switch read_fsm 2667 2668 /////////////////////////////////////////////////////////////////////////////////// 2669 // WRITE FSM 2670 /////////////////////////////////////////////////////////////////////////////////// 2671 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 2672 // All addresses in a burst must be in the same cache line. 2673 // A complete write burst is consumed in the FIFO & copied to a local buffer. 2674 // Then the FSM takes the lock protecting the cache directory, to check 2675 // if the line is in the cache. 2676 // 2677 // - In case of HIT, the cache is updated. 2678 // If there is no other copy, an acknowledge response is immediately 2679 // returned to the writing processor. 2680 // If the data is cached by other processors, a coherence transaction must 2681 // be launched (sc requests always require a coherence transaction): 2682 // It is a multicast update if the line is not in counter mode: the processor 2683 // takes the lock protecting the Update Table (UPT) to register this transaction. 
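// ------------------------------------------------------------------------------------
// Illustration (editor sketch, not part of vci_mem_cache.cpp): the WRITE FSM policy
// described in the comment above, condensed into one decision function. 'hit',
// 'other_copies' and 'is_cnt' stand for the directory-entry fields tested in
// WRITE_DIR_LOCK / WRITE_DIR_HIT. The comment also notes that SC requests always
// require a coherence transaction, which this sketch does not model.
enum class SketchWriteAction
{
    UPDATE_AND_ACK,    // hit, no other copy   : update the cache, answer immediately
    MULTI_UPDATE,      // hit, copies, not cnt : register in UPT, send multi-update,
                       //                        the answer comes from the MULTI_ACK FSM
    BROADCAST_INVAL,   // hit, counter mode    : broadcast inval + PUT the line to XRAM
    MISS_GET           // miss                 : register a GET in TRT (or merge with a
                       //                        pending read transaction)
};

inline SketchWriteAction sketch_write_action( bool hit, bool other_copies, bool is_cnt )
{
    if ( not hit )          return SketchWriteAction::MISS_GET;
    if ( not other_copies ) return SketchWriteAction::UPDATE_AND_ACK;
    if ( is_cnt )           return SketchWriteAction::BROADCAST_INVAL;
    return SketchWriteAction::MULTI_UPDATE;
}
// ------------------------------------------------------------------------------------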
2684 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2685 // a multi-update request to all owners of the line (but the writer), 2686 // through the CC_SEND FSM. In case of coherence transaction, the WRITE FSM 2687 // does not respond to the writing processor, as this response will be sent by 2688 // the MULTI_ACK FSM when all update responses have been received. 2689 // It is a broadcast invalidate if the line is in counter mode: The line 2690 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2691 // after registration in TRT. 2692 // 2693 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2694 // table (TRT). If a read transaction to the XRAM for this line already exists, 2695 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2696 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2697 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2698 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2699 ///////////////////////////////////////////////////////////////////////////////////// 2700 2701 //std::cout << std::endl << "write_fsm" << std::endl; 2702 2703 switch(r_write_fsm.read()) 2704 { 2705 //////////////// 2706 case WRITE_IDLE: // copy first word of a write burst in local buffer 2707 { 2708 if(m_cmd_write_addr_fifo.rok()) 2709 { 2710 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2711 { 2712 m_cpt_sc++; 2713 } 2714 else 2715 { 2716 m_cpt_write++; 2717 m_cpt_write_cells++; 2718 } 2719 2720 // consume a word in the FIFO & write it in the local buffer 2721 cmd_write_fifo_get = true; 2722 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2723 2724 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2725 r_write_word_index = index; 2726 r_write_word_count = 1; 2727 r_write_data[index] = m_cmd_write_data_fifo.read(); 2728 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2729 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2730 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2731 r_write_pending_sc = false; 2732 2733 // initialize the be field for all words 2734 for(size_t word=0 ; word<m_words ; word++) 2735 { 2736 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2737 else r_write_be[word] = 0x0; 2738 } 2739 2740 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2741 { 2742 r_write_fsm = WRITE_DIR_REQ; 2743 } 2744 else 2745 { 2746 r_write_fsm = WRITE_NEXT; 2747 } 2705 //////////////// 2706 case WRITE_IDLE: // copy first word of a write burst in local buffer 2707 { 2708 if(m_cmd_write_addr_fifo.rok()) 2709 { 2710 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2711 { 2712 m_cpt_sc++; 2713 } 2714 else 2715 { 2716 m_cpt_write++; 2717 m_cpt_write_cells++; 2718 } 2719 2720 // consume a word in the FIFO & write it in the local buffer 2721 cmd_write_fifo_get = true; 2722 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2723 2724 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2725 r_write_word_index = index; 2726 r_write_word_count = 1; 2727 r_write_data[index] = m_cmd_write_data_fifo.read(); 2728 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2729 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2730 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2731 r_write_pending_sc = false; 2732 2733 // initialize the be field for all words 2734 for(size_t word=0 ; word<m_words ; word++) 2735 { 2736 if(word == index) r_write_be[word] = 
m_cmd_write_be_fifo.read(); 2737 else r_write_be[word] = 0x0; 2738 } 2739 2740 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2741 { 2742 r_write_fsm = WRITE_DIR_REQ; 2743 } 2744 else 2745 { 2746 r_write_fsm = WRITE_NEXT; 2747 } 2748 2748 2749 2749 #if DEBUG_MEMC_WRITE 2750 if(m_debug)2751 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request "2752 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read()2753 << " / address = " << std::hex << m_cmd_write_addr_fifo.read()2754 << " / data = " << m_cmd_write_data_fifo.read() << std::endl;2755 #endif 2756 }2757 break;2758 }2759 ////////////////2760 case WRITE_NEXT: // copy next word of a write burst in local buffer2761 {2762 if(m_cmd_write_addr_fifo.rok())2763 {2750 if(m_debug) 2751 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request " 2752 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 2753 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 2754 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 2755 #endif 2756 } 2757 break; 2758 } 2759 //////////////// 2760 case WRITE_NEXT: // copy next word of a write burst in local buffer 2761 { 2762 if(m_cmd_write_addr_fifo.rok()) 2763 { 2764 2764 2765 2765 #if DEBUG_MEMC_WRITE 2766 if(m_debug) 2767 std::cout << " <MEMC " << name() 2768 << " WRITE_NEXT> Write another word in local buffer" 2769 << std::endl; 2770 #endif 2771 m_cpt_write_cells++; 2772 2773 // check that the next word is in the same cache line 2774 assert( (m_nline[(addr_t)(r_write_address.read())] == 2775 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2776 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2777 2778 // consume a word in the FIFO & write it in the local buffer 2779 cmd_write_fifo_get = true; 2780 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2781 2782 r_write_be[index] = m_cmd_write_be_fifo.read(); 2783 r_write_data[index] = m_cmd_write_data_fifo.read(); 2784 r_write_word_count = r_write_word_count.read() + 1; 2785 2786 if(m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2787 } 2788 break; 2789 } 2790 /////////////////// 2791 case WRITE_DIR_REQ: // Get the lock to the directory 2792 // and access the llsc_global_table 2793 { 2794 if( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2795 { 2796 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2797 { 2798 // We enter here if it is a new SC command 2799 // If r_write_pending_sc is set the SC is not new and has already been tested 2800 2801 if(not m_cmd_write_addr_fifo.rok()) break; 2802 2803 assert( m_cmd_write_eop_fifo.read() and 2804 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2805 2806 size_t index = r_write_word_index.read(); 2807 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2808 r_write_data[index].read()); 2809 2810 // consume a word in the FIFO & write it in the local buffer 2811 cmd_write_fifo_get = true; 2812 r_write_data[index] = m_cmd_write_data_fifo.read(); 2813 r_write_sc_fail = not sc_success; 2814 r_write_pending_sc = true; 2815 2816 if(not sc_success) r_write_fsm = WRITE_RSP; 2817 else r_write_fsm = WRITE_DIR_LOCK; 2818 } 2819 else 2820 { 2821 // We enter here if it is a SW command or an already tested SC command 2822 2823 m_llsc_table.sw( m_nline[(addr_t)r_write_address.read()], 2824 r_write_word_index.read(), 2825 r_write_word_index.read() + r_write_word_count.read() ); 2826 2827 r_write_fsm = WRITE_DIR_LOCK; 2828 } 2829 } 2766 if(m_debug) 2767 
std::cout << " <MEMC " << name() 2768 << " WRITE_NEXT> Write another word in local buffer" 2769 << std::endl; 2770 #endif 2771 m_cpt_write_cells++; 2772 2773 // check that the next word is in the same cache line 2774 assert( (m_nline[(addr_t)(r_write_address.read())] == 2775 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2776 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2777 2778 // consume a word in the FIFO & write it in the local buffer 2779 cmd_write_fifo_get = true; 2780 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2781 2782 r_write_be[index] = m_cmd_write_be_fifo.read(); 2783 r_write_data[index] = m_cmd_write_data_fifo.read(); 2784 r_write_word_count = r_write_word_count.read() + 1; 2785 2786 if(m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2787 } 2788 break; 2789 } 2790 /////////////////// 2791 case WRITE_DIR_REQ: // Get the lock to the directory 2792 // and access the llsc_global_table 2793 { 2794 if( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2795 { 2796 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2797 { 2798 // We enter here if it is a new SC command 2799 // If r_write_pending_sc is set the SC is not new and has already been tested 2800 2801 if(not m_cmd_write_addr_fifo.rok()) break; 2802 2803 assert( m_cmd_write_eop_fifo.read() and 2804 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2805 2806 size_t index = r_write_word_index.read(); 2807 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2808 r_write_data[index].read()); 2809 2810 // consume a word in the FIFO & write it in the local buffer 2811 cmd_write_fifo_get = true; 2812 r_write_data[index] = m_cmd_write_data_fifo.read(); 2813 r_write_sc_fail = not sc_success; 2814 r_write_pending_sc = true; 2815 2816 if(not sc_success) r_write_fsm = WRITE_RSP; 2817 else r_write_fsm = WRITE_DIR_LOCK; 2818 } 2819 else 2820 { 2821 // We enter here if it is a SW command or an already tested SC command 2822 2823 #define L2 soclib::common::uint32_log2 2824 addr_t min = r_write_address.read(); 2825 addr_t max = r_write_address.read() + 2826 ((r_write_word_count.read()-1) << L2(vci_param_int::B)); 2827 #undef L2 2828 2829 m_llsc_table.sw(min, max); 2830 2831 r_write_fsm = WRITE_DIR_LOCK; 2832 } 2833 } 2830 2834 2831 2835 #if DEBUG_MEMC_WRITE 2832 if(m_debug)2833 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock "2834 << std::endl;2835 #endif 2836 break;2837 }2838 ////////////////////2839 case WRITE_DIR_LOCK: // access directory to check hit/miss2840 {2841 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and2842 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation");2843 2844 size_t way = 0;2845 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way));2846 2847 if(entry.valid) // hit2848 {2849 // copy directory entry in local buffer in case of hit2850 r_write_is_cnt = entry.is_cnt;2851 r_write_lock = entry.lock;2852 r_write_tag = entry.tag;2853 r_write_copy = entry.owner.srcid;2854 r_write_copy_inst = entry.owner.inst;2855 r_write_count = entry.count;2856 r_write_ptr = entry.ptr;2857 r_write_way = way;2858 2859 if(entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ;2860 else r_write_fsm = WRITE_DIR_HIT;2861 }2862 else // miss2863 {2864 r_write_fsm = WRITE_MISS_TRT_LOCK;2865 }2836 if(m_debug) 2837 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock " 2838 << std::endl; 2839 #endif 2840 break; 2841 } 2842 //////////////////// 2843 case 
WRITE_DIR_LOCK: // access directory to check hit/miss 2844 { 2845 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2846 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 2847 2848 size_t way = 0; 2849 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 2850 2851 if(entry.valid) // hit 2852 { 2853 // copy directory entry in local buffer in case of hit 2854 r_write_is_cnt = entry.is_cnt; 2855 r_write_lock = entry.lock; 2856 r_write_tag = entry.tag; 2857 r_write_copy = entry.owner.srcid; 2858 r_write_copy_inst = entry.owner.inst; 2859 r_write_count = entry.count; 2860 r_write_ptr = entry.ptr; 2861 r_write_way = way; 2862 2863 if(entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ; 2864 else r_write_fsm = WRITE_DIR_HIT; 2865 } 2866 else // miss 2867 { 2868 r_write_fsm = WRITE_MISS_TRT_LOCK; 2869 } 2866 2870 2867 2871 #if DEBUG_MEMC_WRITE 2868 if(m_debug)2869 {2870 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: "2871 << " address = " << std::hex << r_write_address.read()2872 << " / hit = " << std::dec << entry.valid2873 << " / count = " << entry.count2874 << " / is_cnt = " << entry.is_cnt ;2875 if((r_write_pktid.read() & 0x7) == TYPE_SC)2876 std::cout << " / SC access" << std::endl;2877 else2878 std::cout << " / SW access" << std::endl;2879 }2880 #endif 2881 break;2882 }2883 ///////////////////2884 case WRITE_DIR_HIT: // update the cache directory with Dirty bit2885 2886 {2887 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and2888 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation");2889 2890 DirectoryEntry entry;2891 entry.valid = true;2892 entry.dirty = true;2893 entry.tag = r_write_tag.read();2894 entry.is_cnt = r_write_is_cnt.read();2895 entry.lock = r_write_lock.read();2896 entry.owner.srcid = r_write_copy.read();2897 entry.owner.inst = r_write_copy_inst.read();2898 entry.count = r_write_count.read();2899 entry.ptr = r_write_ptr.read();2900 2901 size_t set = m_y[(addr_t)(r_write_address.read())];2902 size_t way = r_write_way.read();2903 2904 // update directory2905 m_cache_directory.write(set, way, entry);2906 2907 // owner is true when the the first registered copy is the writer itself2908 bool owner = ( (r_write_copy.read() == r_write_srcid.read())2909 and not r_write_copy_inst.read() );2910 2911 // no_update is true when there is no need for coherence transaction2912 bool no_update = ( (r_write_count.read() == 0) or2913 (owner and (r_write_count.read() ==1) and2914 (r_write_pktid.read() != TYPE_SC)));2915 2916 // write data in the cache if no coherence transaction2917 if(no_update)2918 {2919 for(size_t word=0 ; word<m_words ; word++)2920 {2921 m_cache_data.write( way,2872 if(m_debug) 2873 { 2874 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: " 2875 << " address = " << std::hex << r_write_address.read() 2876 << " / hit = " << std::dec << entry.valid 2877 << " / count = " << entry.count 2878 << " / is_cnt = " << entry.is_cnt ; 2879 if((r_write_pktid.read() & 0x7) == TYPE_SC) 2880 std::cout << " / SC access" << std::endl; 2881 else 2882 std::cout << " / SW access" << std::endl; 2883 } 2884 #endif 2885 break; 2886 } 2887 /////////////////// 2888 case WRITE_DIR_HIT: // update the cache directory with Dirty bit 2889 // and update data cache 2890 { 2891 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2892 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 2893 2894 DirectoryEntry entry; 2895 entry.valid = true; 2896 entry.dirty = true; 2897 entry.tag = 
r_write_tag.read(); 2898 entry.is_cnt = r_write_is_cnt.read(); 2899 entry.lock = r_write_lock.read(); 2900 entry.owner.srcid = r_write_copy.read(); 2901 entry.owner.inst = r_write_copy_inst.read(); 2902 entry.count = r_write_count.read(); 2903 entry.ptr = r_write_ptr.read(); 2904 2905 size_t set = m_y[(addr_t)(r_write_address.read())]; 2906 size_t way = r_write_way.read(); 2907 2908 // update directory 2909 m_cache_directory.write(set, way, entry); 2910 2911 // owner is true when the the first registered copy is the writer itself 2912 bool owner = ( (r_write_copy.read() == r_write_srcid.read()) 2913 and not r_write_copy_inst.read() ); 2914 2915 // no_update is true when there is no need for coherence transaction 2916 bool no_update = ( (r_write_count.read() == 0) or 2917 (owner and (r_write_count.read() ==1) and 2918 (r_write_pktid.read() != TYPE_SC))); 2919 2920 // write data in the cache if no coherence transaction 2921 if(no_update) 2922 { 2923 for(size_t word=0 ; word<m_words ; word++) 2924 { 2925 m_cache_data.write( way, 2922 2926 set, 2923 2927 word, 2924 2928 r_write_data[word].read(), 2925 2929 r_write_be[word].read()); 2926 }2927 }2928 2929 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC))2930 {2931 r_write_count = r_write_count.read() - 1;2932 }2933 2934 if(no_update) // Write transaction completed2935 {2936 r_write_fsm = WRITE_RSP;2937 }2938 else // coherence update required2939 {2940 if(!r_write_to_cc_send_multi_req.read() and2941 !r_write_to_cc_send_brdcast_req.read())2942 {2943 r_write_fsm = WRITE_UPT_LOCK;2944 }2945 else2946 {2947 r_write_fsm = WRITE_WAIT;2948 }2949 }2930 } 2931 } 2932 2933 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2934 { 2935 r_write_count = r_write_count.read() - 1; 2936 } 2937 2938 if(no_update) // Write transaction completed 2939 { 2940 r_write_fsm = WRITE_RSP; 2941 } 2942 else // coherence update required 2943 { 2944 if(!r_write_to_cc_send_multi_req.read() and 2945 !r_write_to_cc_send_brdcast_req.read()) 2946 { 2947 r_write_fsm = WRITE_UPT_LOCK; 2948 } 2949 else 2950 { 2951 r_write_fsm = WRITE_WAIT; 2952 } 2953 } 2950 2954 2951 2955 #if DEBUG_MEMC_WRITE 2952 if(m_debug)2953 {2954 if(no_update)2955 {2956 std::cout << " <MEMC " << name()2957 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl;2958 }2959 else2960 {2961 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:"2962 << " is_cnt = " << r_write_is_cnt.read()2963 << " nb_copies = " << std::dec << r_write_count.read() << std::endl;2964 if(owner) std::cout << " ... 
but the first copy is the writer" << std::endl;2965 }2966 }2967 #endif 2968 break;2969 }2970 ////////////////////2971 case WRITE_UPT_LOCK: // Try to register the update request in UPT2972 {2973 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE)2974 {2975 bool wok = false;2976 size_t index = 0;2977 size_t srcid = r_write_srcid.read();2978 size_t trdid = r_write_trdid.read();2979 size_t pktid = r_write_pktid.read();2980 addr_t nline = m_nline[(addr_t)(r_write_address.read())];2981 size_t nb_copies = r_write_count.read();2982 size_t set = m_y[(addr_t)(r_write_address.read())];2983 size_t way = r_write_way.read();2984 2985 wok = m_upt.set( true, // it's an update transaction2986 false, // it's not a broadcast2987 true, // response required2988 false, // no acknowledge required2989 srcid,2990 trdid,2991 pktid,2992 nline,2993 nb_copies,2994 index);2995 2996 if( wok ) // write data in cache2997 {2998 for(size_t word=0 ; word<m_words ; word++)2999 {3000 m_cache_data.write( way,2956 if(m_debug) 2957 { 2958 if(no_update) 2959 { 2960 std::cout << " <MEMC " << name() 2961 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl; 2962 } 2963 else 2964 { 2965 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2966 << " is_cnt = " << r_write_is_cnt.read() 2967 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2968 if(owner) std::cout << " ... but the first copy is the writer" << std::endl; 2969 } 2970 } 2971 #endif 2972 break; 2973 } 2974 //////////////////// 2975 case WRITE_UPT_LOCK: // Try to register the update request in UPT 2976 { 2977 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2978 { 2979 bool wok = false; 2980 size_t index = 0; 2981 size_t srcid = r_write_srcid.read(); 2982 size_t trdid = r_write_trdid.read(); 2983 size_t pktid = r_write_pktid.read(); 2984 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2985 size_t nb_copies = r_write_count.read(); 2986 size_t set = m_y[(addr_t)(r_write_address.read())]; 2987 size_t way = r_write_way.read(); 2988 2989 wok = m_upt.set( true, // it's an update transaction 2990 false, // it's not a broadcast 2991 true, // response required 2992 false, // no acknowledge required 2993 srcid, 2994 trdid, 2995 pktid, 2996 nline, 2997 nb_copies, 2998 index); 2999 3000 if( wok ) // write data in cache 3001 { 3002 for(size_t word=0 ; word<m_words ; word++) 3003 { 3004 m_cache_data.write( way, 3001 3005 set, 3002 3006 word, 3003 3007 r_write_data[word].read(), 3004 3008 r_write_be[word].read()); 3005 }3006 }3009 } 3010 } 3007 3011 3008 3012 #if DEBUG_MEMC_WRITE 3009 if(m_debug and wok) 3010 { 3011 std::cout << " <MEMC " << name() 3012 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3013 << " nb_copies = " << r_write_count.read() << std::endl; 3014 } 3015 #endif 3016 r_write_upt_index = index; 3017 // releases the lock protecting UPT and the DIR if no entry... 3018 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3019 else r_write_fsm = WRITE_WAIT; 3013 if(m_debug and wok) 3014 { 3015 std::cout << " <MEMC " << name() 3016 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3017 << " nb_copies = " << r_write_count.read() << std::endl; 3018 } 3019 #endif 3020 r_write_upt_index = index; 3021 // releases the lock protecting UPT and the DIR if no entry... 
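As a side note on the m_upt.set() call in WRITE_UPT_LOCK, the following is a minimal stand-alone sketch of the bookkeeping it implies, assuming a simple bounded table; the class and field names are invented and do not belong to the SoCLib UpdateTab API. The point is the failure path: when no free entry exists, set() returns false and the WRITE FSM moves to WRITE_WAIT to release the locks and retry.

// Stand-alone sketch of the UPT registration used in WRITE_UPT_LOCK.
#include <cstdint>
#include <cstddef>
#include <vector>

struct UptEntrySketch
{
    bool     valid   = false;
    bool     update  = false;   // update vs invalidate
    bool     brdcast = false;   // broadcast vs multicast
    bool     rsp     = false;   // response to the processor required
    bool     ack     = false;   // acknowledge to the CONFIG FSM required
    uint32_t srcid = 0, trdid = 0, pktid = 0;
    uint64_t nline   = 0;
    size_t   count   = 0;       // number of expected coherence responses
};

class UptSketch
{
public:
    explicit UptSketch(size_t size) : m_tab(size) {}

    // Mirrors the m_upt.set(...) arguments: returns false when the table is
    // full, in which case the caller must release the locks and retry.
    bool set(bool update, bool brdcast, bool rsp, bool ack,
             uint32_t srcid, uint32_t trdid, uint32_t pktid,
             uint64_t nline, size_t nb_copies, size_t &index)
    {
        for (size_t i = 0; i < m_tab.size(); ++i)
        {
            if (m_tab[i].valid) continue;
            m_tab[i] = {true, update, brdcast, rsp, ack,
                        srcid, trdid, pktid, nline, nb_copies};
            index = i;
            return true;
        }
        return false;   // full: WRITE FSM goes to WRITE_WAIT
    }

private:
    std::vector<UptEntrySketch> m_tab;
};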
3022 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3023 else r_write_fsm = WRITE_WAIT; 3024 } 3025 break; 3026 } 3027 3028 ///////////////////////// 3029 case WRITE_UPT_HEAP_LOCK: // get access to heap 3030 { 3031 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 3032 { 3033 3034 #if DEBUG_MEMC_WRITE 3035 if(m_debug) 3036 std::cout << " <MEMC " << name() 3037 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 3038 #endif 3039 r_write_fsm = WRITE_UPT_REQ; 3040 m_cpt_write_fsm_n_heap_lock++; 3041 } 3042 3043 m_cpt_write_fsm_heap_lock++; 3044 3045 break; 3046 } 3047 3048 ////////////////// 3049 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 3050 // and write the first copy in the FIFO 3051 // send the request if only one copy 3052 { 3053 assert(not r_write_to_cc_send_multi_req.read() and 3054 not r_write_to_cc_send_brdcast_req.read() and 3055 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 3056 "transaction in WRITE_UPT_REQ state" 3057 ); 3058 3059 r_write_to_cc_send_brdcast_req = false; 3060 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3061 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3062 r_write_to_cc_send_index = r_write_word_index.read(); 3063 r_write_to_cc_send_count = r_write_word_count.read(); 3064 3065 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 3066 3067 size_t min = r_write_word_index.read(); 3068 size_t max = r_write_word_index.read() + r_write_word_count.read(); 3069 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 3070 3071 if( (r_write_copy.read() != r_write_srcid.read()) or 3072 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 3073 { 3074 // put the first srcid in the fifo 3075 write_to_cc_send_fifo_put = true; 3076 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3077 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3078 if(r_write_count.read() == 1) 3079 { 3080 r_write_fsm = WRITE_IDLE; 3081 r_write_to_cc_send_multi_req = true; 3082 } 3083 else 3084 { 3085 r_write_fsm = WRITE_UPT_NEXT; 3086 r_write_to_dec = false; 3087 3088 } 3089 } 3090 else 3091 { 3092 r_write_fsm = WRITE_UPT_NEXT; 3093 r_write_to_dec = false; 3094 } 3095 3096 #if DEBUG_MEMC_WRITE 3097 if(m_debug) 3098 { 3099 std::cout 3100 << " <MEMC " << name() 3101 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3102 << " / srcid = " << std::dec << r_write_copy.read() 3103 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3104 3105 if(r_write_count.read() == 1) 3106 std::cout << " ... and this is the last" << std::endl; 3107 } 3108 #endif 3109 break; 3110 } 3111 3112 /////////////////// 3113 case WRITE_UPT_NEXT: 3114 { 3115 // continue the multi-update request to CC_SEND fsm 3116 // when there is copies in the heap. 3117 // if one copy in the heap is the writer itself 3118 // the corresponding SRCID should not be written in the fifo, 3119 // but the UPT counter must be decremented. 3120 // As this decrement is done in the WRITE_UPT_DEC state, 3121 // after the last copy has been found, the decrement request 3122 // must be registered in the r_write_to_dec flip-flop. 
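The comment above describes the traversal performed by WRITE_UPT_NEXT. The stand-alone sketch below shows the same walk over an assumed heap-linked copy list in which the last element points to itself (as the entry.next == r_write_ptr test suggests); the writer's own data copy is skipped and only counted, since it must not receive an update request. Names are illustrative, not the SoCLib HeapDirectory interface.

// Simplified model of the copy-list walk done in WRITE_UPT_NEXT.
#include <cstdint>
#include <cstddef>
#include <vector>

struct CopySketch
{
    uint32_t srcid;   // owner identifier
    bool     inst;    // instruction-cache copy
    size_t   next;    // index of the next copy; the last copy points to itself
};

// Returns the copies that must receive an update request, skipping the
// writer's own data copy; 'skipped' counts the copies that were skipped.
std::vector<CopySketch> collect_targets(const std::vector<CopySketch> &heap,
                                        size_t head, uint32_t writer_srcid,
                                        size_t &skipped)
{
    std::vector<CopySketch> targets;
    skipped = 0;
    size_t ptr = head;                       // assumed to be a valid index
    while (true)
    {
        const CopySketch &e = heap[ptr];
        if (e.srcid == writer_srcid && !e.inst) skipped++;        // writer: no update
        else                                    targets.push_back(e);
        if (e.next == ptr) break;            // last copy reached
        ptr = e.next;
    }
    return targets;
}

Each skipped copy is exactly what forces the UPT-counter decrement handled next in WRITE_UPT_DEC.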
3123 3124 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3125 3126 bool dec_upt_counter; 3127 3128 // put the next srcid in the fifo 3129 if( (entry.owner.srcid != r_write_srcid.read()) or 3130 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 3131 { 3132 dec_upt_counter = false; 3133 write_to_cc_send_fifo_put = true; 3134 write_to_cc_send_fifo_inst = entry.owner.inst; 3135 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3136 3137 #if DEBUG_MEMC_WRITE 3138 if(m_debug) 3139 { 3140 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3141 << " / heap_index = " << std::dec << r_write_ptr.read() 3142 << " / srcid = " << std::dec << r_write_copy.read() 3143 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3144 if(entry.next == r_write_ptr.read()) 3145 std::cout << " ... and this is the last" << std::endl; 3146 } 3147 #endif 3148 } 3149 else // the UPT counter must be decremented 3150 { 3151 dec_upt_counter = true; 3152 3153 #if DEBUG_MEMC_WRITE 3154 if(m_debug) 3155 { 3156 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3157 << " / heap_index = " << std::dec << r_write_ptr.read() 3158 << " / srcid = " << std::dec << r_write_copy.read() 3159 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3160 if(entry.next == r_write_ptr.read()) 3161 std::cout << " ... and this is the last" << std::endl; 3162 } 3163 #endif 3164 } 3165 3166 // register the possible UPT decrement request 3167 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3168 3169 if(not m_write_to_cc_send_inst_fifo.wok()) 3170 { 3171 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3172 << "The write_to_cc_send_fifo should not be full" << std::endl 3173 << "as the depth should be larger than the max number of copies" << std::endl; 3174 exit(0); 3175 } 3176 3177 r_write_ptr = entry.next; 3178 3179 if(entry.next == r_write_ptr.read()) // last copy 3180 { 3181 r_write_to_cc_send_multi_req = true; 3182 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3183 else r_write_fsm = WRITE_IDLE; 3184 } 3185 break; 3186 } 3187 3188 ////////////////// 3189 case WRITE_UPT_DEC: 3190 { 3191 // If the initial writer has a copy, it should not 3192 // receive an update request, but the counter in the 3193 // update table must be decremented by the MULTI_ACK FSM. 
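To make the decrement explicit: the UPT entry was armed in WRITE_UPT_LOCK with the full copy count, but a skipped writer copy never produces an update response, so the expected-response counter has to be lowered by one. Below is a minimal sketch of that bookkeeping with invented names; in the real design the MULTI_ACK FSM performs the decrement on the UPT entry on behalf of the WRITE FSM.

// Minimal illustration of the response accounting behind WRITE_UPT_DEC.
#include <cstddef>
#include <cassert>

struct UptCounterSketch
{
    size_t expected;    // coherence responses still awaited

    // Called once for the writer's own (skipped) copy.
    void dec()
    {
        assert(expected > 0);
        if (--expected == 0)
        {
            // all responses accounted for: the write acknowledge
            // can now be returned to the processor
        }
    }
};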
3194 3195 if(!r_write_to_multi_ack_req.read()) 3196 { 3197 r_write_to_multi_ack_req = true; 3198 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3199 r_write_fsm = WRITE_IDLE; 3200 } 3201 break; 3202 } 3203 3204 /////////////// 3205 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 3206 // In order to increase the Write requests throughput, 3207 // we don't wait to return in the IDLE state to consume 3208 // a new request in the write FIFO 3209 { 3210 if(!r_write_to_tgt_rsp_req.read()) 3211 { 3212 // post the request to TGT_RSP_FSM 3213 r_write_to_tgt_rsp_req = true; 3214 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3215 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3216 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3217 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3218 3219 // try to get a new write request from the FIFO 3220 if(m_cmd_write_addr_fifo.rok()) 3221 { 3222 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3223 m_cpt_sc++; 3224 else 3225 { 3226 m_cpt_write++; 3227 m_cpt_write_cells++; 3228 } 3229 3230 // consume a word in the FIFO & write it in the local buffer 3231 cmd_write_fifo_get = true; 3232 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3233 3234 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 3235 r_write_word_index = index; 3236 r_write_word_count = 1; 3237 r_write_data[index] = m_cmd_write_data_fifo.read(); 3238 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3239 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3240 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3241 r_write_pending_sc = false; 3242 3243 // initialize the be field for all words 3244 for(size_t word=0 ; word<m_words ; word++) 3245 { 3246 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3247 else r_write_be[word] = 0x0; 3248 } 3249 3250 if(m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 3251 { 3252 r_write_fsm = WRITE_DIR_REQ; 3253 } 3254 else 3255 { 3256 r_write_fsm = WRITE_NEXT; 3257 } 3258 } 3259 else 3260 { 3261 r_write_fsm = WRITE_IDLE; 3262 } 3263 3264 #if DEBUG_MEMC_WRITE 3265 if(m_debug) 3266 { 3267 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3268 << " : rsrcid = " << std::hex << r_write_srcid.read() << std::endl; 3269 if(m_cmd_write_addr_fifo.rok()) 3270 { 3271 std::cout << " New Write request: " 3272 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3273 << " / address = " << m_cmd_write_addr_fifo.read() 3274 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 3275 } 3276 } 3277 #endif 3278 } 3279 break; 3280 } 3281 3282 ///////////////////////// 3283 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 3284 { 3285 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3286 { 3287 3288 #if DEBUG_MEMC_WRITE 3289 if(m_debug) 3290 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 3291 #endif 3292 size_t hit_index = 0; 3293 size_t wok_index = 0; 3294 addr_t addr = (addr_t) r_write_address.read(); 3295 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3296 #if ODCCP_NON_INCLUSIVE 3297 bool hit_write = (m_trt.hit_write(m_nline[addr]) or 3298 ((r_cleanup_to_ixr_cmd_nline.read() == m_nline[addr]) and r_cleanup_to_ixr_cmd_req.read())); 3299 #else 3300 bool hit_write = m_trt.hit_write(m_nline[addr]); 3301 #endif 3302 bool wok = not m_trt.full(wok_index); 3303 3304 if(hit_read) // register the modified data in TRT 3305 { 3306 r_write_trt_index = hit_index; 3307 r_write_fsm = 
WRITE_MISS_TRT_DATA; 3308 m_cpt_write_miss++; 3309 } 3310 else if(wok and !hit_write) // set a new entry in TRT 3311 { 3312 r_write_trt_index = wok_index; 3313 r_write_fsm = WRITE_MISS_TRT_SET; 3314 m_cpt_write_miss++; 3315 } 3316 else // wait an empty entry in TRT 3317 { 3318 r_write_fsm = WRITE_WAIT; 3319 m_cpt_trt_full++; 3320 } 3321 m_cpt_write_fsm_n_trt_lock++; 3322 } 3323 3324 m_cpt_write_fsm_trt_lock++; 3325 3326 break; 3327 } 3328 3329 //////////////// 3330 case WRITE_WAIT: // release the locks protecting the shared ressources 3331 { 3332 3333 #if DEBUG_MEMC_WRITE 3334 if(m_debug) 3335 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 3336 #endif 3337 r_write_fsm = WRITE_DIR_REQ; 3338 break; 3339 } 3340 3341 //////////////////////// 3342 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 3343 { 3344 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3345 { 3346 std::vector<be_t> be_vector; 3347 std::vector<data_t> data_vector; 3348 be_vector.clear(); 3349 data_vector.clear(); 3350 for(size_t i=0; i<m_words; i++) 3351 { 3352 be_vector.push_back(r_write_be[i]); 3353 data_vector.push_back(r_write_data[i]); 3354 } 3355 m_trt.set(r_write_trt_index.read(), 3356 true, // read request to XRAM 3357 m_nline[(addr_t)(r_write_address.read())], 3358 r_write_srcid.read(), 3359 r_write_trdid.read(), 3360 r_write_pktid.read(), 3361 false, // not a processor read 3362 0, // not a single word 3363 0, // word index 3364 be_vector, 3365 data_vector); 3366 r_write_fsm = WRITE_MISS_XRAM_REQ; 3367 3368 #if DEBUG_MEMC_WRITE 3369 if(m_debug) 3370 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 3371 #endif 3372 } 3373 break; 3374 } 3375 3376 ///////////////////////// 3377 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 3378 { 3379 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3380 { 3381 std::vector<be_t> be_vector; 3382 std::vector<data_t> data_vector; 3383 be_vector.clear(); 3384 data_vector.clear(); 3385 for(size_t i=0; i<m_words; i++) 3386 { 3387 be_vector.push_back(r_write_be[i]); 3388 data_vector.push_back(r_write_data[i]); 3389 } 3390 m_trt.write_data_mask( r_write_trt_index.read(), 3391 be_vector, 3392 data_vector ); 3393 r_write_fsm = WRITE_RSP; 3394 3395 #if DEBUG_MEMC_WRITE 3396 if(m_debug) 3397 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 3398 #endif 3399 } 3400 break; 3401 } 3402 ///////////////////////// 3403 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3404 { 3405 if( not r_write_to_ixr_cmd_req.read() ) 3406 { 3407 r_write_to_ixr_cmd_req = true; 3408 r_write_to_ixr_cmd_put = false; 3409 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3410 r_write_fsm = WRITE_RSP; 3411 3412 #if DEBUG_MEMC_WRITE 3413 if(m_debug) 3414 std::cout << " <MEMC " << name() << " WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 3415 #endif 3416 } 3417 break; 3418 } 3419 /////////////////////// 3420 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3421 // the cache line must be erased in mem-cache, and written 3422 // into XRAM. 
we read the cache and complete the buffer 3423 { 3424 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3425 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3426 3427 // update local buffer 3428 size_t set = m_y[(addr_t)(r_write_address.read())]; 3429 size_t way = r_write_way.read(); 3430 for(size_t word=0 ; word<m_words ; word++) 3431 { 3432 data_t mask = 0; 3433 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3434 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3435 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3436 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3437 3438 // complete only if mask is not null (for energy consumption) 3439 r_write_data[word] = (r_write_data[word].read() & mask) | 3440 (m_cache_data.read(way, set, word) & ~mask); 3441 } // end for 3442 3443 r_write_fsm = WRITE_BC_TRT_LOCK; 3444 3445 #if DEBUG_MEMC_WRITE 3446 if(m_debug) 3447 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3448 << " Read the cache to complete local buffer" << std::endl; 3449 #endif 3450 break; 3451 } 3452 /////////////////////// 3453 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3454 { 3455 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3456 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3457 3458 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3459 { 3460 size_t wok_index = 0; 3461 bool wok = not m_trt.full(wok_index); 3462 if( wok ) 3463 { 3464 r_write_trt_index = wok_index; 3465 r_write_fsm = WRITE_BC_IVT_LOCK; 3466 } 3467 else // wait an empty slot in TRT 3468 { 3469 r_write_fsm = WRITE_WAIT; 3470 } 3471 3472 #if DEBUG_MEMC_WRITE 3473 if(m_debug) 3474 std::cout << " <MEMC " << name() << " WRITE_BC_TRT_LOCK> Check TRT" 3475 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3476 #endif 3477 m_cpt_write_fsm_n_trt_lock++; 3478 } 3479 3480 m_cpt_write_fsm_trt_lock++; 3481 3482 break; 3483 } 3484 ////////////////////// 3485 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3486 { 3487 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3488 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3489 3490 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3491 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3492 3493 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3494 { 3495 bool wok = false; 3496 size_t index = 0; 3497 size_t srcid = r_write_srcid.read(); 3498 size_t trdid = r_write_trdid.read(); 3499 size_t pktid = r_write_pktid.read(); 3500 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3501 size_t nb_copies = r_write_count.read(); 3502 3503 wok = m_ivt.set(false, // it's an inval transaction 3504 true, // it's a broadcast 3505 true, // response required 3506 false, // no acknowledge required 3507 srcid, 3508 trdid, 3509 pktid, 3510 nline, 3511 nb_copies, 3512 index); 3513 #if DEBUG_MEMC_WRITE 3514 if( m_debug and wok ) 3515 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3516 << " / nb_copies = " << r_write_count.read() << std::endl; 3517 #endif 3518 r_write_upt_index = index; 3519 3520 if( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3521 else r_write_fsm = WRITE_WAIT; 3522 } 3523 break; 3524 } 3525 //////////////////////// 3526 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3527 // and invalidate the line in directory 3528 { 3529 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3530 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR 
allocation"); 3531 3532 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3533 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3534 3535 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3536 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3537 3538 // register PUT request in TRT 3539 std::vector<data_t> data_vector; 3540 data_vector.clear(); 3541 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3542 m_trt.set( r_write_trt_index.read(), 3543 false, // PUT request 3544 m_nline[(addr_t)(r_write_address.read())], 3545 0, // unused 3546 0, // unused 3547 0, // unused 3548 false, // not a processor read 3549 0, // unused 3550 0, // unused 3551 std::vector<be_t> (m_words,0), 3552 data_vector ); 3553 3554 // invalidate directory entry 3555 DirectoryEntry entry; 3556 entry.valid = false; 3557 entry.dirty = false; 3558 entry.tag = 0; 3559 entry.is_cnt = false; 3560 entry.lock = false; 3561 entry.owner.srcid = 0; 3562 entry.owner.inst = false; 3563 entry.ptr = 0; 3564 entry.count = 0; 3565 size_t set = m_y[(addr_t)(r_write_address.read())]; 3566 size_t way = r_write_way.read(); 3567 3568 m_cache_directory.write(set, way, entry); 3569 3570 #if DEBUG_MEMC_WRITE 3571 if(m_debug) 3572 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3573 << " address = " << r_write_address.read() << std::endl; 3574 #endif 3575 r_write_fsm = WRITE_BC_CC_SEND; 3576 break; 3577 } 3578 3579 ////////////////////// 3580 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3581 { 3582 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3583 { 3584 r_write_to_cc_send_multi_req = false; 3585 r_write_to_cc_send_brdcast_req = true; 3586 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3587 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3588 r_write_to_cc_send_index = 0; 3589 r_write_to_cc_send_count = 0; 3590 3591 for(size_t i=0; i<m_words ; i++) // Ã quoi sert ce for? (AG) 3592 { 3593 r_write_to_cc_send_be[i]=0; 3594 r_write_to_cc_send_data[i] = 0; 3595 } 3596 r_write_fsm = WRITE_BC_XRAM_REQ; 3597 3598 #if DEBUG_MEMC_WRITE 3599 if(m_debug) 3600 std::cout << " <MEMC " << name() 3601 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3602 #endif 3603 } 3604 break; 3605 } 3606 3607 /////////////////////// 3608 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3609 { 3610 if( not r_write_to_ixr_cmd_req.read() ) 3611 { 3612 r_write_to_ixr_cmd_req = true; 3613 r_write_to_ixr_cmd_put = true; 3614 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3615 r_write_fsm = WRITE_IDLE; 3616 3617 #if DEBUG_MEMC_WRITE 3618 if(m_debug) 3619 std::cout << " <MEMC " << name() 3620 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3621 #endif 3622 } 3623 break; 3624 } 3625 } // end switch r_write_fsm 3626 3627 /////////////////////////////////////////////////////////////////////// 3628 // IXR_CMD FSM 3629 /////////////////////////////////////////////////////////////////////// 3630 // The IXR_CMD fsm controls the command packets to the XRAM : 3631 // It handles requests from 5 FSMs with a round-robin priority: 3632 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3633 // 3634 // - It sends a single flit VCI read to the XRAM in case of 3635 // GET request posted by the READ, WRITE or CAS FSMs. 
3636 // - It sends a multi-flit VCI write in case of PUT request posted by 3637 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 3638 // 3639 // For each client, there is three steps: 3640 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3641 // - IXR_CMD_*_TRT : access to TRT for address and data 3642 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3643 // 3644 // The address and data to be written (for a PUT) are stored in TRT. 3645 // The trdid field contains always the TRT entry index. 3646 //////////////////////////////////////////////////////////////////////// 3647 3648 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3649 3650 switch(r_ixr_cmd_fsm.read()) 3651 { 3652 /////////////////////// 3653 case IXR_CMD_READ_IDLE: 3654 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3655 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3656 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3657 #if ODCCP_NON_INCLUSIVE 3658 else if(r_cleanup_to_ixr_cmd_req.read()) 3659 { 3660 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3661 r_ixr_cmd_word = 0; 3662 } 3663 #else 3664 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3665 #endif 3666 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3667 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3668 break; 3669 //////////////////////// 3670 case IXR_CMD_WRITE_IDLE: 3671 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3672 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3673 #if ODCCP_NON_INCLUSIVE 3674 else if(r_cleanup_to_ixr_cmd_req.read()) 3675 { 3676 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3677 r_ixr_cmd_word = 0; 3678 } 3679 #else 3680 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3681 #endif 3682 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3683 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3684 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3685 break; 3686 ////////////////////// 3687 case IXR_CMD_CAS_IDLE: 3688 if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3689 #if ODCCP_NON_INCLUSIVE 3690 else if(r_cleanup_to_ixr_cmd_req.read()) 3691 { 3692 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3693 r_ixr_cmd_word = 0; 3694 } 3695 #else 3696 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3697 #endif 3698 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3699 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3700 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3701 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3702 break; 3703 /////////////////////// 3704 case IXR_CMD_XRAM_IDLE: 3705 #if ODCCP_NON_INCLUSIVE 3706 if(r_cleanup_to_ixr_cmd_req.read()) 3707 { 3708 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3709 r_ixr_cmd_word = 0; 3710 } 3711 #else 3712 if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3713 #endif 3714 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3715 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3716 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3717 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3718 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = 
IXR_CMD_XRAM_TRT; 3719 break; 3720 //////////////////////// 3721 case IXR_CMD_CLEANUP_IDLE: 3722 if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3723 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3724 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3725 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3726 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3727 #if ODCCP_NON_INCLUSIVE 3728 else if(r_cleanup_to_ixr_cmd_req.read()) 3729 { 3730 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3731 r_ixr_cmd_word = 0; 3732 } 3733 #else 3734 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3735 #endif 3736 break; 3737 ///////////////////////// 3738 case IXR_CMD_CONFIG_IDLE: 3739 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3740 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3741 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3742 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3743 #if ODCCP_NON_INCLUSIVE 3744 else if(r_cleanup_to_ixr_cmd_req.read()) 3745 { 3746 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3747 r_ixr_cmd_word = 0; 3748 } 3749 #else 3750 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3751 #endif 3752 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3753 break; 3754 3755 3756 ////////////////////// 3757 case IXR_CMD_READ_TRT: // access TRT for a GET 3758 { 3759 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3760 { 3761 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 3762 r_ixr_cmd_address = entry.nline * (m_words<<2); 3763 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3764 r_ixr_cmd_get = true; 3765 r_ixr_cmd_word = 0; 3766 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3767 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3768 3769 #if DEBUG_MEMC_IXR_CMD 3770 if(m_debug) 3771 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 3772 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3773 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3774 #endif 3775 } 3776 break; 3777 } 3778 /////////////////////// 3779 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3780 { 3781 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3782 { 3783 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 3784 r_ixr_cmd_address = entry.nline * (m_words<<2); 3785 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3786 r_ixr_cmd_get = entry.xram_read; 3787 r_ixr_cmd_word = 0; 3788 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3789 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3790 3791 #if DEBUG_MEMC_IXR_CMD 3792 if(m_debug) 3793 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3794 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3795 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3796 #endif 3797 } 3798 break; 3799 } 3800 ///////////////////// 3801 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3802 { 3803 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3804 { 3805 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 3806 r_ixr_cmd_address = entry.nline * (m_words<<2); 3807 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3808 r_ixr_cmd_get = entry.xram_read; 3809 
r_ixr_cmd_word = 0; 3810 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3811 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3812 3813 #if DEBUG_MEMC_IXR_CMD 3814 if(m_debug) 3815 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3816 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3817 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3818 #endif 3819 } 3820 break; 3821 } 3822 ////////////////////// 3823 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3824 { 3825 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3826 { 3827 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 3828 r_ixr_cmd_address = entry.nline * (m_words<<2); 3829 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3830 r_ixr_cmd_get = false; 3831 r_ixr_cmd_word = 0; 3832 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3833 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3834 3835 #if DEBUG_MEMC_IXR_CMD 3836 if(m_debug) 3837 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3838 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3839 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3840 #endif 3841 } 3842 break; 3843 } 3844 ////////////////////// 3845 case IXR_CMD_CLEANUP_TRT: // access TRT for a PUT 3846 { 3847 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3848 { 3849 3850 TransactionTabEntry entry = m_trt.read( r_cleanup_to_ixr_cmd_index.read() ); 3851 r_ixr_cmd_address = entry.nline * (m_words<<2); 3852 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 3853 r_ixr_cmd_get = false; 3854 r_ixr_cmd_word = 0; 3855 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3856 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3857 3858 #if DEBUG_MEMC_IXR_CMD 3859 if(m_debug) 3860 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 3861 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 3862 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3863 #endif 3864 } 3865 break; 3866 } 3867 //////////////////////// 3868 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3869 { 3870 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3871 { 3872 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 3873 r_ixr_cmd_address = entry.nline * (m_words<<2); 3874 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3875 r_ixr_cmd_get = false; 3876 r_ixr_cmd_word = 0; 3877 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3878 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3879 3880 #if DEBUG_MEMC_IXR_CMD 3881 if(m_debug) 3882 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3883 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3884 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3885 #endif 3886 } 3887 break; 3888 } 3889 3890 /////////////////////// 3891 case IXR_CMD_READ_SEND: // send a get from READ FSM 3892 { 3893 if(p_vci_ixr.cmdack) 3894 { 3895 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3896 r_read_to_ixr_cmd_req = false; 3897 3898 #if DEBUG_MEMC_IXR_CMD 3899 if(m_debug) 3900 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3901 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3902 #endif 3903 } 3904 break; 3905 } 3906 //////////////////////// 3907 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3908 { 3909 if(p_vci_ixr.cmdack) 3910 { 
3911 if(r_write_to_ixr_cmd_put.read()) // PUT 3912 { 3913 if(r_ixr_cmd_word.read() == (m_words - 2)) 3914 { 3915 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3916 r_write_to_ixr_cmd_req = false; 3917 } 3918 else 3919 { 3920 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3921 } 3922 3923 #if DEBUG_MEMC_IXR_CMD 3924 if(m_debug) 3925 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3926 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3927 #endif 3928 } 3929 else // GET 3930 { 3931 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3932 r_write_to_ixr_cmd_req = false; 3933 3934 #if DEBUG_MEMC_IXR_CMD 3935 if(m_debug) 3936 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3937 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3938 #endif 3939 } 3940 } 3941 break; 3942 } 3943 ////////////////////// 3944 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3945 { 3946 if(p_vci_ixr.cmdack) 3947 { 3948 if(r_cas_to_ixr_cmd_put.read()) // PUT 3949 { 3950 if(r_ixr_cmd_word.read() == (m_words - 2)) 3951 { 3952 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3953 r_cas_to_ixr_cmd_req = false; 3954 } 3955 else 3956 { 3957 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3958 } 3959 3960 #if DEBUG_MEMC_IXR_CMD 3961 if(m_debug) 3962 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3963 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3964 #endif 3965 } 3966 else // GET 3967 { 3968 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3969 r_cas_to_ixr_cmd_req = false; 3970 3971 #if DEBUG_MEMC_IXR_CMD 3972 if(m_debug) 3973 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3974 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3975 #endif 3976 } 3977 } 3978 break; 3979 } 3980 /////////////////////// 3981 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3982 { 3983 if(p_vci_ixr.cmdack.read()) 3984 { 3985 if(r_ixr_cmd_word.read() == (m_words - 2)) 3986 { 3987 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3988 r_xram_rsp_to_ixr_cmd_req = false; 3989 } 3990 else 3991 { 3992 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3993 } 3994 #if DEBUG_MEMC_IXR_CMD 3995 if(m_debug) 3996 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3997 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3998 #endif 3999 } 4000 break; 4001 } 4002 4003 //////////////////////// 4004 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4005 { 4006 if(p_vci_ixr.cmdack.read()) 4007 { 4008 if(r_ixr_cmd_word.read() == (m_words - 2)) 4009 { 4010 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 4011 r_cleanup_to_ixr_cmd_req = false; 4012 //r_ixr_cmd_word = 0; 4013 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 4014 } 4015 else 4016 { 4017 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4018 } 4019 4020 #if DEBUG_MEMC_IXR_CMD 4021 if(m_debug) 4022 { 4023 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::endl; 4024 } 4025 #endif 4026 } 4027 break; 4028 } 4029 4030 ///////////////////////// 4031 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 4032 { 4033 if(p_vci_ixr.cmdack.read()) 4034 { 4035 if(r_ixr_cmd_word.read() == (m_words - 2)) 4036 { 4037 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 4038 r_config_to_ixr_cmd_req = false; 4039 } 4040 else 4041 { 4042 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4043 } 
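The IXR_CMD_*_SEND states above all share the same sequencing: a PUT streams the cache line as 64-bit flits carrying two 32-bit words each, the word counter advancing by 2 until it reaches m_words - 2, and the flit address being rebuilt as nline * (m_words << 2) plus the word offset. The stand-alone program below replays that sequencing outside SystemC; m_words = 16, the nline value, and the data values are arbitrary assumptions for the example.

// Stand-alone replay of the PUT flit sequencing used by IXR_CMD_*_SEND.
#include <cstdint>
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t   m_words = 16;                     // words per cache line (assumed)
    const uint64_t nline   = 0x123456;               // cache line index (assumed)
    const uint64_t base    = nline * (m_words << 2); // byte address of the line

    uint32_t wdata[m_words];                         // data copied from the TRT entry
    for (size_t i = 0; i < m_words; i++) wdata[i] = 0x1000 + (uint32_t)i;

    for (size_t word = 0; ; word += 2)               // one 64-bit flit per iteration
    {
        uint64_t flit = (uint64_t)wdata[word + 1] << 32 | wdata[word];
        bool     eop  = (word == m_words - 2);       // last flit of the PUT
        printf("addr=0x%llx data=0x%016llx eop=%d\n",
               (unsigned long long)(base + (word << 2)),
               (unsigned long long)flit, eop);
        if (eop) break;
    }
    return 0;
}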
4044 4045 #if DEBUG_MEMC_IXR_CMD 4046 if(m_debug) 4047 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 4048 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4049 #endif 4050 } 4051 break; 4052 } 4053 } // end switch r_ixr_cmd_fsm 4054 4055 //////////////////////////////////////////////////////////////////////////// 4056 // IXR_RSP FSM 4057 //////////////////////////////////////////////////////////////////////////// 4058 // The IXR_RSP FSM receives the response packets from the XRAM, 4059 // for both PUT transaction, and GET transaction. 4060 // 4061 // - A response to a PUT request is a single-cell VCI packet. 4062 // The TRT index is contained in the RTRDID field. 4063 // The FSM takes the lock protecting the TRT, and the corresponding 4064 // entry is erased. If an acknowledge was required (in case of software SYNC) 4065 // the r_config_rsp_lines counter is decremented. 4066 // 4067 // - A response to a GET request is a multi-cell VCI packet. 4068 // The TRT index is contained in the RTRDID field. 4069 // The N cells contain the N words of the cache line in the RDATA field. 4070 // The FSM takes the lock protecting the TRT to store the line in the TRT 4071 // (taking into account the write requests already stored in the TRT). 4072 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 4073 // signal is set to inform the XRAM_RSP FSM. 4074 /////////////////////////////////////////////////////////////////////////////// 4075 4076 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 4077 4078 switch(r_ixr_rsp_fsm.read()) 4079 { 4080 ////////////////// 4081 case IXR_RSP_IDLE: // test transaction type: PUT/GET 4082 { 4083 if(p_vci_ixr.rspval.read()) 4084 { 4085 r_ixr_rsp_cpt = 0; 4086 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 4087 4088 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 4089 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 4090 4091 if(p_vci_ixr.reop.read()) // PUT 4092 { 4093 #if ODCCP_NON_INCLUSIVE 4094 if (p_vci_ixr.rtrdid.read() == m_trt_lines) 4095 r_ixr_rsp_fsm = IXR_RSP_ACK; 4096 else 4097 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4098 #else 4099 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4100 #endif 4101 4102 #if DEBUG_MEMC_IXR_RSP 4103 if(m_debug) 4104 std::cout << " <MEMC " << name() 4105 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 4106 #endif 4107 } 4108 else // GET 4109 { 4110 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 4111 4112 #if DEBUG_MEMC_IXR_RSP 4113 if(m_debug) 4114 std::cout << " <MEMC " << name() 4115 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 4116 #endif 4117 } 4118 } 4119 break; 4120 } 4121 //////////////////////// 4122 case IXR_RSP_ACK: // Acknowledge PUT transaction 4123 { 4124 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4125 break; 4126 } 4127 //////////////////////// 4128 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 4129 // decrease the line counter if config request 4130 { 4131 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4132 { 4133 size_t index = r_ixr_rsp_trt_index.read(); 4134 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4135 m_trt.erase(index); 4136 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4137 4138 #if DEBUG_MEMC_IXR_RSP 4139 if(m_debug) 4140 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 4141 << r_ixr_rsp_trt_index.read() << std::endl; 4142 #endif 4143 } 4144 break; 4145 } 4146 ////////////////////// 4147 case IXR_RSP_TRT_READ: // write a 64 bits 
data word in TRT 4148 { 4149 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4150 { 4151 size_t index = r_ixr_rsp_trt_index.read(); 4152 size_t word = r_ixr_rsp_cpt.read(); 4153 bool eop = p_vci_ixr.reop.read(); 4154 wide_data_t data = p_vci_ixr.rdata.read(); 4155 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4156 4157 assert(((eop == (word == (m_words-2))) or error) and 4158 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 4159 4160 m_trt.write_rsp( index, 4161 word, 4162 data ); 4163 4164 r_ixr_rsp_cpt = word + 2; 4165 4166 if(eop) 4167 { 4168 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 4169 /*if(p_vci_ixr.rpktid.read()&0xF == 0x9) 4170 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = true; 4171 else 4172 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = false;*/ 4173 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4174 } 4175 4176 #if DEBUG_MEMC_IXR_RSP 4177 if(m_debug) 4178 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 4179 << " index = " << std::dec << index 4180 << " / word = " << word 4181 << " / data = " << std::hex << data << std::endl; 4182 #endif 4183 } 4184 break; 4185 } 4186 } // end switch r_ixr_rsp_fsm 4187 4188 //////////////////////////////////////////////////////////////////////////// 4189 // XRAM_RSP FSM 4190 //////////////////////////////////////////////////////////////////////////// 4191 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 4192 // The cache line has been written in the TRT by the IXR_RSP FSM. 4193 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 4194 // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as there are 4195 // entries in the TRT; they are handled with a round-robin priority. 4196 // 4197 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 4198 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 4199 // It selects a cache slot and saves the victim line in another local buffer 4200 // r_xram_rsp_victim_***. 4201 // It writes the line extracted from TRT in the cache. 4202 // If it was a read MISS, the XRAM_RSP FSM sends a request to the TGT_RSP 4203 // FSM to return the cache line to the registered processor. 4204 // If there is no empty slot, a victim line is evicted, and 4205 // invalidate requests are sent to the L1 caches containing copies. 4206 // If this line is dirty, the XRAM_RSP FSM sends a request to the IXR_CMD 4207 // FSM to save the victim line to the XRAM, and registers the write transaction 4208 // in the TRT (using the entry previously used by the read transaction).
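    // Handshake with the IXR_RSP FSM (outline of the protocol implemented below):
    // - the IXR_RSP FSM sets r_ixr_rsp_to_xram_rsp_rok[i] when the last flit of the
    //   GET response for TRT entry i has been written into the TRT;
    // - the XRAM_RSP_IDLE state scans these flip-flops with a round-robin priority,
    //   starting from the entry following the last one served, clears the selected
    //   flag, and stores the selected index in r_xram_rsp_trt_index.
    // For example, with 4 TRT entries and a previous index of 1, the entries are
    // tested in the order 2, 3, 0, 1.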
4209 /////////////////////////////////////////////////////////////////////////////// 4210 4211 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 4212 4213 switch(r_xram_rsp_fsm.read()) 4214 { 4215 /////////////////// 4216 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 4217 { 4218 size_t old = r_xram_rsp_trt_index.read(); 4219 size_t lines = m_trt_lines; 4220 for(size_t i=0 ; i<lines ; i++) 4221 { 4222 size_t index = (i+old+1) %lines; 4223 if(r_ixr_rsp_to_xram_rsp_rok[index]) 4224 { 4225 r_xram_rsp_trt_index = index; 4226 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4227 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4228 4229 #if DEBUG_MEMC_XRAM_RSP 4230 if(m_debug) 4231 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 4232 << " Available cache line in TRT:" 4233 << " index = " << std::dec << index << std::endl; 4234 #endif 4235 break; 4236 } 4237 } 4238 break; 4239 } 4240 /////////////////////// 4241 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 4242 // Copy the TRT entry in a local buffer 4243 { 4244 if( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4245 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 4246 { 4247 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4248 size_t index = r_xram_rsp_trt_index.read(); 4249 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 4250 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4251 4252 #if DEBUG_MEMC_XRAM_RSP 4253 if(m_debug) 4254 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 4255 << " Get access to DIR and TRT" << std::endl; 4256 #endif 4257 } 4258 break; 4259 } 4260 /////////////////////// 4261 case XRAM_RSP_TRT_COPY: // Select a victim cache line 4262 // and copy it in a local buffer 4263 { 4264 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4265 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 4266 4267 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4268 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 4269 4270 // selects & extracts a victim line from cache 4271 size_t way = 0; 4272 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 4273 4274 DirectoryEntry victim(m_cache_directory.select(set, way)); 4275 4276 #if ODCCP_NON_INCLUSIVE 4277 bool inval = (victim.count and victim.valid and victim.coherent) ; 4278 #else 4279 bool inval = (victim.count and victim.valid) ; 4280 #endif 4281 4282 // copy the victim line in a local buffer (both data dir) 4283 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 4284 4285 r_xram_rsp_victim_copy = victim.owner.srcid; 4286 4287 r_xram_rsp_victim_coherent = victim.coherent; 4288 r_xram_rsp_victim_copy_inst = victim.owner.inst; 4289 r_xram_rsp_victim_count = victim.count; 4290 r_xram_rsp_victim_ptr = victim.ptr; 4291 r_xram_rsp_victim_way = way; 4292 r_xram_rsp_victim_set = set; 4293 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 4294 r_xram_rsp_victim_is_cnt = victim.is_cnt; 4295 r_xram_rsp_victim_inval = inval ; 4296 r_xram_rsp_victim_dirty = victim.dirty; 4297 4298 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 4299 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4300 4301 #if DEBUG_MEMC_XRAM_RSP 4302 if(m_debug) 4303 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 4304 << " Select a victim slot: " 4305 << " way = " << std::dec << way 4306 << " / set = " << set 4307 << " / victim coherent = " << victim.coherent 4308 << " / victim owner id = " << victim.owner.srcid 4309 << " / inval_required = " << inval << std::endl; 4310 #endif 4311 
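    // Note on the inval condition computed above: with ODCCP_NON_INCLUSIVE, a valid
    // victim with copies is invalidated only if it is also coherent, whereas in the
    // default (inclusive) mode any valid victim with at least one copy is invalidated.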
break; 4312 } 4313 /////////////////////// 4314 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 4315 // to check a possible pending inval 4316 { 4317 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4318 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 4319 4320 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4321 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 4322 4323 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 4324 { 4325 size_t index = 0; 4326 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 4327 { 4328 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4329 4330 #if DEBUG_MEMC_XRAM_RSP 4331 if(m_debug) 4332 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4333 << " Get acces to IVT, but line invalidation registered" 4334 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4335 << " / index = " << std::dec << index << std::endl; 4336 #endif 4337 4338 } 4339 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 4340 { 4341 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4342 4343 #if DEBUG_MEMC_XRAM_RSP 4344 if(m_debug) 4345 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4346 << " Get acces to IVT, but inval required and IVT full" << std::endl; 4347 #endif 4348 } 4349 else 4350 { 4351 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 4352 4353 #if DEBUG_MEMC_XRAM_RSP 4354 if(m_debug) 4355 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4356 << " Get acces to IVT / no pending inval request" << std::endl; 4357 #endif 4358 } 4359 } 4360 break; 4361 } 4362 ///////////////////////// 4363 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 4364 { 4365 4366 #if DEBUG_MEMC_XRAM_RSP 4367 if(m_debug) 4368 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 4369 << " Release all locks and retry" << std::endl; 4370 #endif 4371 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4372 break; 4373 } 4374 /////////////////////// 4375 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4376 // erases the TRT entry if victim not dirty, 4377 // and set inval request in IVT if required 4378 { 4379 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4380 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4381 4382 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4383 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4384 4385 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4386 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4387 4388 // check if this is an instruction read, this means pktid is either 4389 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4390 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4391 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4392 4393 // check if this is a cached read, this means pktid is either 4394 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4395 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4396 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4397 4398 bool dirty = false; 4399 4400 // update cache data 4401 size_t set = r_xram_rsp_victim_set.read(); 4402 size_t way = r_xram_rsp_victim_way.read(); 4403 4404 for(size_t word=0; word<m_words ; word++) 4405 { 4406 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4407 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4408 } 4409 4410 // update cache directory 4411 DirectoryEntry entry; 4412 
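    // The new entry built below registers the requesting L1 cache when the read
    // was cacheable: for a cached read, the requester recorded in the TRT entry
    // becomes the single registered copy (count = 1, owner.srcid / owner.inst);
    // for an uncached access no copy is registered (count = 0). With the ODCCP
    // extension, pktid == 0x9 marks the line as non coherent (entry.coherent = false).
    // Example of the pktid decoding used above: for a processor read with
    // pktid = 0bX011 (TYPE_READ_INS_MISS), bit 1 (instruction) and bit 0 (cached)
    // are both set, so inst_read and cached_read are both true.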
entry.valid = true; 4413 entry.is_cnt = false; 4414 entry.lock = false; 4415 entry.dirty = dirty; 4416 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4417 entry.ptr = 0; 4418 if(cached_read) 4419 { 4420 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4421 #if L1_MULTI_CACHE 4422 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 4423 #endif 4424 entry.owner.inst = inst_read; 4425 entry.count = 1; 4426 4427 } 4428 else 4429 { 4430 entry.owner.srcid = 0; 4431 #if L1_MULTI_CACHE 4432 entry.owner.cache_id = 0; 4433 #endif 4434 entry.owner.inst = 0; 4435 entry.count = 0; 4436 } 4437 4438 /*ODCCP*/ //if pktid = 0x9 that means line no coherent 4439 if(r_xram_rsp_trt_buf.pktid == 0x9){ 4440 entry.coherent = false; 4441 } 4442 else{ 4443 entry.coherent = true; 4444 } 4445 4446 m_cache_directory.write(set, way, entry); 4447 4448 // register invalid request in IVT for victim line if required 4449 if(r_xram_rsp_victim_inval.read()) 4450 { 4451 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4452 size_t index = 0; 4453 size_t count_copies = r_xram_rsp_victim_count.read(); 4454 4455 bool wok = m_ivt.set(false, // it's an inval transaction 4456 broadcast, // set broadcast bit 4457 false, // no response required 4458 false, // no acknowledge required 4459 0, // srcid 4460 0, // trdid 4461 0, // pktid 4462 r_xram_rsp_victim_nline.read(), 4463 count_copies, 4464 index); 4465 4466 r_xram_rsp_ivt_index = index; 4467 4468 assert( wok and 4469 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4470 } 4471 4472 #if DEBUG_MEMC_XRAM_RSP 4473 if(m_debug) 4474 { 4475 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 4476 << " Cache update: " 4477 << " way = " << std::dec << way 4478 << " / set = " << set 4479 << " / owner_id = " << std::hex << entry.owner.srcid 4480 << " / owner_ins = " << std::dec << entry.owner.inst 4481 << " / count = " << entry.count 4482 << " / is_cnt = " << entry.is_cnt << std::endl; 4483 if(r_xram_rsp_victim_inval.read()) 4484 std::cout << " Invalidation request for address " 4485 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 4486 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4487 } 4488 #endif 4489 4490 #if ODCCP_NON_INCLUSIVE 4491 if (!r_xram_rsp_victim_dirty.read()) m_trt.erase(r_xram_rsp_trt_index.read()); 4492 4493 if (r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4494 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4495 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4496 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4497 #else 4498 // If the victim is not dirty and coherent or victim's count egal 0 , we don't need another XRAM put transaction, 4499 // and we can erase the TRT entry 4500 if(!r_xram_rsp_victim_dirty.read() and (r_xram_rsp_victim_coherent.read() or (r_xram_rsp_victim_count.read() == 0))) m_trt.erase(r_xram_rsp_trt_index.read()); 4501 4502 // Next state 4503 if(r_xram_rsp_victim_dirty.read() or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) 4504 r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4505 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4506 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4507 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4508 #endif 4509 break; 4510 } 4511 //////////////////////// 4512 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 4513 { 4514 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4515 { 4516 std::vector<data_t> 
data_vector; 4517 data_vector.clear(); 4518 for(size_t i=0; i<m_words; i++) 4519 { 4520 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4521 } 4522 m_trt.set( r_xram_rsp_trt_index.read(), 4523 false, // PUT 4524 r_xram_rsp_victim_nline.read(), // line index 4525 0, // unused 4526 0, // unused 4527 0, // unused 4528 false, // not proc_read 4529 0, // unused 4530 0, // unused 4531 std::vector<be_t>(m_words,0xF), 4532 data_vector); 4533 4534 #if DEBUG_MEMC_XRAM_RSP 4535 if(m_debug) 4536 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4537 << " Set TRT entry for the put transaction" 4538 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4539 #endif 4540 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4541 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4542 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4543 } 4544 break; 4545 } 4546 ////////////////////// 4547 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4548 { 4549 if ( not r_xram_rsp_to_tgt_rsp_req.read() ) 4550 { 4551 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4552 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4553 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4554 for(size_t i=0; i < m_words; i++) 4555 { 4556 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4557 } 4558 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4559 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4560 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4561 r_xram_rsp_to_tgt_rsp_rerror = false; 4562 r_xram_rsp_to_tgt_rsp_req = true; 4563 4564 #if ODCCP_NON_INCLUSIVE 4565 if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4566 else if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4567 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4568 #else 4569 if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4570 else if(r_xram_rsp_victim_dirty.read() or 4571 (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) 4572 r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4573 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4574 #endif 4575 4576 #if DEBUG_MEMC_XRAM_RSP 4577 if(m_debug) 4578 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 4579 << " Request the TGT_RSP FSM to return data:" 4580 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 4581 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4582 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4583 #endif 4584 } 4585 break; 4586 } 4587 //////////////////// 4588 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 4589 { 4590 if(!r_xram_rsp_to_cc_send_multi_req.read() and 4591 !r_xram_rsp_to_cc_send_brdcast_req.read()) 4592 { 4593 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 4594 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 4595 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4596 4597 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 4598 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4599 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4600 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4601 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4602 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4603 xram_rsp_to_cc_send_fifo_put = multi_req; 4604 r_xram_rsp_next_ptr = 
r_xram_rsp_victim_ptr.read(); 4605 4606 #if ODCCP_NON_INCLUSIVE 4607 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4608 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4609 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4610 #else 4611 if(r_xram_rsp_victim_dirty or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) 4612 r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4613 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4614 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4615 #endif 4616 4617 #if DEBUG_MEMC_XRAM_RSP 4618 if(m_debug) 4619 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4620 << " Send an inval request to CC_SEND FSM" 4621 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4622 #endif 4623 } 4624 break; 4625 } 4626 ////////////////////////// 4627 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4628 { 4629 if((!r_xram_rsp_to_ixr_cmd_req.read()) /*and (!r_xram_rsp_to_ixr_cmd_inval_ncc_pending.read())*/) 4630 { 4631 4632 r_xram_rsp_to_ixr_cmd_req = true; 4633 //r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 4634 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4635 /*for(size_t i=0; i<m_words ; i++) 4636 { 4637 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 4638 }*/ 4639 #if (ODCCP_NON_INCLUSIVE == 0) 4640 // if victim is no coherent, we dont request a ixr command 4641 if( (!r_xram_rsp_victim_coherent.read()) and (r_xram_rsp_victim_count.read() == 1) ) 4642 { 4643 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = true; // inval no coherent pending 4644 r_xram_rsp_to_ixr_cmd_req = false; 4645 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4646 break; 4647 } 4648 #endif 4649 4650 m_cpt_write_dirty++; 4651 4652 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4653 r_xram_rsp_victim_inval.read(); 4654 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4655 4656 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4657 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4658 4659 #if DEBUG_MEMC_XRAM_RSP 4660 if(m_debug) 4661 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4662 << " Send the put request to IXR_CMD FSM" 4663 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4664 #endif 4665 } 4666 break; 4667 } 4668 ///////////////////////// 4669 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4670 { 4671 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4672 { 4673 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4674 } 4675 4676 #if DEBUG_MEMC_XRAM_RSP 4677 if(m_debug) 4678 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 4679 << " Requesting HEAP lock" << std::endl; 4680 #endif 4681 break; 4682 } 4683 ///////////////////////// 4684 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 4685 { 4686 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4687 { 4688 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 4689 4690 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4691 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4692 xram_rsp_to_cc_send_fifo_put = true; 4693 if(m_xram_rsp_to_cc_send_inst_fifo.wok()) 4694 { 4695 r_xram_rsp_next_ptr = entry.next; 4696 if(entry.next == r_xram_rsp_next_ptr.read()) // last copy 4697 { 4698 r_xram_rsp_to_cc_send_multi_req = true; 4699 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 4700 } 4701 else 4702 { 4703 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4704 } 4705 } 4706 else 4707 { 4708 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4709 } 4710 4711 #if 
DEBUG_MEMC_XRAM_RSP 4712 if(m_debug) 4713 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 4714 << " Erase copy:" 4715 << " srcid = " << std::hex << entry.owner.srcid 4716 << " / inst = " << std::dec << entry.owner.inst << std::endl; 4717 #endif 4718 } 4719 break; 4720 } 4721 ///////////////////////// 4722 case XRAM_RSP_HEAP_LAST: // last copy 4723 { 4724 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 4725 { 4726 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 4727 << " bad HEAP allocation" << std::endl; 4728 exit(0); 4729 } 4730 size_t free_pointer = m_heap.next_free_ptr(); 4731 4732 HeapEntry last_entry; 4733 last_entry.owner.srcid = 0; 4734 last_entry.owner.inst = false; 4735 if(m_heap.is_full()) 4736 { 4737 last_entry.next = r_xram_rsp_next_ptr.read(); 4738 m_heap.unset_full(); 4739 } 4740 else 4741 { 4742 last_entry.next = free_pointer; 4743 } 4744 4745 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 4746 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 4747 4748 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4749 4750 #if DEBUG_MEMC_XRAM_RSP 4751 if(m_debug) 4752 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 4753 << " Heap housekeeping" << std::endl; 4754 #endif 4755 break; 4756 } 4757 ////////////////////////// 4758 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 4759 { 4760 m_trt.erase(r_xram_rsp_trt_index.read()); 4761 4762 // Next state 4763 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 4764 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4765 4766 #if DEBUG_MEMC_XRAM_RSP 4767 if(m_debug) 4768 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 4769 << " Error reported by XRAM / erase the TRT entry" << std::endl; 4770 #endif 4771 break; 4772 } 4773 //////////////////////// 4774 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 4775 { 4776 if(!r_xram_rsp_to_tgt_rsp_req.read()) 4777 { 4778 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4779 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4780 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4781 for(size_t i=0; i < m_words; i++) 4782 { 4783 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4784 } 4785 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4786 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4787 r_xram_rsp_to_tgt_rsp_rerror = true; 4788 r_xram_rsp_to_tgt_rsp_req = true; 4789 4790 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4791 4792 #if DEBUG_MEMC_XRAM_RSP 4793 if(m_debug) 4794 std::cout << " <MEMC " << name() 4795 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 4796 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 4797 #endif 4798 } 4799 break; 4800 } 4801 } // end swich r_xram_rsp_fsm 4802 4803 //////////////////////////////////////////////////////////////////////////////////// 4804 // CLEANUP FSM 4805 //////////////////////////////////////////////////////////////////////////////////// 4806 // The CLEANUP FSM handles the cleanup request from L1 caches. 4807 // It accesses the cache directory and the heap to update the list of copies. 
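    // Format of the DSPIN cleanup command, as decoded by the states below:
    // - first flit  : SRCID, P2M type (data / instruction cleanup), way index,
    //                 NCC bit (ODCCP: cleanup on a non coherent line) and the
    //                 most significant bits of NLINE;
    // - second flit : least significant bits of NLINE and the EOP bit;
    // - if EOP is not set on the second flit (ODCCP cleanup with data), the
    //   following flits carry one 32-bit data word each (m_words words in total).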
4808 //////////////////////////////////////////////////////////////////////////////////// 4809 4810 //std::cout << std::endl << "cleanup_fsm" << std::endl; 4811 4812 switch(r_cleanup_fsm.read()) 4813 { 4814 ////////////////// 4815 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4816 { 4817 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4818 4819 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4820 uint32_t srcid = DspinDhccpParam::dspin_get( flit, 4821 DspinDhccpParam::CLEANUP_SRCID); 4822 4823 uint8_t type = DspinDhccpParam::dspin_get( flit, 4824 DspinDhccpParam::P2M_TYPE); 4825 4826 r_cleanup_way_index = DspinDhccpParam::dspin_get( flit, 4827 DspinDhccpParam::CLEANUP_WAY_INDEX); 4828 4829 r_cleanup_nline = DspinDhccpParam::dspin_get( flit, 4830 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4831 4832 /*ODCCP*/ // Cleanup on no coherent line if 1 4833 r_cleanup_ncc = 4834 DspinDhccpParam::dspin_get( 4835 flit, 4836 DspinDhccpParam::CLEANUP_NCC); 4837 4838 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4839 r_cleanup_srcid = srcid; 4840 4841 assert( (srcid < m_initiators) and 4842 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 4843 4844 m_cpt_cleanup++; 4845 cc_receive_to_cleanup_fifo_get = true; 4846 r_cleanup_fsm = CLEANUP_GET_NLINE; 4847 4848 #if DEBUG_MEMC_CLEANUP 4849 if(m_debug) 4850 std::cout << " <MEMC " << name() 4851 << " CLEANUP_IDLE> Cleanup request:" << std::hex 4852 << " owner_id = " << srcid 4853 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) << std::endl; 4854 #endif 4855 break; 4856 } 4857 /////////////////////// 4858 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4859 { 4860 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4861 4862 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4863 4864 4865 addr_t nline = r_cleanup_nline.read() | 4866 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4867 4868 cc_receive_to_cleanup_fifo_get = true; 4869 r_cleanup_nline = nline; 4870 r_cleanup_fsm = CLEANUP_DIR_REQ; 4871 4872 bool eop = DspinDhccpParam::dspin_get(flit, DspinDhccpParam::P2M_EOP); 4873 4874 /*ODCCP*/ // if not eop (more than 2 flits) there is a cleanup no coherent with data 4875 if (!eop) 4876 { 4877 r_cleanup_contains_data = true; // this cleanup contains data 4878 r_cleanup_fsm = CLEANUP_GET_DATA; 4879 r_cleanup_data_index = 0; 4880 } 4881 else 4882 { 4883 r_cleanup_contains_data = false; 4884 r_cleanup_fsm = CLEANUP_DIR_REQ; 4885 } 4886 4887 cc_receive_to_cleanup_fifo_get = true; 4888 r_cleanup_nline = nline; 4889 4890 #if DEBUG_MEMC_CLEANUP 4891 if(m_debug) 4892 std::cout << " <MEMC " << name() 4893 << " CLEANUP_GET_NLINE> Cleanup request:" 4894 << " / ncc = " << r_cleanup_ncc.read() 4895 << " / address = " << std::hex << nline * m_words * 4 << std::endl; 4896 #endif 4897 break; 4898 } 4899 ///////////////////// 4900 /*ODCCP*/ // We save the cleanup's data into a buffer 4901 case CLEANUP_GET_DATA : 4902 { 4903 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4904 4905 assert (r_cleanup_data_index.read() < m_words and "MEM_CACHE in CLEANUP_GET_DATA : too much flits in cleanup data updt"); 4906 4907 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4908 4909 uint32_t data = 4910 DspinDhccpParam::dspin_get (flit, DspinDhccpParam::CLEANUP_DATA_UPDT); 4911 4912 r_cleanup_data[r_cleanup_data_index.read()] = data; 4913 r_cleanup_data_index = r_cleanup_data_index.read() + 1; 4914 cc_receive_to_cleanup_fifo_get = true; 4915 m_cpt_cleanup_data++; 4916 
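    // r_cleanup_data_index counts the data words already stored in r_cleanup_data;
    // the test below (on the value sampled at the beginning of the cycle) detects
    // the last of the m_words expected data flits.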
4917 if (r_cleanup_data_index.read() == m_words - 1) // last flit 4918 { 4919 r_cleanup_fsm = CLEANUP_DIR_REQ; 4920 } 4921 break; 4922 } 4923 ///////////////////// 4924 case CLEANUP_DIR_REQ: // Get the lock to the directory 4925 { 4926 // Get the lock to the directory 4927 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4928 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4929 4930 #if DEBUG_MEMC_CLEANUP 4931 if(m_debug) 4932 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 4933 #endif 4934 break; 4935 } 4936 ////////////////////// 4937 case CLEANUP_DIR_LOCK: // test directory status 4938 { 4939 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4940 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4941 4942 // Read the directory 4943 size_t way = 0; 4944 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 4945 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 4946 r_cleanup_is_cnt = entry.is_cnt; 4947 r_cleanup_dirty = entry.dirty; 4948 r_cleanup_tag = entry.tag; 4949 r_cleanup_lock = entry.lock; 4950 r_cleanup_way = way; 4951 r_cleanup_count = entry.count; 4952 r_cleanup_ptr = entry.ptr; 4953 r_cleanup_copy = entry.owner.srcid; 4954 r_cleanup_copy_inst = entry.owner.inst; 4955 if(entry.valid) // hit : the copy must be cleared 4956 { 4957 assert( (entry.count > 0) and 4958 "MEMC ERROR in CLEANUP_DIR_LOCK state, CLEANUP on valid entry with no copies"); 4959 4960 if((entry.count == 1) or (entry.is_cnt)) // no access to the heap 4961 { 4962 r_cleanup_fsm = CLEANUP_DIR_WRITE; 4963 } 4964 else // access to the heap 4965 { 4966 r_cleanup_fsm = CLEANUP_HEAP_REQ; 4967 } 4968 } 4969 else // miss : check IVT for a pending inval 4970 { 4971 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4972 } 4973 4974 #if DEBUG_MEMC_CLEANUP 4975 if(m_debug) 4976 std::cout << " <MEMC " << name() 4977 << " CLEANUP_DIR_LOCK> Test directory status: " 4978 << std::hex << " address = " << cleanup_address 4979 << " / hit = " << entry.valid 4980 << " / dir_id = " << entry.owner.srcid 4981 << " / dir_ins = " << entry.owner.inst 4982 << " / search_id = " << r_cleanup_srcid.read() 4983 << " / search_ins = " << r_cleanup_inst.read() 4984 << " / count = " << entry.count 4985 << " / is_cnt = " << entry.is_cnt << std::endl; 4986 #endif 4987 break; 4988 } 4989 /////////////////////// 4990 case CLEANUP_DIR_WRITE: // Update the directory entry without heap access 4991 { 4992 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4993 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4994 4995 size_t way = r_cleanup_way.read(); 4996 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 4997 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4998 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4999 bool match = match_srcid and match_inst; 5000 5001 assert( (r_cleanup_is_cnt.read() or match) and 5002 "MEMC ERROR in CLEANUP_DIR_LOCK: illegal CLEANUP on valid entry"); 5003 5004 // update the cache directory (for the copies) 5005 DirectoryEntry entry; 5006 entry.valid = true; 5007 entry.is_cnt = r_cleanup_is_cnt.read(); 5008 entry.dirty = r_cleanup_dirty.read() or r_cleanup_contains_data.read(); 5009 entry.tag = r_cleanup_tag.read(); 5010 entry.lock = r_cleanup_lock.read(); 5011 entry.ptr = r_cleanup_ptr.read(); 5012 entry.count = r_cleanup_count.read() - 1; 5013 entry.owner.srcid = 0; 5014 entry.owner.inst = 0; 5015 /*ODCCP*/ // if cleanup contains data we update the cache data 5016 if 
(r_cleanup_contains_data.read()) 5017 { 5018 for (size_t word = 0; word < m_words; word ++) 5019 { 5020 m_cache_data.write(way, set, word, r_cleanup_data[word].read(), 0xF); 5021 } 5022 } 5023 5024 5025 m_cache_directory.write(set, way, entry); 5026 5027 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5028 5029 #if DEBUG_MEMC_CLEANUP 5030 if(m_debug) 5031 std::cout << " <MEMC " << name() 5032 << " CLEANUP_DIR_WRITE> Update directory:" 5033 << std::hex << " address = " << r_cleanup_nline.read() * m_words * 4 5034 << " / dir_id = " << entry.owner.srcid 5035 << " / dir_ins = " << entry.owner.inst 5036 << " / count = " << entry.count 5037 << " / is_cnt = " << entry.is_cnt << std::endl; 5038 #endif 5039 5040 break; 5041 } 5042 5043 ////////////////////// 5044 case CLEANUP_HEAP_REQ: // get the lock to the HEAP directory 5045 { 5046 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 5047 5048 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 5049 5050 #if DEBUG_MEMC_CLEANUP 5051 if(m_debug) 5052 std::cout << " <MEMC " << name() 5053 << " CLEANUP_HEAP_REQ> HEAP lock acquired " << std::endl; 5054 #endif 5055 break; 5056 } 5057 ////////////////////// 5058 case CLEANUP_HEAP_LOCK: // two cases are handled in this state : 5059 // 1. the matching copy is directly in the directory 5060 // 2. the matching copy is the first copy in the heap 5061 { 5062 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5063 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5064 5065 size_t way = r_cleanup_way.read(); 5066 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 5067 5068 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 5069 bool last = (heap_entry.next == r_cleanup_ptr.read()); 5070 5071 // match_dir computation 5072 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5073 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5074 bool match_dir = match_dir_srcid and match_dir_inst; 5075 5076 // match_heap computation 5077 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5078 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5079 bool match_heap = match_heap_srcid and match_heap_inst; 5080 5081 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 5082 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5083 r_cleanup_prev_inst = heap_entry.owner.inst; 5084 5085 assert( (not last or match_dir or match_heap) and 5086 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 5087 5088 assert( (not match_dir or not match_heap) and 5089 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 5090 5091 DirectoryEntry dir_entry; 5092 dir_entry.valid = true; 5093 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 5094 dir_entry.dirty = r_cleanup_dirty.read(); 5095 dir_entry.tag = r_cleanup_tag.read(); 5096 dir_entry.lock = r_cleanup_lock.read(); 5097 dir_entry.count = r_cleanup_count.read()-1; 5098 5099 // the matching copy is registered in the directory and 5100 // it must be replaced by the first copy registered in 5101 // the heap. 
The corresponding entry must be freed 5102 if(match_dir) 5103 { 5104 dir_entry.ptr = heap_entry.next; 5105 dir_entry.owner.srcid = heap_entry.owner.srcid; 5106 dir_entry.owner.inst = heap_entry.owner.inst; 5107 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5108 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5109 } 5110 5111 // the matching copy is the first copy in the heap 5112 // It must be freed and the copy registered in directory 5113 // must point to the next copy in heap 5114 else if(match_heap) 5115 { 5116 dir_entry.ptr = heap_entry.next; 5117 dir_entry.owner.srcid = r_cleanup_copy.read(); 5118 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5119 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5120 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5121 } 5122 5123 // The matching copy is in the heap, but is not the first copy 5124 // The directory entry must be modified to decrement count 5125 else 5126 { 5127 dir_entry.ptr = r_cleanup_ptr.read(); 5128 dir_entry.owner.srcid = r_cleanup_copy.read(); 5129 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5130 r_cleanup_next_ptr = heap_entry.next; 5131 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5132 } 5133 5134 m_cache_directory.write(set,way,dir_entry); 5135 5136 #if DEBUG_MEMC_CLEANUP 5137 if(m_debug) 5138 std::cout << " <MEMC " << name() 5139 << " CLEANUP_HEAP_LOCK> Checks matching:" 5140 << " address = " << r_cleanup_nline.read() * m_words * 4 5141 << " / dir_id = " << r_cleanup_copy.read() 5142 << " / dir_ins = " << r_cleanup_copy_inst.read() 5143 << " / heap_id = " << heap_entry.owner.srcid 5144 << " / heap_ins = " << heap_entry.owner.inst 5145 << " / search_id = " << r_cleanup_srcid.read() 5146 << " / search_ins = " << r_cleanup_inst.read() << std::endl; 5147 #endif 5148 break; 5149 } 5150 //////////////////////// 5151 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 5152 // is in the heap, but not the first in linked list 5153 { 5154 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5155 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5156 5157 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 5158 5159 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 5160 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5161 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5162 bool match_heap = match_heap_srcid and match_heap_inst; 5163 5164 assert( (not last or match_heap) and 5165 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 5166 5167 // the matching copy must be removed 5168 if(match_heap) 5169 { 5170 // re-use ressources 5171 r_cleanup_ptr = heap_entry.next; 5172 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 5173 } 5174 // test the next in the linked list 5175 else 5176 { 5177 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 5178 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5179 r_cleanup_prev_inst = heap_entry.owner.inst; 5180 r_cleanup_next_ptr = heap_entry.next; 5181 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5182 } 5183 5184 #if DEBUG_MEMC_CLEANUP 5185 if(m_debug) 5186 { 5187 if(not match_heap) 5188 { 5189 std::cout 5190 << " <MEMC " << name() 5191 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 5192 << std::endl; 5193 } 5194 else 5195 { 5196 std::cout 5197 << " <MEMC " << name() 5198 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 5199 << std::endl; 5200 } 5201 std::cout 5202 << " address = " << r_cleanup_nline.read() * m_words * 4 5203 << " / heap_id = " << heap_entry.owner.srcid 5204 << " / heap_ins = " << 
heap_entry.owner.inst 5205 << " / search_id = " << r_cleanup_srcid.read() 5206 << " / search_ins = " << r_cleanup_inst.read() 5207 << " / last = " << last 5208 << std::endl; 5209 } 5210 #endif 5211 break; 5212 } 5213 //////////////////////// 5214 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 5215 { 5216 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5217 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5218 5219 HeapEntry heap_entry; 5220 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 5221 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 5222 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 5223 5224 if (last) // this is the last entry of the list of copies 5225 { 5226 heap_entry.next = r_cleanup_prev_ptr.read(); 5227 } 5228 else // this is not the last entry 5229 { 5230 heap_entry.next = r_cleanup_ptr.read(); 5231 } 5232 5233 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 5234 5235 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5236 5237 #if DEBUG_MEMC_CLEANUP 5238 if(m_debug) 5239 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 5240 << " Remove the copy in the linked list" << std::endl; 5241 #endif 5242 break; 5243 } 5244 /////////////////////// 5245 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 5246 // and becomes the head of the list of free entries 5247 { 5248 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5249 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5250 5251 HeapEntry heap_entry; 5252 heap_entry.owner.srcid = 0; 5253 heap_entry.owner.inst = false; 5254 5255 if(m_heap.is_full()) 5256 { 5257 heap_entry.next = r_cleanup_next_ptr.read(); 5258 } 5259 else 5260 { 5261 heap_entry.next = m_heap.next_free_ptr(); 5262 } 5263 5264 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 5265 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 5266 m_heap.unset_full(); 5267 5268 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5269 5270 #if DEBUG_MEMC_CLEANUP 5271 if(m_debug) 5272 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 5273 << " Update the list of free entries" << std::endl; 5274 #endif 5275 break; 5276 } 5277 ////////////////////// 5278 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 5279 // invalidate transaction matching the cleanup 5280 { 5281 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 5282 5283 size_t index = 0; 5284 bool match_inval; 5285 5286 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 5287 5288 if ( not match_inval ) // no pending inval 5289 { 5290 /*ODCCP*/ // If cleanup is on no coherent line we go to CLEANUP_IXR_REQ 5291 if (r_cleanup_ncc.read()) 5292 { 5293 r_cleanup_fsm = CLEANUP_IXR_REQ; 5294 } 5295 else 5296 { 5297 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5298 } 5299 5300 #if DEBUG_MEMC_CLEANUP 5301 if(m_debug) 5302 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 5303 << " Unexpected cleanup with no corresponding IVT entry:" 5304 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 5305 #endif 5306 break; 5307 } 5308 else // pending inval in IVT 5309 { 5310 r_cleanup_write_srcid = m_ivt.srcid(index); 5311 r_cleanup_write_trdid = m_ivt.trdid(index); 5312 r_cleanup_write_pktid = m_ivt.pktid(index); 5313 r_cleanup_need_rsp = m_ivt.need_rsp(index); 5314 r_cleanup_need_ack = m_ivt.need_ack(index); 5315 r_cleanup_index = index; 5316 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 5317 5318 #if DEBUG_MEMC_CLEANUP 5319 if(m_debug) 5320 std::cout << " <MEMC " << 
name() << " CLEANUP_IVT_LOCK>" 5321 << " Cleanup matching pending invalidate transaction on IVT:" 5322 << " address = " << std::hex << (r_cleanup_nline.read()*m_words*4) 5323 << " / ivt_entry = " << index << std::endl; 5324 #endif 5325 } 5326 break; 5327 } 5328 /////////////////////////// 5329 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 5330 // and test if last 5331 { 5332 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5333 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 5334 5335 size_t count = 0; 5336 m_ivt.decrement(r_cleanup_index.read(), count); 5337 5338 if(count == 0) // multi inval transaction completed 5339 { 5340 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 5341 } 5342 else // multi inval transaction not completed 5343 { 5344 /*ODCCP*/ // If cleanup is on no coherent line we go to CLEANUP_IXR_REQ 5345 if (r_cleanup_ncc.read()) 5346 { 5347 r_cleanup_fsm = CLEANUP_IXR_REQ; 5348 } 5349 else 5350 { 5351 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5352 } 5353 } 5354 5355 #if DEBUG_MEMC_CLEANUP 5356 if(m_debug) 5357 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 5358 << " Decrement response counter in IVT:" 5359 << " IVT_index = " << r_cleanup_index.read() 5360 << " / rsp_count = " << count << std::endl; 5361 #endif 5362 break; 5363 } 5364 /////////////////////// 5365 case CLEANUP_IVT_CLEAR: // Clear IVT entry 5366 // Acknowledge CONFIG FSM if required 5367 { 5368 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5369 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 5370 5371 m_ivt.clear(r_cleanup_index.read()); 5372 5373 if ( r_cleanup_need_ack.read() ) 5374 { 5375 assert( (r_config_rsp_lines.read() > 0) and 5376 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 5377 5378 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 5379 } 5380 5381 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 5382 else if ( r_cleanup_ncc.read() ) r_cleanup_fsm = CLEANUP_IXR_REQ; 5383 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 5384 5385 #if DEBUG_MEMC_CLEANUP 5386 if(m_debug) 5387 std::cout << " <MEMC " << name() 5388 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 5389 << " IVT_index = " << r_cleanup_index.read() << std::endl; 5390 #endif 5391 break; 5392 } 5393 /////////////////////// 5394 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 5395 // wait if pending request to the TGT_RSP FSM 5396 { 5397 if(r_cleanup_to_tgt_rsp_req.read()) break; 5398 5399 // no pending request 5400 r_cleanup_to_tgt_rsp_req = true; 5401 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 5402 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 5403 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 5404 5405 /*ODCCP*/ // If cleanup is on no coherent line we go to CLEANUP_IXR_REQ 5406 if (r_cleanup_ncc.read()) 5407 { 5408 r_cleanup_fsm = CLEANUP_IXR_REQ; 5409 } 5410 else 5411 { 5412 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5413 } 5414 5415 #if DEBUG_MEMC_CLEANUP 5416 if(m_debug) 5417 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 5418 << " Send a response to a previous write request: " 5419 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 5420 << " / rtrdid = " << r_cleanup_write_trdid.read() 5421 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 5422 #endif 5423 break; 5424 } 5425 5426 /*ODCCP*/ 5427 case CLEANUP_IXR_REQ: 5428 { 5429 5430 //Send a request to the ixr to write the data in the XRAM and set an entry in the TRT. 
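    // Two variants are implemented below, selected at compile time:
    // - with ODCCP_NON_INCLUSIVE : if no write transaction for this line is pending
    //   in the TRT, the put request to the IXR_CMD FSM is raised only when the
    //   cleanup carries data, using the reserved index m_trt_lines (no TRT entry is
    //   allocated); otherwise the FSM loops through CLEANUP_WAIT until the pending
    //   transaction completes;
    // - in the default (inclusive) mode : a matching TRT entry must exist; when the
    //   cleanup carries data this entry is rewritten as a PUT with the cleanup data,
    //   and the request forwarded to the IXR_CMD FSM uses the index of that entry.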
5431 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CLEANUP) 5432 { 5433 if(!r_cleanup_to_ixr_cmd_req.read()) 5434 { 5435 size_t index = 0; 5436 bool hit = m_trt.hit_write(r_cleanup_nline.read(), &index); // we save the index of the matching entry in TRT 5437 #if ODCCP_NON_INCLUSIVE 5438 if (!hit) 5439 { 5440 for(size_t i = 0; i < m_words; i++){ 5441 r_cleanup_to_ixr_cmd_data[i] = r_cleanup_data[i]; 5442 } 5443 r_cleanup_to_ixr_cmd_req = r_cleanup_contains_data.read(); 5444 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 5445 r_cleanup_to_ixr_cmd_index = m_trt_lines; 5446 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 5447 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 5448 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = true; 5449 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5450 } 5451 else // wait until inval done 5452 { 5453 r_cleanup_fsm = CLEANUP_WAIT; 5454 } 5455 5456 #else 5457 if (!hit) 5458 { 5459 std::cout << "assert on line " << r_cleanup_nline.read() << " | at cycle " << std::dec <<m_cpt_cycles << std::endl; 5460 for (size_t i = 0; i < m_trt_lines; i++) m_trt.print(i); 5461 } 5462 assert (hit and "CLEANUP_IXR_REQ found no matching entry in TRT"); 5463 5464 r_cleanup_to_ixr_cmd_req = true; 5465 5466 if (r_cleanup_contains_data.read()) 5467 { 5468 std::vector<data_t> data_vector; 5469 data_vector.clear(); 5470 5471 for(size_t i=0; i<m_words; i++) 5472 { 5473 data_vector.push_back(r_cleanup_data[i]); 5474 } 5475 5476 m_trt.set(index, 5477 false, // write to XRAM 5478 r_cleanup_nline.read(), // line index 5479 0, 5480 0, 5481 0, 5482 false, 5483 0, 5484 0, 5485 std::vector<be_t> (m_words,0), 5486 data_vector); 5487 } 5488 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 5489 r_cleanup_to_ixr_cmd_index = index; 5490 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 5491 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 5492 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = r_cleanup_contains_data.read(); 5493 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5494 #endif 5495 5496 #if DEBUG_MEMC_CLEANUP 5497 if(m_debug) 5498 { 5499 std::cout 5500 << " <MEMC " << name() 5501 << " CLEANUP_IXR_REQ> Send a put request to the ixr:" 5502 << " contains data ? = " << std::dec << r_cleanup_contains_data.read() 5503 << " srcid = " << std::dec << r_cleanup_srcid.read() 5504 << " pktid = " << std::dec << r_cleanup_pktid.read() 5505 << " trdid = " << std::dec << index 5506 << " nline = " << std::hex << r_cleanup_nline.read() << std::dec 5507 << std::endl; 5508 } 5509 #endif 5510 } 5511 else 5512 { 5513 r_cleanup_fsm = CLEANUP_WAIT; 5514 } 5515 } 5516 break; 5517 } 5518 5519 case CLEANUP_WAIT : 5520 { 5521 r_cleanup_fsm = CLEANUP_IXR_REQ; 5522 break; 5523 } 5524 5525 //////////////////////// 5526 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 5527 // on the coherence CLACK network. 
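    // This state only waits until the CLACK flit is accepted on the p_dspin_clack
    // port (p_dspin_clack.read asserted) before returning to CLEANUP_IDLE; the
    // values traced below (address, way, srcid) identify the cleanup being acknowledged.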
5528 { 5529 if(not p_dspin_clack.read) break; 5530 5531 r_cleanup_fsm = CLEANUP_IDLE; 5532 5533 #if DEBUG_MEMC_CLEANUP 5534 if(m_debug) 5535 std::cout << " <MEMC " << name() 5536 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 5537 << " address = " << std::hex << r_cleanup_nline.read()*m_words*4 5538 << " / way = " << std::dec << r_cleanup_way.read() 5539 << " / srcid = " << std::dec << r_cleanup_srcid.read() 5540 << std::endl; 5541 #endif 5542 break; 5543 } 5544 } // end switch cleanup fsm 5545 5546 //////////////////////////////////////////////////////////////////////////////////// 5547 // CAS FSM 5548 //////////////////////////////////////////////////////////////////////////////////// 5549 // The CAS FSM handles the CAS (Compare And Swap) atomic commands. 5550 // 5551 // This command contains two or four flits: 5552 // - In case of 32 bits atomic access, the first flit contains the value read 5553 // by a previous READ instruction, the second flit contains the value to be written. 5554 // - In case of 64 bits atomic access, the first 2 flits contain the value read 5555 // by a previous READ instruction, the next 2 flits contain the value to be written. 5556 // 5557 // The target address is cacheable. If it is replicated in other L1 caches 5558 // than the writer, a coherence operation is done. 5559 // 5560 // It accesses the directory to check hit / miss. 5561 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 5562 // If a read transaction to the XRAM for this line already exists, 5563 // or if the transaction table is full, it goes to the WAIT state 5564 // to release the locks and try again. When the GET transaction has been 5565 // launched, it goes to the WAIT state and tries again. 5566 // The CAS request is not consumed in the FIFO until a HIT is obtained. 5567 // - In case of hit... 5568 /////////////////////////////////////////////////////////////////////////////////// 5569 5570 //std::cout << std::endl << "cas_fsm" << std::endl; 5571 5572 switch(r_cas_fsm.read()) 5573 { 5574 //////////// 5575 case CAS_IDLE: // fill the local rdata buffers 5576 { 5577 if (m_cmd_cas_addr_fifo.rok() ) 5578 { 5579 5580 #if DEBUG_MEMC_CAS 5581 if(m_debug) 5582 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 5583 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 5584 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 5585 << " wdata = " << m_cmd_cas_wdata_fifo.read() 5586 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 5587 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 5588 #endif 5589 if(m_cmd_cas_eop_fifo.read()) 5590 { 5591 m_cpt_cas++; 5592 r_cas_fsm = CAS_DIR_REQ; 5593 } 5594 else // we keep the last word in the FIFO 5595 { 5596 cmd_cas_fifo_get = true; 5597 } 5598 5599 // We fill the two buffers 5600 if(r_cas_cpt.read() < 2) // 32 bits access 5601 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 5602 5603 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 5604 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5605 5606 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits...
5607 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 5608 5609 if(r_cas_cpt.read() ==2) 5610 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5611 5612 r_cas_cpt = r_cas_cpt.read() +1; 5613 } 5614 break; 5615 } 5616 ///////////////// 5617 case CAS_DIR_REQ: 5618 { 5619 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 5620 { 5621 r_cas_fsm = CAS_DIR_LOCK; 5622 } 5623 5624 #if DEBUG_MEMC_CAS 5625 if(m_debug) 5626 std::cout << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " << std::endl; 5627 #endif 5628 break; 5629 } 5630 ///////////////// 5631 case CAS_DIR_LOCK: // Read the directory 5632 { 5633 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5634 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 5635 5636 size_t way = 0; 5637 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 5638 5639 r_cas_is_cnt = entry.is_cnt; 5640 r_cas_dirty = entry.dirty; 5641 r_cas_tag = entry.tag; 5642 r_cas_way = way; 5643 r_cas_copy = entry.owner.srcid; 5644 r_cas_copy_inst = entry.owner.inst; 5645 r_cas_ptr = entry.ptr; 5646 r_cas_count = entry.count; 5647 5648 if(entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 5649 else r_cas_fsm = CAS_MISS_TRT_LOCK; 5650 5651 #if DEBUG_MEMC_CAS 5652 if(m_debug) 5653 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 5654 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5655 << " / hit = " << std::dec << entry.valid 5656 << " / count = " << entry.count 5657 << " / is_cnt = " << entry.is_cnt << std::endl; 5658 #endif 5659 5660 break; 5661 } 5662 ///////////////////// 5663 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 5664 // and check data change in cache 5665 { 5666 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5667 "MEMC ERROR in CAS_DIR_HIT_READ: Bad DIR allocation"); 5668 5669 size_t way = r_cas_way.read(); 5670 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5671 5672 // update directory (lock & dirty bits) 5673 DirectoryEntry entry; 5674 entry.valid = true; 5675 entry.is_cnt = r_cas_is_cnt.read(); 5676 entry.dirty = true; 5677 entry.lock = true; 5678 entry.tag = r_cas_tag.read(); 5679 entry.owner.srcid = r_cas_copy.read(); 5680 entry.owner.inst = r_cas_copy_inst.read(); 5681 entry.count = r_cas_count.read(); 5682 entry.ptr = r_cas_ptr.read(); 5683 5684 m_cache_directory.write(set, way, entry); 5685 5686 // Store data from cache in buffer to do the comparison in next state 5687 m_cache_data.read_line(way, set, r_cas_data); 5688 5689 r_cas_fsm = CAS_DIR_HIT_COMPARE; 5690 5691 #if DEBUG_MEMC_CAS 5692 if(m_debug) 5693 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 5694 << " cache and store it in buffer" << std::endl; 5695 #endif 5696 break; 5697 } 5698 //////////////////////// 5699 case CAS_DIR_HIT_COMPARE: 5700 { 5701 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5702 5703 // check data change 5704 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 5705 5706 if(r_cas_cpt.read() == 4) // 64 bits CAS 5707 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 5708 5709 // to avoid livelock, force the atomic access to fail pseudo-randomly 5710 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 5711 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 5712 5713 if(ok and not forced_fail) r_cas_fsm = CAS_DIR_HIT_WRITE; 5714 else r_cas_fsm = CAS_RSP_FAIL; 5715 5716 #if DEBUG_MEMC_CAS 5717 if(m_debug) 5718 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare old and new data" 5719 << " / expected 
value = " << std::hex << r_cas_rdata[0].read() 5720 << " / actual value = " << std::hex << r_cas_data[word].read() 5721 << " / forced_fail = " << std::dec << forced_fail << std::endl; 5722 #endif 5723 break; 5724 } 5725 ////////////////////// 5726 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 5727 // write data in cache if no CC request 5728 { 5729 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5730 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 5731 5732 // The CAS is a success => sw access to the llsc_global_table 5733 m_llsc_table.sw(m_cmd_cas_addr_fifo.read(), m_cmd_cas_addr_fifo.read()); 5734 5735 // test coherence request 5736 if(r_cas_count.read()) // replicated line 5737 { 5738 if(r_cas_is_cnt.read()) 5739 { 5740 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 5741 5742 #if DEBUG_MEMC_CAS 5743 if(m_debug) 5744 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5745 << " Broacast Inval required" 5746 << " / copies = " << r_cas_count.read() << std::endl; 5747 #endif 5748 } 5749 else if( not r_cas_to_cc_send_multi_req.read() and 5750 not r_cas_to_cc_send_brdcast_req.read() ) 5751 { 5752 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5753 5754 #if DEBUG_MEMC_CAS 5755 if(m_debug) 5756 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5757 << " Multi Inval required" 5758 << " / copies = " << r_cas_count.read() << std::endl; 5759 #endif 5760 } 5761 else 5762 { 5763 r_cas_fsm = CAS_WAIT; 5764 5765 #if DEBUG_MEMC_CAS 5766 if(m_debug) 5767 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5768 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 5769 #endif 5770 } 5771 } 5772 else // no copies 5773 { 5774 size_t way = r_cas_way.read(); 5775 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5776 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5777 5778 // cache update 5779 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5780 if(r_cas_cpt.read() == 4) 5781 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5782 5783 r_cas_fsm = CAS_RSP_SUCCESS; 5784 5785 #if DEBUG_MEMC_CAS 5786 if(m_debug) 5787 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 5788 << " way = " << std::dec << way 5789 << " / set = " << set 5790 << " / word = " << word 5791 << " / value = " << r_cas_wdata.read() 5792 << " / count = " << r_cas_count.read() 5793 << " / global_llsc_table access" << std::endl; 5794 #endif 5795 } 5796 break; 5797 } 5798 ///////////////// 5799 case CAS_UPT_LOCK: // try to register the transaction in UPT 5800 // and write data in cache if successful registration 5801 // releases locks to retry later if UPT full 5802 { 5803 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 5804 { 5805 bool wok = false; 5806 size_t index = 0; 5807 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5808 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5809 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5810 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5811 size_t nb_copies = r_cas_count.read(); 5812 5813 wok = m_upt.set( true, // it's an update transaction 5814 false, // it's not a broadcast 5815 true, // response required 5816 false, // no acknowledge required 5817 srcid, 5818 trdid, 5819 pktid, 5820 nline, 5821 nb_copies, 5822 index); 5823 if(wok) // coherence transaction registered in UPT 5824 { 5825 // cache update 5826 size_t way = r_cas_way.read(); 5827 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5828 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 
5829 5830 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5831 if(r_cas_cpt.read() ==4) 5832 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5833 5834 r_cas_upt_index = index; 5835 r_cas_fsm = CAS_UPT_HEAP_LOCK; 5836 } 5837 else // releases the locks protecting UPT and DIR UPT full 5838 { 5839 r_cas_fsm = CAS_WAIT; 5840 } 5841 5842 #if DEBUG_MEMC_CAS 5843 if(m_debug) 5844 std::cout << " <MEMC " << name() 5845 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 5846 << " / wok = " << wok 5847 << " / address = " << std::hex << nline*m_words*4 5848 << " / count = " << nb_copies << std::endl; 5849 #endif 5850 } 5851 break; 5852 } 5853 ///////////// 5854 case CAS_WAIT: // release all locks and retry from beginning 5855 { 5856 5857 #if DEBUG_MEMC_CAS 5858 if(m_debug) 5859 std::cout << " <MEMC " << name() << " CAS_WAIT> Release all locks" << std::endl; 5860 #endif 5861 r_cas_fsm = CAS_DIR_REQ; 5862 break; 5863 } 5864 ////////////////////// 5865 case CAS_UPT_HEAP_LOCK: // lock the heap 5866 { 5867 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5868 { 5869 5870 #if DEBUG_MEMC_CAS 5871 if(m_debug) 5872 { 5873 std::cout << " <MEMC " << name() 5874 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 5875 } 5876 #endif 5877 r_cas_fsm = CAS_UPT_REQ; 5878 m_cpt_cas_fsm_n_heap_lock++; 5879 } 5880 5881 m_cpt_cas_fsm_heap_lock++; 5882 5883 break; 5884 } 5885 //////////////// 5886 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 5887 { 5888 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 5889 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5890 5891 if(!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 5892 { 5893 r_cas_to_cc_send_brdcast_req = false; 5894 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5895 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5896 r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5897 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 5898 5899 if(r_cas_cpt.read() == 4) 5900 { 5901 r_cas_to_cc_send_is_long = true; 5902 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 5903 } 5904 else 5905 { 5906 r_cas_to_cc_send_is_long = false; 5907 r_cas_to_cc_send_wdata_high = 0; 5908 } 5909 5910 // We put the first copy in the fifo 5911 cas_to_cc_send_fifo_put = true; 5912 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 5913 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 5914 if(r_cas_count.read() == 1) // one single copy 5915 { 5916 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5917 // update responses 5918 cmd_cas_fifo_get = true; 5919 r_cas_to_cc_send_multi_req = true; 5920 r_cas_cpt = 0; 5921 } 5922 else // several copies 5923 { 5924 r_cas_fsm = CAS_UPT_NEXT; 5925 } 5926 5927 #if DEBUG_MEMC_CAS 5928 if(m_debug) 5929 { 5930 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 5931 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5932 << " / wdata = " << std::hex << r_cas_wdata.read() 5933 << " / srcid = " << std::dec << r_cas_copy.read() 5934 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 5935 } 5936 #endif 5937 } 5938 break; 5939 } 5940 ///////////////// 5941 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 5942 { 5943 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5944 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5945 5946 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 5947 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 5948 
cas_to_cc_send_fifo_inst = entry.owner.inst; 5949 cas_to_cc_send_fifo_put = true; 5950 5951 if(m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 5952 { 5953 r_cas_ptr = entry.next; 5954 if(entry.next == r_cas_ptr.read()) // last copy 5955 { 5956 r_cas_to_cc_send_multi_req = true; 5957 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5958 // all update responses 5959 cmd_cas_fifo_get = true; 5960 r_cas_cpt = 0; 5961 } 5962 } 5963 5964 #if DEBUG_MEMC_CAS 5965 if(m_debug) 5966 { 5967 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 5968 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5969 << " / wdata = " << std::hex << r_cas_wdata.read() 5970 << " / srcid = " << std::dec << entry.owner.srcid 5971 << " / inst = " << std::dec << entry.owner.inst << std::endl; 5972 } 5973 #endif 5974 break; 5975 } 5976 ///////////////////// 5977 case CAS_BC_TRT_LOCK: // get TRT lock to check TRT not full 5978 { 5979 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5980 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 5981 5982 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5983 { 5984 size_t wok_index = 0; 5985 bool wok = !m_trt.full(wok_index); 5986 if( wok ) 5987 { 5988 r_cas_trt_index = wok_index; 5989 r_cas_fsm = CAS_BC_IVT_LOCK; 5990 } 5991 else 5992 { 5993 r_cas_fsm = CAS_WAIT; 5994 } 5995 5996 #if DEBUG_MEMC_CAS 5997 if(m_debug) 5998 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 5999 << " : wok = " << wok << " / index = " << wok_index << std::endl; 6000 #endif 6001 } 6002 break; 6003 } 6004 ///////////////////// 6005 case CAS_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 6006 { 6007 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6008 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas DIR allocation"); 6009 6010 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6011 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas TRT allocation"); 6012 6013 if( r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS ) 6014 { 6015 // register broadcast inval transaction in IVT 6016 bool wok = false; 6017 size_t index = 0; 6018 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6019 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6020 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6021 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6022 size_t nb_copies = r_cas_count.read(); 6023 6024 wok = m_ivt.set( false, // it's an inval transaction 6025 true, // it's a broadcast 6026 true, // response required 6027 false, // no acknowledge required 6028 srcid, 6029 trdid, 6030 pktid, 6031 nline, 6032 nb_copies, 6033 index); 6034 #if DEBUG_MEMC_CAS 6035 if( m_debug and wok ) 6036 std::cout << " <MEMC " << name() << " CAS_BC_IVT_LOCK> Register broadcast inval in IVT" 6037 << " / copies = " << r_cas_count.read() << std::endl; 6038 #endif 6039 r_cas_upt_index = index; 6040 if( wok ) r_cas_fsm = CAS_BC_DIR_INVAL; 6041 else r_cas_fsm = CAS_WAIT; 6042 } 6043 break; 6044 } 6045 ////////////////////// 6046 case CAS_BC_DIR_INVAL: // Register PUT transaction in TRT, 6047 // and inval the DIR entry 6048 { 6049 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6050 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 6051 6052 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6053 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 6054 6055 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 6056 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 6057 6058 // set TRT 6059 
std::vector<data_t> data_vector; 6060 data_vector.clear(); 6061 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6062 for(size_t i=0; i<m_words; i++) 6063 { 6064 if(i == word) // first modified word 6065 data_vector.push_back( r_cas_wdata.read() ); 6066 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 6067 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 6068 else // unmodified words 6069 data_vector.push_back( r_cas_data[i].read() ); 6070 } 6071 m_trt.set( r_cas_trt_index.read(), 6072 false, // PUT request 6073 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 6074 0, 6075 0, 6076 0, 6077 false, // not a processor read 6078 0, 6079 0, 6080 std::vector<be_t> (m_words,0), 6081 data_vector ); 6082 6083 // invalidate directory entry 6084 DirectoryEntry entry; 6085 entry.valid = false; 6086 entry.dirty = false; 6087 entry.tag = 0; 6088 entry.is_cnt = false; 6089 entry.lock = false; 6090 entry.count = 0; 6091 entry.owner.srcid = 0; 6092 entry.owner.inst = false; 6093 entry.ptr = 0; 6094 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6095 size_t way = r_cas_way.read(); 6096 6097 m_cache_directory.write(set, way, entry); 6098 6099 r_cas_fsm = CAS_BC_CC_SEND; 6100 6101 #if DEBUG_MEMC_CAS 6102 if(m_debug) 6103 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 6104 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 6105 #endif 6106 break; 6107 } 6108 /////////////////// 6109 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 6110 { 6111 if( not r_cas_to_cc_send_multi_req.read() and 6112 not r_cas_to_cc_send_brdcast_req.read() ) 6113 { 6114 r_cas_to_cc_send_multi_req = false; 6115 r_cas_to_cc_send_brdcast_req = true; 6116 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6117 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6118 r_cas_to_cc_send_index = 0; 6119 r_cas_to_cc_send_wdata = 0; 6120 6121 r_cas_fsm = CAS_BC_XRAM_REQ; 6122 6123 #if DEBUG_MEMC_CAS 6124 if(m_debug) 6125 std::cout << " <MEMC " << name() 6126 << " CAS_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 6127 #endif 6128 } 6129 break; 6130 } 6131 //////////////////// 6132 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a PUT transaction 6133 { 6134 if( not r_cas_to_ixr_cmd_req.read() ) 6135 { 6136 r_cas_to_ixr_cmd_req = true; 6137 r_cas_to_ixr_cmd_put = true; 6138 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6139 r_cas_fsm = CAS_IDLE; 6140 cmd_cas_fifo_get = true; 6141 r_cas_cpt = 0; 6142 6143 #if DEBUG_MEMC_CAS 6144 if(m_debug) 6145 std::cout << " <MEMC " << name() 6146 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 6147 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 6148 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 6149 #endif 6150 } 6151 break; 6152 } 6153 ///////////////// 6154 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 6155 { 6156 if( not r_cas_to_tgt_rsp_req.read() ) 6157 { 6158 cmd_cas_fifo_get = true; 6159 r_cas_cpt = 0; 6160 r_cas_to_tgt_rsp_req = true; 6161 r_cas_to_tgt_rsp_data = 1; 6162 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6163 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6164 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6165 r_cas_fsm = CAS_IDLE; 6166 6167 #if DEBUG_MEMC_CAS 6168 if(m_debug) 6169 std::cout << " <MEMC " << name() 6170 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 6171 #endif 6172 } 6173 break; 6174 } 6175 
//////////////////// 6176 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 6177 { 6178 if( not r_cas_to_tgt_rsp_req.read() ) 6179 { 6180 cmd_cas_fifo_get = true; 6181 r_cas_cpt = 0; 6182 r_cas_to_tgt_rsp_req = true; 6183 r_cas_to_tgt_rsp_data = 0; 6184 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6185 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6186 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6187 r_cas_fsm = CAS_IDLE; 6188 6189 #if DEBUG_MEMC_CAS 6190 if(m_debug) 6191 std::cout << " <MEMC " << name() 6192 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 6193 #endif 6194 } 6195 break; 6196 } 6197 /////////////////////// 6198 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 6199 { 6200 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 6201 { 6202 size_t index = 0; 6203 bool hit_read = m_trt.hit_read( 6204 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 6205 bool hit_write = m_trt.hit_write( 6206 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 6207 bool wok = not m_trt.full(index); 6208 6209 #if DEBUG_MEMC_CAS 6210 if(m_debug) 6211 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 6212 << " / hit_read = " << hit_read 6213 << " / hit_write = " << hit_write 6214 << " / wok = " << wok 6215 << " / index = " << index << std::endl; 6216 #endif 6217 6218 if(hit_read or !wok or hit_write) // missing line already requested or TRT full 6219 { 6220 r_cas_fsm = CAS_WAIT; 6221 } 6222 else 6223 { 6224 r_cas_trt_index = index; 6225 r_cas_fsm = CAS_MISS_TRT_SET; 6226 } 6227 } 6228 break; 6229 } 6230 ////////////////////// 6231 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 6232 { 6233 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6234 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 6235 6236 std::vector<be_t> be_vector; 6237 std::vector<data_t> data_vector; 6238 be_vector.clear(); 6239 data_vector.clear(); 6240 for(size_t i=0; i<m_words; i++) 6241 { 6242 be_vector.push_back(0); 6243 data_vector.push_back(0); 6244 } 6245 6246 m_trt.set( r_cas_trt_index.read(), 6247 true, // GET 6248 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 6249 m_cmd_cas_srcid_fifo.read(), 6250 m_cmd_cas_trdid_fifo.read(), 6251 m_cmd_cas_pktid_fifo.read(), 6252 false, // write request from processor 6253 0, 6254 0, 6255 std::vector<be_t>(m_words,0), 6256 std::vector<data_t>(m_words,0) ); 6257 6258 r_cas_fsm = CAS_MISS_XRAM_REQ; 6259 6260 #if DEBUG_MEMC_CAS 6261 if(m_debug) 6262 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register GET transaction in TRT" 6263 << " / address = " << std::hex << (addr_t)m_cmd_cas_addr_fifo.read() 6264 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 6265 #endif 6266 break; 6267 } 6268 ////////////////////// 6269 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM a GET request 6270 { 6271 if( not r_cas_to_ixr_cmd_req.read() ) 6272 { 6273 r_cas_to_ixr_cmd_req = true; 6274 r_cas_to_ixr_cmd_put = false; 6275 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6276 r_cas_fsm = CAS_WAIT; 6277 6278 #if DEBUG_MEMC_CAS 6279 if(m_debug) 6280 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 6281 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 6282 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 6283 #endif 6284 } 6285 break; 6286 } 6287 } // end switch r_cas_fsm 6288 6289 6290 
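//////////////////////////////////////////////////////////////////////////////
// Editor's note: illustrative sketch only (hypothetical local variables,
// not part of the component). The CAS_DIR_HIT_COMPARE state above breaks
// potential livelocks between competing atomic accesses by forcing the CAS
// to fail pseudo-randomly, roughly once every 64 attempts, when RANDOMIZE_CAS
// is set. The pseudo-random bit appears to come from a Galois-type LFSR with
// tap mask 0xd0000001. A single step of that mechanism, taken in isolation,
// amounts to:
//////////////////////////////////////////////////////////////////////////////
{
    uint32_t lfsr_sketch = 0xA5A5A5A5;                     // hypothetical seed
    bool forced_fail_sketch = ((lfsr_sketch % 64) == 0);   // ~1/64 probability
    // one LFSR step: shift right, xor the tap mask when the dropped bit is 1
    lfsr_sketch = (lfsr_sketch >> 1) ^ ((- (lfsr_sketch & 1)) & 0xd0000001);
    (void) forced_fail_sketch;                             // documentation only
    (void) lfsr_sketch;
}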
////////////////////////////////////////////////////////////////////////////// 6291 // CC_SEND FSM 6292 ////////////////////////////////////////////////////////////////////////////// 6293 // The CC_SEND fsm controls the DSPIN initiator port on the coherence 6294 // network, used to update or invalidate cache lines in L1 caches. 6295 // 6296 // It implements a round-robin priority between the four possible client FSMs 6297 // XRAM_RSP > CAS > WRITE > CONFIG 6298 // 6299 // Each FSM can request the next services: 6300 // - r_xram_rsp_to_cc_send_multi_req : multi-inval 6301 // r_xram_rsp_to_cc_send_brdcast_req : broadcast-inval 6302 // - r_write_to_cc_send_multi_req : multi-update 6303 // r_write_to_cc_send_brdcast_req : broadcast-inval 6304 // - r_cas_to_cc_send_multi_req : multi-update 6305 // r_cas_to_cc_send_brdcast_req : broadcast-inval 6306 // - r_config_to_cc_send_multi_req : multi-inval 6307 // r_config_to_cc_send_brdcast_req : broadcast-inval 6308 // 6309 // An inval request is a double DSPIN flit command containing: 6310 // 1. the index of the line to be invalidated. 6311 // 6312 // An update request is a multi-flit DSPIN command containing: 6313 // 1. the index of the cache line to be updated. 6314 // 2. the index of the first modified word in the line. 6315 // 3. the data to update 6316 /////////////////////////////////////////////////////////////////////////////// 6317 6318 //std::cout << std::endl << "cc_send_fsm" << std::endl; 6319 6320 switch(r_cc_send_fsm.read()) 6321 { 6322 ///////////////////////// 6323 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 6324 { 6325 // XRAM_RSP 6326 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6327 r_xram_rsp_to_cc_send_multi_req.read()) 6328 { 6329 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6330 m_cpt_inval++; 6331 break; 6332 } 6333 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6334 { 6335 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6336 m_cpt_inval++; 6337 break; 6338 } 6339 // CAS 6340 if(m_cas_to_cc_send_inst_fifo.rok() or 6341 r_cas_to_cc_send_multi_req.read()) 6342 { 6343 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6344 m_cpt_update++; 6345 break; 6346 } 6347 if(r_cas_to_cc_send_brdcast_req.read()) 6348 { 6349 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6350 m_cpt_inval++; 6351 break; 6352 } 6353 // WRITE 6354 if(m_write_to_cc_send_inst_fifo.rok() or 6355 r_write_to_cc_send_multi_req.read()) 6356 { 6357 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6358 m_cpt_update++; 6359 break; 6360 } 6361 if(r_write_to_cc_send_brdcast_req.read()) 6362 { 6363 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6364 m_cpt_inval++; 6365 break; 6366 } 6367 // CONFIG 6368 if(r_config_to_cc_send_multi_req.read()) 6369 { 6370 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6371 m_cpt_inval++; 6372 break; 6373 } 6374 if(r_config_to_cc_send_brdcast_req.read()) 6375 { 6376 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6377 m_cpt_inval++; 6378 break; 6379 } 6380 break; 6381 } 6382 //////////////////////// 6383 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 6384 { 6385 // CONFIG 6386 if(r_config_to_cc_send_multi_req.read()) 6387 { 6388 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6389 m_cpt_inval++; 6390 break; 6391 } 6392 if(r_config_to_cc_send_brdcast_req.read()) 6393 { 6394 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6395 m_cpt_inval++; 6396 break; 6397 } 6398 // XRAM_RSP 6399 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6400 r_xram_rsp_to_cc_send_multi_req.read()) 6401 { 6402 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6403 
m_cpt_inval++; 6404 break; 6405 } 6406 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6407 { 6408 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6409 m_cpt_inval++; 6410 break; 6411 } 6412 // CAS 6413 if(m_cas_to_cc_send_inst_fifo.rok() or 6414 r_cas_to_cc_send_multi_req.read()) 6415 { 6416 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6417 m_cpt_update++; 6418 break; 6419 } 6420 if(r_cas_to_cc_send_brdcast_req.read()) 6421 { 6422 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6423 m_cpt_inval++; 6424 break; 6425 } 6426 // WRITE 6427 if(m_write_to_cc_send_inst_fifo.rok() or 6428 r_write_to_cc_send_multi_req.read()) 6429 { 6430 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6431 m_cpt_update++; 6432 break; 6433 } 6434 if(r_write_to_cc_send_brdcast_req.read()) 6435 { 6436 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6437 m_cpt_inval++; 6438 break; 6439 } 6440 break; 6441 } 6442 /////////////////////////// 6443 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 6444 { 6445 // CAS 6446 if(m_cas_to_cc_send_inst_fifo.rok() or 6447 r_cas_to_cc_send_multi_req.read()) 6448 { 6449 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6450 m_cpt_update++; 6451 break; 6452 } 6453 if(r_cas_to_cc_send_brdcast_req.read()) 6454 { 6455 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6456 m_cpt_inval++; 6457 break; 6458 } 6459 // WRITE 6460 if(m_write_to_cc_send_inst_fifo.rok() or 6461 r_write_to_cc_send_multi_req.read()) 6462 { 6463 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6464 m_cpt_update++; 6465 break; 6466 } 6467 6468 if(r_write_to_cc_send_brdcast_req.read()) 6469 { 6470 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6471 m_cpt_inval++; 6472 break; 6473 } 6474 // CONFIG 6475 if(r_config_to_cc_send_multi_req.read()) 6476 { 6477 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6478 m_cpt_inval++; 6479 break; 6480 } 6481 if(r_config_to_cc_send_brdcast_req.read()) 6482 { 6483 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6484 m_cpt_inval++; 6485 break; 6486 } 6487 // XRAM_RSP 6488 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6489 r_xram_rsp_to_cc_send_multi_req.read()) 6490 { 6491 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6492 m_cpt_inval++; 6493 break; 6494 } 6495 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6496 { 6497 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6498 m_cpt_inval++; 6499 break; 6500 } 6501 break; 6502 } 6503 ////////////////////// 6504 case CC_SEND_CAS_IDLE: // WRITE FSM has highest priority 6505 { 6506 if(m_write_to_cc_send_inst_fifo.rok() or 6507 r_write_to_cc_send_multi_req.read()) 6508 { 6509 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6510 m_cpt_update++; 6511 break; 6512 } 6513 if(r_write_to_cc_send_brdcast_req.read()) 6514 { 6515 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6516 m_cpt_inval++; 6517 break; 6518 } 6519 // CONFIG 6520 if(r_config_to_cc_send_multi_req.read()) 6521 { 6522 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6523 m_cpt_inval++; 6524 break; 6525 } 6526 if(r_config_to_cc_send_brdcast_req.read()) 6527 { 6528 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6529 m_cpt_inval++; 6530 break; 6531 } 6532 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6533 r_xram_rsp_to_cc_send_multi_req.read()) 6534 { 6535 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6536 m_cpt_inval++; 6537 break; 6538 } 6539 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6540 { 6541 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6542 m_cpt_inval++; 6543 break; 6544 } 6545 if(m_cas_to_cc_send_inst_fifo.rok() or 6546 r_cas_to_cc_send_multi_req.read()) 6547 { 6548 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6549
m_cpt_update++; 6550 break; 6551 } 6552 if(r_cas_to_cc_send_brdcast_req.read()) 6553 { 6554 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6555 m_cpt_inval++; 6556 break; 6557 } 6558 break; 6559 } 6560 ///////////////////////////////// 6561 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 6562 { 6563 if(m_config_to_cc_send_inst_fifo.rok()) 6564 { 6565 if(not p_dspin_m2p.read) break; 6566 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 6567 break; 6568 } 6569 if(r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 6570 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 6571 break; 6572 } 6573 //////////////////////////////// 6574 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 6575 { 6576 if(not p_dspin_m2p.read) break; 6577 m_cpt_inval_mult++; 6578 config_to_cc_send_fifo_get = true; 6579 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6580 6581 #if DEBUG_MEMC_CC_SEND 6582 if(m_debug) 6583 std::cout << " <MEMC " << name() 6584 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 6585 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 6586 #endif 6587 break; 6588 } 6589 /////////////////////////////////// 6590 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 6591 { 6592 if(not p_dspin_m2p.read) break; 6593 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 6594 break; 6595 } 6596 ////////////////////////////////// 6597 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 6598 { 6599 if(not p_dspin_m2p.read) break; 6600 m_cpt_inval_brdcast++; 6601 r_config_to_cc_send_brdcast_req = false; 6602 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 6603 6604 #if DEBUG_MEMC_CC_SEND 6605 if(m_debug) 6606 std::cout << " <MEMC " << name() 6607 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 6608 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 6609 #endif 6610 break; 6611 } 6612 /////////////////////////////////// 6613 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 6614 { 6615 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 6616 { 6617 if(not p_dspin_m2p.read) break; 6618 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 6619 break; 6620 } 6621 if(r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 6622 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6623 break; 6624 } 6625 ////////////////////////////////// 6626 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 6627 { 6628 if(not p_dspin_m2p.read) break; 6629 m_cpt_inval_mult++; 6630 xram_rsp_to_cc_send_fifo_get = true; 6631 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6632 6633 #if DEBUG_MEMC_CC_SEND 6634 if(m_debug) 6635 std::cout << " <MEMC " << name() 6636 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 6637 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6638 #endif 6639 break; 6640 } 6641 ///////////////////////////////////// 6642 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 6643 { 6644 if(not p_dspin_m2p.read) break; 6645 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 6646 break; 6647 } 6648 //////////////////////////////////// 6649 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 6650 { 6651 if(not p_dspin_m2p.read) break; 6652 m_cpt_inval_brdcast++; 6653 r_xram_rsp_to_cc_send_brdcast_req = false; 6654 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6655 6656 #if DEBUG_MEMC_CC_SEND 6657 if(m_debug) 
6658 std::cout << " <MEMC " << name() 6659 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 6660 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6661 #endif 6662 break; 6663 } 6664 ////////////////////////////////// 6665 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 6666 { 6667 if(not p_dspin_m2p.read) break; 6668 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 6669 break; 6670 } 6671 ///////////////////////////////// 6672 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 6673 { 6674 if(not p_dspin_m2p.read) break; 6675 6676 m_cpt_inval_brdcast++; 6677 6678 r_write_to_cc_send_brdcast_req = false; 6679 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6680 6681 #if DEBUG_MEMC_CC_SEND 6682 if(m_debug) 6683 std::cout << " <MEMC " << name() 6684 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 6685 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 6686 #endif 6687 break; 6688 } 6689 /////////////////////////////// 6690 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 6691 { 6692 if(m_write_to_cc_send_inst_fifo.rok()) 6693 { 6694 if(not p_dspin_m2p.read) break; 6695 6696 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 6697 break; 6698 } 6699 6700 if(r_write_to_cc_send_multi_req.read()) 6701 { 6702 r_write_to_cc_send_multi_req = false; 6703 } 6704 6705 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6706 break; 6707 } 6708 ////////////////////////////// 6709 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 6710 { 6711 if(not p_dspin_m2p.read) break; 6712 m_cpt_update_mult++; 6713 6714 r_cc_send_cpt = 0; 6715 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 6716 6717 #if DEBUG_MEMC_CC_SEND 6718 if(m_debug) 6719 std::cout << " <MEMC " << name() 6720 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for address " 6721 << r_write_to_cc_send_nline.read()*m_words*4 << std::endl; 6722 #endif 6723 break; 6724 } 6725 ///////////////////////////// 6726 case CC_SEND_WRITE_UPDT_DATA: // send data flits for multi-update (from WRITE FSM) 6727 { 6728 if(not p_dspin_m2p.read) break; 6729 if(r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)) 6730 { 6731 write_to_cc_send_fifo_get = true; 6732 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6733 break; 6734 } 6735 6736 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 6737 break; 6738 } 6739 //////////////////////////////// 6740 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 6741 { 6742 if(not p_dspin_m2p.read) break; 6743 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 6744 break; 6745 } 6746 /////////////////////////////// 6747 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 6748 { 6749 if(not p_dspin_m2p.read) break; 6750 m_cpt_inval_brdcast++; 6751 6752 r_cas_to_cc_send_brdcast_req = false; 6753 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6754 6755 #if DEBUG_MEMC_CC_SEND 6756 if(m_debug) 6757 std::cout << " <MEMC " << name() 6758 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for address: " 6759 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6760 #endif 6761 break; 6762 } 6763 ///////////////////////////// 6764 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 6765 { 6766 if(m_cas_to_cc_send_inst_fifo.rok()) 6767 { 6768 if(not p_dspin_m2p.read) break; 6769 6770 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 6771 break; 6772 } 6773 6774 // no more packets to send for the multi-update 6775 if(r_cas_to_cc_send_multi_req.read()) 
6776 { 6777 r_cas_to_cc_send_multi_req = false; 6778 } 6779 6780 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6781 break; 6782 } 6783 //////////////////////////// 6784 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 6785 { 6786 if(not p_dspin_m2p.read) break; 6787 6788 m_cpt_update_mult++; 6789 6790 r_cc_send_cpt = 0; 6791 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 6792 6793 #if DEBUG_MEMC_CC_SEND 6794 if(m_debug) 6795 std::cout << " <MEMC " << name() 6796 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for address " 6797 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6798 #endif 6799 break; 6800 } 6801 /////////////////////////// 6802 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 6803 { 6804 if(not p_dspin_m2p.read) break; 6805 6806 if(r_cas_to_cc_send_is_long.read()) 6807 { 6808 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 6809 break; 6810 } 6811 6812 cas_to_cc_send_fifo_get = true; 6813 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6814 break; 6815 } 6816 //////////////////////////////// 6817 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for multi-update (from CAS FSM) 6818 { 6819 if(not p_dspin_m2p.read) break; 6820 cas_to_cc_send_fifo_get = true; 6821 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6822 break; 6823 } 3020 6824 } 3021 break; 3022 } 3023 3024 ///////////////////////// 3025 case WRITE_UPT_HEAP_LOCK: // get access to heap 3026 { 3027 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 3028 { 3029 3030 #if DEBUG_MEMC_WRITE 3031 if(m_debug) 3032 std::cout << " <MEMC " << name() 3033 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 3034 #endif 3035 r_write_fsm = WRITE_UPT_REQ; 3036 m_cpt_write_fsm_n_heap_lock++; 3037 } 3038 3039 m_cpt_write_fsm_heap_lock++; 3040 3041 break; 3042 } 3043 3044 ////////////////// 3045 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 3046 // and write the first copy in the FIFO 3047 // send the request if only one copy 3048 { 3049 assert(not r_write_to_cc_send_multi_req.read() and 3050 not r_write_to_cc_send_brdcast_req.read() and 3051 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 3052 "transaction in WRITE_UPT_REQ state" 3053 ); 3054 3055 r_write_to_cc_send_brdcast_req = false; 3056 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3057 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3058 r_write_to_cc_send_index = r_write_word_index.read(); 3059 r_write_to_cc_send_count = r_write_word_count.read(); 3060 3061 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 3062 3063 size_t min = r_write_word_index.read(); 3064 size_t max = r_write_word_index.read() + r_write_word_count.read(); 3065 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 3066 3067 if( (r_write_copy.read() != r_write_srcid.read()) or 3068 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 3069 { 3070 // put the first srcid in the fifo 3071 write_to_cc_send_fifo_put = true; 3072 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3073 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3074 if(r_write_count.read() == 1) 6825 // end switch r_cc_send_fsm 6826 6827 ////////////////////////////////////////////////////////////////////////////// 6828 // CC_RECEIVE FSM 6829 ////////////////////////////////////////////////////////////////////////////// 6830 // The CC_RECEIVE fsm controls the DSPIN target port on the coherence 6831 // network. 
6832 ////////////////////////////////////////////////////////////////////////////// 6833 6834 //std::cout << std::endl << "cc_receive_fsm" << std::endl; 6835 6836 switch(r_cc_receive_fsm.read()) 3075 6837 { 3076 r_write_fsm = WRITE_IDLE; 3077 r_write_to_cc_send_multi_req = true; 6838 ///////////////////// 6839 case CC_RECEIVE_IDLE: 6840 { 6841 if(not p_dspin_p2m.write) break; 6842 6843 uint8_t type = 6844 DspinDhccpParam::dspin_get( 6845 p_dspin_p2m.data.read(), 6846 DspinDhccpParam::P2M_TYPE); 6847 6848 if((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or 6849 (type == DspinDhccpParam::TYPE_CLEANUP_INST)) 6850 { 6851 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 6852 break; 6853 } 6854 6855 if(type == DspinDhccpParam::TYPE_MULTI_ACK) 6856 { 6857 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 6858 break; 6859 } 6860 6861 assert(false and 6862 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6863 "Illegal type in coherence request"); 6864 6865 break; 6866 } 6867 //////////////////////// 6868 case CC_RECEIVE_CLEANUP: 6869 { 6870 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6871 6872 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6873 break; 6874 6875 assert(not p_dspin_p2m.eop.read() and 6876 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6877 "CLEANUP command must have two flits"); 6878 6879 cc_receive_to_cleanup_fifo_put = true; 6880 r_cc_receive_fsm = CC_RECEIVE_CLEANUP_EOP; 6881 6882 break; 6883 } 6884 //////////////////////////// 6885 case CC_RECEIVE_CLEANUP_EOP: 6886 { 6887 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo or more in case of cleanup data (ODCCP) 6888 6889 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6890 break; 6891 6892 cc_receive_to_cleanup_fifo_put = true; 6893 if(p_dspin_p2m.eop.read()) 6894 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6895 6896 break; 6897 } 6898 6899 ////////////////////////// 6900 case CC_RECEIVE_MULTI_ACK: 6901 { 6902 // write MULTI_ACK flit in CC_RECEIVE to MULTI_ACK fifo 6903 6904 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 6905 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 6906 break; 6907 6908 assert(p_dspin_p2m.eop.read() and 6909 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6910 "MULTI_ACK command must have one flit"); 6911 6912 cc_receive_to_multi_ack_fifo_put = true; 6913 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6914 break; 6915 } 6916 } 6917 6918 ////////////////////////////////////////////////////////////////////////// 6919 // TGT_RSP FSM 6920 ////////////////////////////////////////////////////////////////////////// 6921 // The TGT_RSP fsm sends the responses on the VCI target port 6922 // with a round robin priority between eigth requests : 6923 // - r_config_to_tgt_rsp_req 6924 // - r_tgt_cmd_to_tgt_rsp_req 6925 // - r_read_to_tgt_rsp_req 6926 // - r_write_to_tgt_rsp_req 6927 // - r_cas_to_tgt_rsp_req 6928 // - r_cleanup_to_tgt_rsp_req 6929 // - r_xram_rsp_to_tgt_rsp_req 6930 // - r_multi_ack_to_tgt_rsp_req 6931 // 6932 // The ordering is : 6933 // config >tgt_cmd > read > write > cas > xram > multi_ack > cleanup 6934 ////////////////////////////////////////////////////////////////////////// 6935 6936 //std::cout << std::endl << "tgt_rsp_fsm" << std::endl; 6937 6938 switch(r_tgt_rsp_fsm.read()) 6939 { 6940 ///////////////////////// 6941 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 6942 { 6943 if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6944 else if(r_read_to_tgt_rsp_req) 6945 { 6946 r_tgt_rsp_fsm = TGT_RSP_READ; 6947 r_tgt_rsp_cpt = 
r_read_to_tgt_rsp_word.read(); 6948 } 6949 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6950 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6951 else if(r_xram_rsp_to_tgt_rsp_req) 6952 { 6953 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6954 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6955 } 6956 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6957 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6958 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6959 break; 6960 } 6961 ////////////////////////// 6962 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 6963 { 6964 if(r_read_to_tgt_rsp_req) 6965 { 6966 r_tgt_rsp_fsm = TGT_RSP_READ; 6967 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6968 } 6969 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6970 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6971 else if(r_xram_rsp_to_tgt_rsp_req) 6972 { 6973 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6974 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6975 } 6976 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6977 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6978 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6979 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6980 break; 6981 } 6982 /////////////////////// 6983 case TGT_RSP_READ_IDLE: // write requests have the highest priority 6984 { 6985 if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6986 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6987 else if(r_xram_rsp_to_tgt_rsp_req) 6988 { 6989 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6990 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6991 } 6992 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6993 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6994 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6995 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6996 else if(r_read_to_tgt_rsp_req) 6997 { 6998 r_tgt_rsp_fsm = TGT_RSP_READ; 6999 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7000 } 7001 break; 7002 } 7003 //////////////////////// 7004 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 7005 { 7006 if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7007 else if(r_xram_rsp_to_tgt_rsp_req) 7008 { 7009 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7010 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7011 } 7012 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7013 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7014 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7015 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7016 else if(r_read_to_tgt_rsp_req) 7017 { 7018 r_tgt_rsp_fsm = TGT_RSP_READ; 7019 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7020 } 7021 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7022 break; 7023 } 7024 /////////////////////// 7025 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 7026 { 7027 if(r_xram_rsp_to_tgt_rsp_req) 7028 { 7029 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7030 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7031 } 7032 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7033 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7034 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7035 else if(r_tgt_cmd_to_tgt_rsp_req) 
r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7036 else if(r_read_to_tgt_rsp_req) 7037 { 7038 r_tgt_rsp_fsm = TGT_RSP_READ; 7039 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7040 } 7041 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7042 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7043 break; 7044 } 7045 /////////////////////// 7046 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 7047 { 7048 7049 if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7050 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7051 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7052 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7053 else if(r_read_to_tgt_rsp_req) 7054 { 7055 r_tgt_rsp_fsm = TGT_RSP_READ; 7056 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7057 } 7058 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7059 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7060 else if(r_xram_rsp_to_tgt_rsp_req) 7061 { 7062 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7063 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7064 } 7065 break; 7066 } 7067 //////////////////////////// 7068 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 7069 { 7070 if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7071 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7072 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7073 else if(r_read_to_tgt_rsp_req) 7074 { 7075 r_tgt_rsp_fsm = TGT_RSP_READ; 7076 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7077 } 7078 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7079 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7080 else if(r_xram_rsp_to_tgt_rsp_req) 7081 { 7082 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7083 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7084 } 7085 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7086 break; 7087 } 7088 ////////////////////////// 7089 case TGT_RSP_CLEANUP_IDLE: // config requests have the highest priority 7090 { 7091 if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7092 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7093 else if(r_read_to_tgt_rsp_req) 7094 { 7095 r_tgt_rsp_fsm = TGT_RSP_READ; 7096 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7097 } 7098 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7099 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7100 else if(r_xram_rsp_to_tgt_rsp_req) 7101 { 7102 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7103 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7104 } 7105 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7106 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7107 break; 7108 } 7109 //////////////////// 7110 case TGT_RSP_CONFIG: // send the response for a config transaction 7111 { 7112 if ( p_vci_tgt.rspack ) 7113 { 7114 r_config_to_tgt_rsp_req = false; 7115 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 7116 7117 #if DEBUG_MEMC_TGT_RSP 7118 if( m_debug ) 7119 { 7120 std::cout 7121 << " <MEMC " << name() 7122 << " TGT_RSP_CONFIG> Config transaction completed response" 7123 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 7124 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 7125 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 7126 << std::endl; 7127 } 7128 #endif 7129 } 7130 break; 7131 } 7132 ///////////////////// 7133 case TGT_RSP_TGT_CMD: // send the response 
for a configuration access 7134 { 7135 if ( p_vci_tgt.rspack ) 7136 { 7137 r_tgt_cmd_to_tgt_rsp_req = false; 7138 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 7139 7140 #if DEBUG_MEMC_TGT_RSP 7141 if( m_debug ) 7142 { 7143 std::cout 7144 << " <MEMC " << name() 7145 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 7146 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 7147 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 7148 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 7149 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 7150 << std::endl; 7151 } 7152 #endif 7153 } 7154 break; 7155 } 7156 ////////////////// 7157 case TGT_RSP_READ: // send the response to a read 7158 { 7159 if ( p_vci_tgt.rspack ) 7160 { 7161 7162 #if DEBUG_MEMC_TGT_RSP 7163 if( m_debug ) 7164 { 7165 std::cout 7166 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 7167 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 7168 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 7169 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 7170 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 7171 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 7172 } 7173 #endif 7174 7175 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 7176 r_read_to_tgt_rsp_length.read() - 1; 7177 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7178 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7179 7180 if ((is_last_word and not is_ll) or 7181 (r_tgt_rsp_key_sent.read() and is_ll)) 7182 { 7183 // Last word in case of READ or second flit in case of LL 7184 r_tgt_rsp_key_sent = false; 7185 r_read_to_tgt_rsp_req = false; 7186 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 7187 } 7188 else 7189 { 7190 if (is_ll) 7191 { 7192 r_tgt_rsp_key_sent = true; // Send second flit of ll 7193 } 7194 else 7195 { 7196 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 7197 } 7198 } 7199 } 7200 break; 7201 } 7202 ////////////////// 7203 case TGT_RSP_WRITE: // send the write acknowledge 7204 { 7205 if(p_vci_tgt.rspack) 7206 { 7207 7208 #if DEBUG_MEMC_TGT_RSP 7209 if(m_debug) 7210 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 7211 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 7212 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 7213 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl; 7214 #endif 7215 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 7216 r_write_to_tgt_rsp_req = false; 7217 } 7218 break; 7219 } 7220 ///////////////////// 7221 case TGT_RSP_CLEANUP: // not clear to me (AG) 7222 { 7223 if(p_vci_tgt.rspack) 7224 { 7225 7226 #if DEBUG_MEMC_TGT_RSP 7227 if(m_debug) 7228 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 7229 << " / rsrcid = " << std::hex << r_cleanup_to_tgt_rsp_srcid.read() 7230 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 7231 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl; 7232 #endif 7233 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 7234 r_cleanup_to_tgt_rsp_req = false; 7235 } 7236 break; 7237 } 7238 ///////////////// 7239 case TGT_RSP_CAS: // send one atomic word response 7240 { 7241 if(p_vci_tgt.rspack) 7242 { 7243 7244 #if DEBUG_MEMC_TGT_RSP 7245 if(m_debug) 7246 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 7247 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 7248 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read() 7249 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() 
<< std::endl; 7250 #endif 7251 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 7252 r_cas_to_tgt_rsp_req = false; 7253 } 7254 break; 7255 } 7256 ////////////////// 7257 case TGT_RSP_XRAM: // send the response after XRAM access 7258 { 7259 if ( p_vci_tgt.rspack ) 7260 { 7261 7262 #if DEBUG_MEMC_TGT_RSP 7263 if( m_debug ) 7264 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 7265 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 7266 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 7267 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 7268 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 7269 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 7270 #endif 7271 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 7272 r_xram_rsp_to_tgt_rsp_length.read() - 1; 7273 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7274 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7275 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 7276 7277 if (((is_last_word or is_error) and not is_ll) or 7278 (r_tgt_rsp_key_sent.read() and is_ll)) 7279 { 7280 // Last word sent in case of READ or second flit sent in case if LL 7281 r_tgt_rsp_key_sent = false; 7282 r_xram_rsp_to_tgt_rsp_req = false; 7283 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 7284 } 7285 else 7286 { 7287 if (is_ll) 7288 { 7289 r_tgt_rsp_key_sent = true; // Send second flit of ll 7290 } 7291 else 7292 { 7293 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 7294 } 7295 } 7296 } 7297 break; 7298 } 7299 /////////////////////// 7300 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction 7301 { 7302 if(p_vci_tgt.rspack) 7303 { 7304 7305 #if DEBUG_MEMC_TGT_RSP 7306 if(m_debug) 7307 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 7308 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 7309 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 7310 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 7311 #endif 7312 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 7313 r_multi_ack_to_tgt_rsp_req = false; 7314 } 7315 break; 7316 } 7317 } // end switch tgt_rsp_fsm 7318 7319 //////////////////////////////////////////////////////////////////////////////////// 7320 // ALLOC_UPT FSM 7321 //////////////////////////////////////////////////////////////////////////////////// 7322 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT), 7323 // with a round robin priority between three FSMs, with the following order: 7324 // WRITE -> CAS -> MULTI_ACK 7325 // - The WRITE FSM initiates update transaction and sets a new entry in UPT. 7326 // - The CAS FSM does the same thing as the WRITE FSM. 7327 // - The MULTI_ACK FSM complete those trasactions and erase the UPT entry. 7328 // The resource is always allocated. 
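//////////////////////////////////////////////////////////////////////////////
// Editor's note: illustrative sketch only (hypothetical names, not part of
// the component). Every ALLOC_* FSM below implements the same round-robin
// arbiter: the current owner keeps the resource as long as it is in a state
// that uses it; when it leaves, the other requesters are scanned in cyclic
// order starting from the owner's successor, so no requester is starved.
// For the UPT and its three clients (WRITE -> CAS -> MULTI_ACK), one
// arbitration step amounts to:
//////////////////////////////////////////////////////////////////////////////
{
    const int n_upt_clients = 3;                  // 0:WRITE 1:CAS 2:MULTI_ACK
    bool upt_lock_request_sketch[3] = { false, true, false };  // hypothetical
    int upt_owner_sketch = 0;                     // current owner is WRITE
    for (int k = 1; k < n_upt_clients; k++)       // scan the other clients
    {
        int candidate = (upt_owner_sketch + k) % n_upt_clients;
        if (upt_lock_request_sketch[candidate])
        {
            upt_owner_sketch = candidate;         // grant to first requester
            break;
        }
    }                                             // owner unchanged if no request
    (void) upt_owner_sketch;                      // documentation only
}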
7329 ///////////////////////////////////////////////////////////////////////////////////// 7330 7331 //std::cout << std::endl << "alloc_upt_fsm" << std::endl; 7332 7333 switch(r_alloc_upt_fsm.read()) 7334 { 7335 ///////////////////////// 7336 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 7337 if (r_write_fsm.read() != WRITE_UPT_LOCK) 7338 { 7339 if (r_cas_fsm.read() == CAS_UPT_LOCK) 7340 r_alloc_upt_fsm = ALLOC_UPT_CAS; 7341 7342 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 7343 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 7344 } 7345 break; 7346 7347 ///////////////////////// 7348 case ALLOC_UPT_CAS: // allocated to CAS FSM 7349 if (r_cas_fsm.read() != CAS_UPT_LOCK) 7350 { 7351 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 7352 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 7353 7354 else if (r_write_fsm.read() == WRITE_UPT_LOCK) 7355 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 7356 } 7357 break; 7358 7359 ///////////////////////// 7360 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 7361 if ((r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK ) and 7362 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR)) 7363 { 7364 if (r_write_fsm.read() == WRITE_UPT_LOCK) 7365 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 7366 7367 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 7368 r_alloc_upt_fsm = ALLOC_UPT_CAS; 7369 } 7370 break; 7371 } // end switch r_alloc_upt_fsm 7372 7373 //////////////////////////////////////////////////////////////////////////////////// 7374 // ALLOC_IVT FSM 7375 //////////////////////////////////////////////////////////////////////////////////// 7376 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT), 7377 // with a round robin priority between five FSMs, with the following order: 7378 // WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG 7379 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry 7380 // in IVT. 7381 // - The CAS FSM does the same thing as the WRITE FSM. 7382 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transaction and sets 7383 // a new entry in the IVT 7384 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM 7385 // - The CLEANUP FSM complete those trasactions and erase the IVT entry. 7386 // The resource is always allocated. 
7387 ///////////////////////////////////////////////////////////////////////////////////// 7388 7389 //std::cout << std::endl << "alloc_ivt_fsm" << std::endl; 7390 7391 switch(r_alloc_ivt_fsm.read()) 7392 { 7393 ///////////////////// 7394 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 7395 if (r_write_fsm.read() != WRITE_BC_IVT_LOCK) 7396 { 7397 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7398 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7399 7400 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7401 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7402 7403 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7404 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7405 7406 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7407 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7408 7409 else 7410 m_cpt_ivt_unused++; 7411 } 7412 break; 7413 7414 //////////////////////// 7415 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 7416 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 7417 { 7418 if(r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7419 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7420 7421 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7422 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7423 7424 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7425 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7426 7427 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7428 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7429 7430 else 7431 m_cpt_ivt_unused++; 7432 } 7433 break; 7434 7435 /////////////////////// 7436 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 7437 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 7438 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT)) 7439 { 7440 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7441 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7442 7443 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7444 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7445 7446 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7447 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7448 7449 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7450 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7451 } 7452 break; 7453 7454 ////////////////////////// 7455 case ALLOC_IVT_CAS: // allocated to CAS FSM 7456 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 7457 { 7458 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7459 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7460 7461 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7462 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7463 7464 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7465 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7466 7467 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7468 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7469 7470 } 7471 break; 7472 7473 ////////////////////////// 7474 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 7475 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 7476 { 7477 if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7478 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7479 7480 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7481 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7482 7483 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7484 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7485 7486 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7487 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7488 7489 } 7490 break; 7491 7492 } // end switch r_alloc_ivt_fsm 7493 7494 //////////////////////////////////////////////////////////////////////////////////// 7495 // ALLOC_DIR FSM 7496 //////////////////////////////////////////////////////////////////////////////////// 7497 // The ALLOC_DIR FSM allocates the access to the directory and 7498 // the data cache with a round robin priority between 6 user FSMs : 7499 // The cyclic 
ordering is CONFIG > READ > WRITE > CAS > CLEANUP > XRAM_RSP 7500 // The ressource is always allocated. 7501 ///////////////////////////////////////////////////////////////////////////////////// 7502 7503 //std::cout << std::endl << "alloc_dir_fsm" << std::endl; 7504 7505 switch(r_alloc_dir_fsm.read()) 7506 { 7507 ///////////////////// 7508 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 7509 // All the WAYS of a SET initialized in parallel 7510 7511 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 7512 7513 if(r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 7514 { 7515 m_cache_directory.init(); 7516 r_alloc_dir_fsm = ALLOC_DIR_READ; 7517 } 7518 break; 7519 7520 ////////////////////// 7521 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 7522 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 7523 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 7524 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 7525 (r_config_fsm.read() != CONFIG_TRT_SET) and 7526 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 7527 { 7528 if(r_read_fsm.read() == READ_DIR_REQ) 7529 r_alloc_dir_fsm = ALLOC_DIR_READ; 7530 7531 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7532 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7533 7534 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7535 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7536 7537 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7538 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7539 7540 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7541 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7542 } 7543 break; 7544 7545 //////////////////// 7546 case ALLOC_DIR_READ: // allocated to READ FSM 7547 if( ((r_read_fsm.read() != READ_DIR_REQ) and 7548 (r_read_fsm.read() != READ_DIR_LOCK) and 7549 (r_read_fsm.read() != READ_TRT_LOCK) and 7550 (r_read_fsm.read() != READ_HEAP_REQ)) 7551 or 7552 ((r_read_fsm.read() == READ_TRT_LOCK) and 7553 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ)) ) 7554 { 7555 if(r_write_fsm.read() == WRITE_DIR_REQ) 7556 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7557 7558 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7559 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7560 7561 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7562 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7563 7564 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7565 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7566 7567 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7568 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7569 7570 else 7571 m_cpt_dir_unused++; 7572 } 7573 else 7574 m_cpt_read_fsm_dir_used++; 7575 break; 7576 7577 ///////////////////// 7578 case ALLOC_DIR_WRITE: // allocated to WRITE FSM 7579 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 7580 (r_write_fsm.read() != WRITE_DIR_LOCK) and 7581 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 7582 (r_write_fsm.read() != WRITE_DIR_HIT) and 7583 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7584 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 7585 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7586 (r_write_fsm.read() != WRITE_UPT_LOCK) and 7587 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK)) 7588 or 7589 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 7590 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 7591 or 7592 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 7593 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 7594 { 7595 if(r_cas_fsm.read() == CAS_DIR_REQ) 7596 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7597 7598 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7599 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7600 7601 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7602 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7603 
7604 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7605 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7606 7607 else if(r_read_fsm.read() == READ_DIR_REQ) 7608 r_alloc_dir_fsm = ALLOC_DIR_READ; 7609 7610 else 7611 m_cpt_dir_unused++; 7612 } 7613 else 7614 m_cpt_write_fsm_dir_used++; 7615 break; 7616 7617 /////////////////// 7618 case ALLOC_DIR_CAS: // allocated to CAS FSM 7619 if(((r_cas_fsm.read() != CAS_DIR_REQ) and 7620 (r_cas_fsm.read() != CAS_DIR_LOCK) and 7621 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 7622 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 7623 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 7624 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7625 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 7626 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7627 (r_cas_fsm.read() != CAS_UPT_LOCK) and 7628 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 7629 or 7630 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 7631 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 7632 or 7633 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 7634 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 7635 { 7636 if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7637 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7638 7639 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7640 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7641 7642 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7643 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7644 7645 else if(r_read_fsm.read() == READ_DIR_REQ) 7646 r_alloc_dir_fsm = ALLOC_DIR_READ; 7647 7648 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7649 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7650 7651 else 7652 m_cpt_dir_unused++; 7653 } 7654 else 7655 m_cpt_cas_fsm_dir_used++; 7656 break; 7657 7658 /////////////////////// 7659 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 7660 if((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 7661 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 7662 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 7663 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK)) 7664 { 7665 if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7666 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7667 7668 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7669 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7670 7671 else if(r_read_fsm.read() == READ_DIR_REQ) 7672 r_alloc_dir_fsm = ALLOC_DIR_READ; 7673 7674 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7675 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7676 7677 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7678 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7679 7680 else 7681 m_cpt_dir_unused++; 7682 } 7683 else 7684 m_cpt_cleanup_fsm_dir_used++; 7685 break; 7686 7687 //////////////////////// 7688 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 7689 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 7690 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7691 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7692 { 7693 if(r_config_fsm.read() == CONFIG_DIR_REQ) 7694 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7695 7696 else if(r_read_fsm.read() == READ_DIR_REQ) 7697 r_alloc_dir_fsm = ALLOC_DIR_READ; 7698 7699 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7700 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7701 7702 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7703 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7704 7705 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7706 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7707 7708 else 7709 m_cpt_dir_unused++; 7710 } 7711 else 7712 m_cpt_xram_rsp_fsm_dir_used++; 7713 break; 7714 7715 } // end switch alloc_dir_fsm 7716 7717 //////////////////////////////////////////////////////////////////////////////////// 7718 // ALLOC_TRT FSM 7719 
//////////////////////////////////////////////////////////////////////////////////// 7720 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 7721 // with a round robin priority between 7 user FSMs : 7722 // The priority is READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG 7723 // The ressource is always allocated. 7724 /////////////////////////////////////////////////////////////////////////////////// 7725 7726 //std::cout << std::endl << "alloc_trt_fsm" << std::endl; 7727 7728 switch(r_alloc_trt_fsm.read()) 7729 { 7730 //////////////////// 7731 case ALLOC_TRT_READ: 7732 if(r_read_fsm.read() != READ_TRT_LOCK) 7733 { 7734 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7735 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7736 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7737 7738 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7739 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7740 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7741 7742 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7743 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7744 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7745 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7746 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7747 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7748 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7749 7750 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7751 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7752 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7753 7754 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7755 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7756 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7757 7758 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7759 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7760 7761 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7762 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7763 } 7764 break; 7765 7766 ///////////////////// 7767 case ALLOC_TRT_WRITE: 7768 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7769 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7770 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7771 { 7772 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7773 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7774 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7775 7776 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7777 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7778 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7779 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7780 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7781 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7782 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7783 7784 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7785 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7786 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7787 7788 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7789 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7790 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7791 7792 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7793 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7794 7795 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7796 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7797 7798 else if(r_read_fsm.read() == READ_TRT_LOCK) 7799 r_alloc_trt_fsm = ALLOC_TRT_READ; 7800 } 7801 break; 7802 /////////////////// 7803 case ALLOC_TRT_CAS: 7804 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7805 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7806 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7807 { 7808 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7809 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7810 (r_ixr_cmd_fsm.read() == 
IXR_CMD_CAS_TRT) or 7811 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7812 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7813 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7814 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7815 7816 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7817 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7818 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7819 7820 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7821 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7822 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7823 7824 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7825 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7826 7827 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7828 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7829 7830 else if(r_read_fsm.read() == READ_TRT_LOCK) 7831 r_alloc_trt_fsm = ALLOC_TRT_READ; 7832 7833 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7834 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7835 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7836 } 7837 break; 7838 /////////////////////// 7839 case ALLOC_TRT_IXR_CMD: 7840 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 7841 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 7842 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 7843 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 7844 (r_ixr_cmd_fsm.read() != IXR_CMD_CLEANUP_TRT) and 7845 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 7846 { 7847 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7848 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7849 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7850 7851 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7852 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7853 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7854 7855 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7856 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7857 7858 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7859 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7860 7861 else if(r_read_fsm.read() == READ_TRT_LOCK) 7862 r_alloc_trt_fsm = ALLOC_TRT_READ; 7863 7864 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7865 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7866 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7867 7868 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7869 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7870 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7871 } 7872 break; 7873 //////////////////////// 7874 case ALLOC_TRT_XRAM_RSP: 7875 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 7876 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 7877 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7878 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 7879 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7880 { 7881 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7882 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7883 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7884 7885 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7886 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7887 7888 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7889 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7890 7891 else if(r_read_fsm.read() == READ_TRT_LOCK) 7892 r_alloc_trt_fsm = ALLOC_TRT_READ; 7893 7894 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7895 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7896 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7897 7898 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7899 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7900 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7901 7902 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7903 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7904 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7905 
(r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7906 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7907 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7908 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7909 7910 } 7911 break; 7912 /////////////////////// 7913 case ALLOC_TRT_IXR_RSP: 7914 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 7915 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 7916 { 7917 if(r_config_fsm.read() == CONFIG_TRT_LOCK) 7918 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7919 7920 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7921 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7922 7923 else if(r_read_fsm.read() == READ_TRT_LOCK) 7924 r_alloc_trt_fsm = ALLOC_TRT_READ; 7925 7926 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7927 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7928 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7929 7930 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7931 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7932 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7933 7934 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7935 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7936 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7937 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7938 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7939 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7940 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7941 7942 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7943 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7944 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7945 } 7946 break; 7947 ////////////////////// 7948 case ALLOC_TRT_CONFIG: 7949 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 7950 (r_config_fsm.read() != CONFIG_TRT_SET)) 7951 { 7952 if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7953 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7954 7955 else if(r_read_fsm.read() == READ_TRT_LOCK) 7956 r_alloc_trt_fsm = ALLOC_TRT_READ; 7957 7958 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7959 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7960 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7961 7962 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7963 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7964 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7965 7966 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7967 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7968 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7969 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7970 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7971 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7972 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7973 7974 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7975 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7976 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7977 7978 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7979 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7980 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7981 7982 } 7983 break; 7984 7985 //////////////////////// 7986 case ALLOC_TRT_CLEANUP: 7987 if(r_cleanup_fsm.read() != CLEANUP_IXR_REQ) 7988 { 7989 if(r_read_fsm.read() == READ_TRT_LOCK) 7990 r_alloc_trt_fsm = ALLOC_TRT_READ; 7991 7992 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7993 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7994 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7995 7996 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7997 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7998 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7999 8000 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 8001 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8002 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8003 
(r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8004 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8005 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8006 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8007 8008 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8009 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8010 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8011 8012 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8013 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8014 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8015 8016 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8017 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8018 } 8019 break; 8020 8021 8022 } // end switch alloc_trt_fsm 8023 8024 //////////////////////////////////////////////////////////////////////////////////// 8025 // ALLOC_HEAP FSM 8026 //////////////////////////////////////////////////////////////////////////////////// 8027 // The ALLOC_HEAP FSM allocates the access to the heap 8028 // with a round robin priority between 6 user FSMs : 8029 // The cyclic ordering is READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 8030 // The ressource is always allocated. 8031 ///////////////////////////////////////////////////////////////////////////////////// 8032 8033 //std::cout << std::endl << "alloc_heap_fsm" << std::endl; 8034 8035 switch(r_alloc_heap_fsm.read()) 8036 { 8037 //////////////////// 8038 case ALLOC_HEAP_RESET: 8039 // Initializes the heap one ENTRY each cycle. 8040 8041 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 8042 8043 if(r_alloc_heap_reset_cpt.read() == (m_heap_size-1)) 8044 { 8045 m_heap.init(); 8046 8047 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8048 } 8049 break; 8050 8051 //////////////////// 8052 case ALLOC_HEAP_READ: 8053 if((r_read_fsm.read() != READ_HEAP_REQ) and 8054 (r_read_fsm.read() != READ_HEAP_LOCK) and 8055 (r_read_fsm.read() != READ_HEAP_ERASE)) 8056 { 8057 if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8058 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8059 8060 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8061 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8062 8063 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8064 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8065 8066 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8067 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8068 8069 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8070 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8071 else 8072 m_cpt_heap_unused++; 8073 } 8074 else 8075 m_cpt_read_fsm_heap_used++; 8076 break; 8077 8078 ///////////////////// 8079 case ALLOC_HEAP_WRITE: 8080 if((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 8081 (r_write_fsm.read() != WRITE_UPT_REQ) and 8082 (r_write_fsm.read() != WRITE_UPT_NEXT)) 8083 { 8084 if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8085 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8086 8087 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8088 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8089 8090 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8091 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8092 8093 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8094 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8095 8096 else if(r_read_fsm.read() == READ_HEAP_REQ) 8097 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8098 8099 else 8100 m_cpt_heap_unused++; 8101 } 8102 else 8103 m_cpt_write_fsm_heap_used++; 8104 break; 8105 8106 //////////////////// 8107 case ALLOC_HEAP_CAS: 8108 if((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 8109 (r_cas_fsm.read() != CAS_UPT_REQ) and 8110 (r_cas_fsm.read() != CAS_UPT_NEXT)) 8111 { 8112 if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8113 
r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8114 8115 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8116 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8117 8118 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8119 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8120 8121 else if(r_read_fsm.read() == READ_HEAP_REQ) 8122 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8123 8124 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8125 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8126 8127 else 8128 m_cpt_heap_unused++; 8129 } 8130 else 8131 m_cpt_cas_fsm_heap_used++; 8132 break; 8133 8134 /////////////////////// 8135 case ALLOC_HEAP_CLEANUP: 8136 if((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 8137 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 8138 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 8139 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 8140 { 8141 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8142 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8143 8144 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8145 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8146 8147 else if(r_read_fsm.read() == READ_HEAP_REQ) 8148 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8149 8150 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8151 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8152 8153 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8154 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8155 8156 else 8157 m_cpt_heap_unused++; 8158 } 8159 else 8160 m_cpt_cleanup_fsm_heap_used++; 8161 break; 8162 8163 //////////////////////// 8164 case ALLOC_HEAP_XRAM_RSP: 8165 if((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 8166 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 8167 { 8168 if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8169 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8170 8171 else if(r_read_fsm.read() == READ_HEAP_REQ) 8172 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8173 8174 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8175 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8176 8177 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8178 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8179 8180 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8181 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8182 8183 } 8184 break; 8185 8186 /////////////////////// 8187 case ALLOC_HEAP_CONFIG: 8188 if((r_config_fsm.read() != CONFIG_HEAP_REQ) and 8189 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 8190 { 8191 if(r_read_fsm.read() == READ_HEAP_REQ) 8192 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8193 8194 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8195 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8196 8197 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8198 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8199 8200 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8201 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8202 8203 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8204 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8205 else 8206 m_cpt_heap_unused++; 8207 } 8208 else 8209 m_cpt_xram_rsp_fsm_heap_used++; 8210 break; 8211 8212 } // end switch alloc_heap_fsm 8213 8214 //std::cout << std::endl << "fifo_update" << std::endl; 8215 8216 ///////////////////////////////////////////////////////////////////// 8217 // TGT_CMD to READ FIFO 8218 ///////////////////////////////////////////////////////////////////// 8219 8220 m_cmd_read_addr_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8221 p_vci_tgt.address.read() ); 8222 m_cmd_read_length_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8223 p_vci_tgt.plen.read()>>2 ); 8224 m_cmd_read_srcid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8225 p_vci_tgt.srcid.read() ); 8226 m_cmd_read_trdid_fifo.update( 
cmd_read_fifo_get, cmd_read_fifo_put, 8227 p_vci_tgt.trdid.read() ); 8228 m_cmd_read_pktid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8229 p_vci_tgt.pktid.read() ); 8230 8231 ///////////////////////////////////////////////////////////////////// 8232 // TGT_CMD to WRITE FIFO 8233 ///////////////////////////////////////////////////////////////////// 8234 8235 m_cmd_write_addr_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8236 (addr_t)p_vci_tgt.address.read() ); 8237 m_cmd_write_eop_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8238 p_vci_tgt.eop.read() ); 8239 m_cmd_write_srcid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8240 p_vci_tgt.srcid.read() ); 8241 m_cmd_write_trdid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8242 p_vci_tgt.trdid.read() ); 8243 m_cmd_write_pktid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8244 p_vci_tgt.pktid.read() ); 8245 m_cmd_write_data_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8246 p_vci_tgt.wdata.read() ); 8247 m_cmd_write_be_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put, 8248 p_vci_tgt.be.read() ); 8249 8250 //////////////////////////////////////////////////////////////////////////////////// 8251 // TGT_CMD to CAS FIFO 8252 //////////////////////////////////////////////////////////////////////////////////// 8253 8254 m_cmd_cas_addr_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 8255 (addr_t)p_vci_tgt.address.read() ); 8256 m_cmd_cas_eop_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 8257 p_vci_tgt.eop.read() ); 8258 m_cmd_cas_srcid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 8259 p_vci_tgt.srcid.read() ); 8260 m_cmd_cas_trdid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 8261 p_vci_tgt.trdid.read() ); 8262 m_cmd_cas_pktid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 8263 p_vci_tgt.pktid.read() ); 8264 m_cmd_cas_wdata_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put, 8265 p_vci_tgt.wdata.read() ); 8266 8267 //////////////////////////////////////////////////////////////////////////////////// 8268 // CC_RECEIVE to CLEANUP FIFO 8269 //////////////////////////////////////////////////////////////////////////////////// 8270 8271 /* 8272 if(cc_receive_to_cleanup_fifo_put) 8273 { 8274 if(cc_receive_to_cleanup_fifo_get) 8275 { 8276 m_cc_receive_to_cleanup_fifo.put_and_get( ( (uint64_t)(p_dspin_in.eop.read() & 0x1 ) << 32 ) | p_dspin_in.data.read()); 8277 } 8278 else // TODO PAS METTRE 32 en DUR !!!! 8279 { 8280 m_cc_receive_to_cleanup_fifo.simple_put( ( (uint64_t)(p_dspin_in.eop.read() & 0x1 ) << 32 ) | p_dspin_in.data.read()); 8281 //m_cc_receive_to_cleanup_fifo.simple_put(p_dspin_in.data.read()); 8282 } 3078 8283 } 3079 8284 else 3080 8285 { 3081 r_write_fsm = WRITE_UPT_NEXT;3082 r_write_to_dec = false;3083 8286 if(cc_receive_to_cleanup_fifo_get) 8287 { 8288 m_cc_receive_to_cleanup_fifo.simple_get(); 3084 8289 } 3085 } 3086 else 3087 { 3088 r_write_fsm = WRITE_UPT_NEXT; 3089 r_write_to_dec = false; 3090 } 3091 3092 #if DEBUG_MEMC_WRITE 3093 if(m_debug) 3094 { 3095 std::cout 3096 << " <MEMC " << name() 3097 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3098 << " / srcid = " << std::dec << r_write_copy.read() 3099 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3100 3101 if(r_write_count.read() == 1) 3102 std::cout << " ... 
and this is the last" << std::endl; 3103 } 3104 #endif 3105 break; 3106 } 3107 3108 /////////////////// 3109 case WRITE_UPT_NEXT: 8290 } 8291 */ 8292 m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 8293 cc_receive_to_cleanup_fifo_put, 8294 ( (uint64_t)(p_dspin_p2m.eop.read() & 0x1 ) << 32 ) | p_dspin_p2m.data.read() ); 8295 8296 //////////////////////////////////////////////////////////////////////////////////// 8297 // CC_RECEIVE to MULTI_ACK FIFO 8298 //////////////////////////////////////////////////////////////////////////////////// 8299 8300 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 8301 cc_receive_to_multi_ack_fifo_put, 8302 p_dspin_p2m.data.read() ); 8303 8304 //////////////////////////////////////////////////////////////////////////////////// 8305 // WRITE to CC_SEND FIFO 8306 //////////////////////////////////////////////////////////////////////////////////// 8307 8308 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, 8309 write_to_cc_send_fifo_put, 8310 write_to_cc_send_fifo_inst ); 8311 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, 8312 write_to_cc_send_fifo_put, 8313 write_to_cc_send_fifo_srcid ); 8314 8315 //////////////////////////////////////////////////////////////////////////////////// 8316 // CONFIG to CC_SEND FIFO 8317 //////////////////////////////////////////////////////////////////////////////////// 8318 8319 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, 8320 config_to_cc_send_fifo_put, 8321 config_to_cc_send_fifo_inst ); 8322 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, 8323 config_to_cc_send_fifo_put, 8324 config_to_cc_send_fifo_srcid ); 8325 8326 //////////////////////////////////////////////////////////////////////////////////// 8327 // XRAM_RSP to CC_SEND FIFO 8328 //////////////////////////////////////////////////////////////////////////////////// 8329 8330 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, 8331 xram_rsp_to_cc_send_fifo_put, 8332 xram_rsp_to_cc_send_fifo_inst ); 8333 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, 8334 xram_rsp_to_cc_send_fifo_put, 8335 xram_rsp_to_cc_send_fifo_srcid ); 8336 8337 //////////////////////////////////////////////////////////////////////////////////// 8338 // CAS to CC_SEND FIFO 8339 //////////////////////////////////////////////////////////////////////////////////// 8340 8341 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, 8342 cas_to_cc_send_fifo_put, 8343 cas_to_cc_send_fifo_inst ); 8344 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, 8345 cas_to_cc_send_fifo_put, 8346 cas_to_cc_send_fifo_srcid ); 8347 m_cpt_cycles++; 8348 8349 } // end transition() 8350 8351 ///////////////////////////// 8352 tmpl(void)::genMoore() 8353 ///////////////////////////// 3110 8354 { 3111 // continue the multi-update request to CC_SEND fsm 3112 // when there is copies in the heap. 3113 // if one copy in the heap is the writer itself 3114 // the corresponding SRCID should not be written in the fifo, 3115 // but the UPT counter must be decremented. 3116 // As this decrement is done in the WRITE_UPT_DEC state, 3117 // after the last copy has been found, the decrement request 3118 // must be registered in the r_write_to_dec flip-flop. 
3119 3120 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3121 3122 bool dec_upt_counter; 3123 3124 // put the next srcid in the fifo 3125 if( (entry.owner.srcid != r_write_srcid.read()) or 3126 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 3127 { 3128 dec_upt_counter = false; 3129 write_to_cc_send_fifo_put = true; 3130 write_to_cc_send_fifo_inst = entry.owner.inst; 3131 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3132 3133 #if DEBUG_MEMC_WRITE 3134 if(m_debug) 8355 //////////////////////////////////////////////////////////// 8356 // Command signals on the p_vci_ixr port 8357 //////////////////////////////////////////////////////////// 8358 8359 // DATA width is 8 bytes 8360 // The following values are not transmitted to XRAM 8361 // p_vci_ixr.be 8362 // p_vci_ixr.pktid 8363 // p_vci_ixr.cons 8364 // p_vci_ixr.wrap 8365 // p_vci_ixr.contig 8366 // p_vci_ixr.clen 8367 // p_vci_ixr.cfixed 8368 8369 p_vci_ixr.plen = 64; 8370 p_vci_ixr.srcid = m_srcid_x; 8371 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 8372 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 8373 p_vci_ixr.be = 0xFF; 8374 p_vci_ixr.pktid = 0; 8375 p_vci_ixr.cons = false; 8376 p_vci_ixr.wrap = false; 8377 p_vci_ixr.contig = true; 8378 p_vci_ixr.clen = 0; 8379 p_vci_ixr.cfixed = false; 8380 8381 if ( (r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 8382 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 8383 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 8384 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 8385 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND) ) 3135 8386 { 3136 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3137 << " / heap_index = " << std::dec << r_write_ptr.read() 3138 << " / srcid = " << std::dec << r_write_copy.read() 3139 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3140 if(entry.next == r_write_ptr.read()) 3141 std::cout << " ... and this is the last" << std::endl; 8387 p_vci_ixr.cmdval = true; 8388 8389 if ( r_ixr_cmd_get.read() ) // GET 8390 { 8391 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 8392 p_vci_ixr.wdata = 0; 8393 p_vci_ixr.eop = true; 8394 } 8395 else // PUT 8396 { 8397 size_t word = r_ixr_cmd_word.read(); 8398 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 8399 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 8400 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 8401 p_vci_ixr.eop = (word == (m_words-2)); 8402 } 3142 8403 } 3143 #endif 3144 } 3145 else // the UPT counter must be decremented 3146 { 3147 dec_upt_counter = true; 3148 3149 #if DEBUG_MEMC_WRITE 3150 if(m_debug) 8404 /*ODCCP*/ 8405 else if (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_DATA_SEND) 3151 8406 { 3152 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3153 << " / heap_index = " << std::dec << r_write_ptr.read() 3154 << " / srcid = " << std::dec << r_write_copy.read() 3155 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3156 if(entry.next == r_write_ptr.read()) 3157 std::cout << " ... 
and this is the last" << std::endl; 3158 } 3159 #endif 3160 } 3161 3162 // register the possible UPT decrement request 3163 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3164 3165 if(not m_write_to_cc_send_inst_fifo.wok()) 3166 { 3167 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3168 << "The write_to_cc_send_fifo should not be full" << std::endl 3169 << "as the depth should be larger than the max number of copies" << std::endl; 3170 exit(0); 3171 } 3172 3173 r_write_ptr = entry.next; 3174 3175 if(entry.next == r_write_ptr.read()) // last copy 3176 { 3177 r_write_to_cc_send_multi_req = true; 3178 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3179 else r_write_fsm = WRITE_IDLE; 3180 } 3181 break; 3182 } 3183 3184 ////////////////// 3185 case WRITE_UPT_DEC: 3186 { 3187 // If the initial writer has a copy, it should not 3188 // receive an update request, but the counter in the 3189 // update table must be decremented by the MULTI_ACK FSM. 3190 3191 if(!r_write_to_multi_ack_req.read()) 3192 { 3193 r_write_to_multi_ack_req = true; 3194 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3195 r_write_fsm = WRITE_IDLE; 3196 } 3197 break; 3198 } 3199 3200 /////////////// 3201 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 3202 // In order to increase the Write requests throughput, 3203 // we don't wait to return in the IDLE state to consume 3204 // a new request in the write FIFO 3205 { 3206 if(!r_write_to_tgt_rsp_req.read()) 3207 { 3208 // post the request to TGT_RSP_FSM 3209 r_write_to_tgt_rsp_req = true; 3210 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3211 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3212 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3213 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3214 3215 // try to get a new write request from the FIFO 3216 if(m_cmd_write_addr_fifo.rok()) 3217 { 3218 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3219 m_cpt_sc++; 3220 else 3221 { 3222 m_cpt_write++; 3223 m_cpt_write_cells++; 3224 } 3225 3226 // consume a word in the FIFO & write it in the local buffer 3227 cmd_write_fifo_get = true; 3228 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3229 3230 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 3231 r_write_word_index = index; 3232 r_write_word_count = 1; 3233 r_write_data[index] = m_cmd_write_data_fifo.read(); 3234 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3235 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3236 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3237 r_write_pending_sc = false; 3238 3239 // initialize the be field for all words 3240 for(size_t word=0 ; word<m_words ; word++) 3241 { 3242 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3243 else r_write_be[word] = 0x0; 3244 } 3245 3246 if(m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 3247 { 3248 r_write_fsm = WRITE_DIR_REQ; 3249 } 3250 else 3251 { 3252 r_write_fsm = WRITE_NEXT; 3253 } 8407 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 8408 p_vci_ixr.cmdval = true; 8409 #if ODCCP_NON_INCLUSIVE 8410 p_vci_ixr.address = (addr_t)((r_cleanup_to_ixr_cmd_nline.read() * m_words + 8411 r_ixr_cmd_word.read()) * 4); 8412 p_vci_ixr.wdata = ((wide_data_t)(r_cleanup_to_ixr_cmd_data[r_ixr_cmd_word.read()].read()) | 8413 ((wide_data_t)(r_cleanup_to_ixr_cmd_data[r_ixr_cmd_word.read() + 1].read()) << 32)); 8414 #else 8415 /*p_vci_ixr.address = 
(addr_t)((r_cleanup_to_ixr_cmd_nline.read() * m_words + 8416 r_ixr_cmd_word.read()) * 4);*/ 8417 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 8418 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read()].read()) | 8419 ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read() + 1].read()) << 32)); 8420 #endif 8421 8422 p_vci_ixr.trdid = r_cleanup_to_ixr_cmd_index.read(); 8423 p_vci_ixr.eop = (r_ixr_cmd_word == (m_words - 2)); 3254 8424 } 3255 8425 else 3256 8426 { 3257 r_write_fsm = WRITE_IDLE;8427 p_vci_ixr.cmdval = false; 3258 8428 } 3259 8429 3260 #if DEBUG_MEMC_WRITE 3261 if(m_debug) 3262 { 3263 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3264 << " : rsrcid = " << std::hex << r_write_srcid.read() << std::endl; 3265 if(m_cmd_write_addr_fifo.rok()) 3266 { 3267 std::cout << " New Write request: " 3268 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3269 << " / address = " << m_cmd_write_addr_fifo.read() 3270 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 3271 } 3272 } 3273 #endif 3274 } 3275 break; 3276 } 3277 3278 ///////////////////////// 3279 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 3280 { 3281 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3282 { 3283 3284 #if DEBUG_MEMC_WRITE 3285 if(m_debug) 3286 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 3287 #endif 3288 size_t hit_index = 0; 3289 size_t wok_index = 0; 3290 addr_t addr = (addr_t) r_write_address.read(); 3291 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3292 #if ODCCP_NON_INCLUSIVE 3293 bool hit_write = (m_trt.hit_write(m_nline[addr]) or 3294 ((r_cleanup_to_ixr_cmd_nline.read() == m_nline[addr]) and r_cleanup_to_ixr_cmd_req.read())); 3295 #else 3296 bool hit_write = m_trt.hit_write(m_nline[addr]); 3297 #endif 3298 bool wok = not m_trt.full(wok_index); 3299 3300 if(hit_read) // register the modified data in TRT 8430 //////////////////////////////////////////////////// 8431 // Response signals on the p_vci_ixr port 8432 //////////////////////////////////////////////////// 8433 8434 if( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 8435 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ) 8436 { 8437 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 8438 } 8439 else if (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) 3301 8440 { 3302 r_write_trt_index = hit_index; 3303 r_write_fsm = WRITE_MISS_TRT_DATA; 3304 m_cpt_write_miss++; 8441 p_vci_ixr.rspack = true; 3305 8442 } 3306 else if(wok and !hit_write) // set a new entry in TRT8443 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 3307 8444 { 3308 r_write_trt_index = wok_index; 3309 r_write_fsm = WRITE_MISS_TRT_SET; 3310 m_cpt_write_miss++; 8445 p_vci_ixr.rspack = false; 3311 8446 } 3312 else // wait an empty entry in TRT 8447 8448 //////////////////////////////////////////////////// 8449 // Command signals on the p_vci_tgt port 8450 //////////////////////////////////////////////////// 8451 8452 switch((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 3313 8453 { 3314 r_write_fsm = WRITE_WAIT; 3315 m_cpt_trt_full++; 8454 case TGT_CMD_IDLE: 8455 p_vci_tgt.cmdack = false; 8456 break; 8457 8458 case TGT_CMD_CONFIG: 8459 case TGT_CMD_ERROR: 8460 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 8461 break; 8462 8463 case TGT_CMD_READ: 8464 p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); 8465 break; 8466 8467 case TGT_CMD_WRITE: 8468 p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); 8469 break; 8470 8471 case TGT_CMD_CAS: 
8472 p_vci_tgt.cmdack = m_cmd_cas_addr_fifo.wok(); 8473 break; 3316 8474 } 3317 m_cpt_write_fsm_n_trt_lock++; 3318 } 3319 3320 m_cpt_write_fsm_trt_lock++; 3321 3322 break; 3323 } 3324 3325 //////////////// 3326 case WRITE_WAIT: // release the locks protecting the shared ressources 3327 { 3328 3329 #if DEBUG_MEMC_WRITE 3330 if(m_debug) 3331 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 3332 #endif 3333 r_write_fsm = WRITE_DIR_REQ; 3334 break; 3335 } 3336 3337 //////////////////////// 3338 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 3339 { 3340 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3341 { 3342 std::vector<be_t> be_vector; 3343 std::vector<data_t> data_vector; 3344 be_vector.clear(); 3345 data_vector.clear(); 3346 for(size_t i=0; i<m_words; i++) 8475 8476 //////////////////////////////////////////////////// 8477 // Response signals on the p_vci_tgt port 8478 //////////////////////////////////////////////////// 8479 8480 switch(r_tgt_rsp_fsm.read()) 3347 8481 { 3348 be_vector.push_back(r_write_be[i]); 3349 data_vector.push_back(r_write_data[i]); 8482 case TGT_RSP_CONFIG_IDLE: 8483 case TGT_RSP_TGT_CMD_IDLE: 8484 case TGT_RSP_READ_IDLE: 8485 case TGT_RSP_WRITE_IDLE: 8486 case TGT_RSP_CAS_IDLE: 8487 case TGT_RSP_XRAM_IDLE: 8488 case TGT_RSP_MULTI_ACK_IDLE: 8489 case TGT_RSP_CLEANUP_IDLE: 8490 { 8491 p_vci_tgt.rspval = false; 8492 p_vci_tgt.rsrcid = 0; 8493 p_vci_tgt.rdata = 0; 8494 p_vci_tgt.rpktid = 0; 8495 p_vci_tgt.rtrdid = 0; 8496 p_vci_tgt.rerror = 0; 8497 p_vci_tgt.reop = false; 8498 break; 8499 } 8500 case TGT_RSP_CONFIG: 8501 { 8502 p_vci_tgt.rspval = true; 8503 p_vci_tgt.rdata = 0; 8504 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 8505 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 8506 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 8507 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 8508 p_vci_tgt.reop = true; 8509 8510 break; 8511 } 8512 case TGT_RSP_TGT_CMD: 8513 { 8514 p_vci_tgt.rspval = true; 8515 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 8516 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 8517 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 8518 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 8519 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 8520 p_vci_tgt.reop = true; 8521 8522 break; 8523 } 8524 case TGT_RSP_READ: 8525 { 8526 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 8527 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8528 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8529 8530 p_vci_tgt.rspval = true; 8531 8532 if ( is_ll and not r_tgt_rsp_key_sent.read() ) 8533 { 8534 // LL response first flit 8535 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 8536 } 8537 else 8538 { 8539 // LL response second flit or READ response 8540 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 8541 } 8542 8543 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 8544 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); 8545 p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); 8546 p_vci_tgt.rerror = 0; 8547 p_vci_tgt.reop = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 8548 break; 8549 } 8550 8551 case TGT_RSP_WRITE: 8552 p_vci_tgt.rspval = true; 8553 if(((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read()) 8554 p_vci_tgt.rdata = 1; 8555 else 8556 p_vci_tgt.rdata = 0; 8557 
p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 8558 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 8559 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 8560 p_vci_tgt.rerror = 0; 8561 p_vci_tgt.reop = true; 8562 break; 8563 8564 case TGT_RSP_CLEANUP: 8565 p_vci_tgt.rspval = true; 8566 p_vci_tgt.rdata = 0; 8567 p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); 8568 p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); 8569 p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); 8570 p_vci_tgt.rerror = 0; // Can be a CAS rsp 8571 p_vci_tgt.reop = true; 8572 break; 8573 8574 case TGT_RSP_CAS: 8575 p_vci_tgt.rspval = true; 8576 p_vci_tgt.rdata = r_cas_to_tgt_rsp_data.read(); 8577 p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read(); 8578 p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read(); 8579 p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read(); 8580 p_vci_tgt.rerror = 0; 8581 p_vci_tgt.reop = true; 8582 break; 8583 8584 case TGT_RSP_XRAM: 8585 { 8586 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + r_xram_rsp_to_tgt_rsp_length.read() - 1; 8587 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8588 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8589 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 8590 8591 p_vci_tgt.rspval = true; 8592 8593 if( is_ll and not r_tgt_rsp_key_sent.read() ) { 8594 // LL response first flit 8595 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read(); 8596 } 8597 else { 8598 // LL response second flit or READ response 8599 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 8600 } 8601 8602 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 8603 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); 8604 p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); 8605 p_vci_tgt.rerror = is_error; 8606 p_vci_tgt.reop = (((is_last_word or is_error) and not is_ll) or 8607 (r_tgt_rsp_key_sent.read() and is_ll)); 8608 break; 8609 } 8610 8611 case TGT_RSP_MULTI_ACK: 8612 p_vci_tgt.rspval = true; 8613 p_vci_tgt.rdata = 0; // Can be a CAS or SC rsp 8614 p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read(); 8615 p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read(); 8616 p_vci_tgt.rpktid = r_multi_ack_to_tgt_rsp_pktid.read(); 8617 p_vci_tgt.rerror = 0; 8618 p_vci_tgt.reop = true; 8619 break; 8620 } // end switch r_tgt_rsp_fsm 8621 8622 //////////////////////////////////////////////////////////////////// 8623 // p_dspin_m2p port (CC_SEND FSM) 8624 //////////////////////////////////////////////////////////////////// 8625 8626 p_dspin_m2p.write = false; 8627 p_dspin_m2p.eop = false; 8628 p_dspin_m2p.data = 0; 8629 8630 switch(r_cc_send_fsm.read()) 8631 { 8632 /////////////////////////// 8633 case CC_SEND_CONFIG_IDLE: 8634 case CC_SEND_XRAM_RSP_IDLE: 8635 case CC_SEND_WRITE_IDLE: 8636 case CC_SEND_CAS_IDLE: 8637 { 8638 break; 8639 } 8640 //////////////////////////////// 8641 case CC_SEND_CONFIG_INVAL_HEADER: 8642 { 8643 uint8_t multi_inval_type; 8644 if(m_config_to_cc_send_inst_fifo.read()) 8645 { 8646 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 8647 } 8648 else 8649 { 8650 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 8651 } 8652 8653 uint64_t flit = 0; 8654 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 8655 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8656 8657 DspinDhccpParam::dspin_set( flit, 8658 dest, 8659 DspinDhccpParam::MULTI_INVAL_DEST); 8660 8661 DspinDhccpParam::dspin_set( flit, 8662 m_cc_global_id, 8663 DspinDhccpParam::MULTI_INVAL_SRCID); 
8664 8665 DspinDhccpParam::dspin_set( flit, 8666 r_config_to_cc_send_trdid.read(), 8667 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 8668 8669 DspinDhccpParam::dspin_set( flit, 8670 multi_inval_type, 8671 DspinDhccpParam::M2P_TYPE); 8672 p_dspin_m2p.write = true; 8673 p_dspin_m2p.data = flit; 8674 break; 8675 } 8676 //////////////////////////////// 8677 case CC_SEND_CONFIG_INVAL_NLINE: 8678 { 8679 uint64_t flit = 0; 8680 DspinDhccpParam::dspin_set( flit, 8681 r_config_to_cc_send_nline.read(), 8682 DspinDhccpParam::MULTI_INVAL_NLINE); 8683 p_dspin_m2p.eop = true; 8684 p_dspin_m2p.write = true; 8685 p_dspin_m2p.data = flit; 8686 break; 8687 } 8688 /////////////////////////////////// 8689 case CC_SEND_XRAM_RSP_INVAL_HEADER: 8690 { 8691 if(not m_xram_rsp_to_cc_send_inst_fifo.rok()) break; 8692 8693 uint8_t multi_inval_type; 8694 if(m_xram_rsp_to_cc_send_inst_fifo.read()) 8695 { 8696 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 8697 } 8698 else 8699 { 8700 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 8701 } 8702 8703 uint64_t flit = 0; 8704 uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() << 8705 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8706 8707 DspinDhccpParam::dspin_set( flit, 8708 dest, 8709 DspinDhccpParam::MULTI_INVAL_DEST); 8710 8711 DspinDhccpParam::dspin_set( flit, 8712 m_cc_global_id, 8713 DspinDhccpParam::MULTI_INVAL_SRCID); 8714 8715 DspinDhccpParam::dspin_set( flit, 8716 r_xram_rsp_to_cc_send_trdid.read(), 8717 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 8718 8719 DspinDhccpParam::dspin_set( flit, 8720 multi_inval_type, 8721 DspinDhccpParam::M2P_TYPE); 8722 p_dspin_m2p.write = true; 8723 p_dspin_m2p.data = flit; 8724 break; 8725 } 8726 8727 ////////////////////////////////// 8728 case CC_SEND_XRAM_RSP_INVAL_NLINE: 8729 { 8730 uint64_t flit = 0; 8731 8732 DspinDhccpParam::dspin_set( flit, 8733 r_xram_rsp_to_cc_send_nline.read(), 8734 DspinDhccpParam::MULTI_INVAL_NLINE); 8735 p_dspin_m2p.eop = true; 8736 p_dspin_m2p.write = true; 8737 p_dspin_m2p.data = flit; 8738 break; 8739 } 8740 8741 ///////////////////////////////////// 8742 case CC_SEND_CONFIG_BRDCAST_HEADER: 8743 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: 8744 case CC_SEND_WRITE_BRDCAST_HEADER: 8745 case CC_SEND_CAS_BRDCAST_HEADER: 8746 { 8747 uint64_t flit = 0; 8748 8749 DspinDhccpParam::dspin_set( flit, 8750 m_broadcast_boundaries, 8751 DspinDhccpParam::BROADCAST_BOX); 8752 8753 DspinDhccpParam::dspin_set( flit, 8754 m_cc_global_id, 8755 DspinDhccpParam::BROADCAST_SRCID); 8756 8757 DspinDhccpParam::dspin_set( flit, 8758 1ULL, 8759 DspinDhccpParam::M2P_BC); 8760 p_dspin_m2p.write = true; 8761 p_dspin_m2p.data = flit; 8762 break; 8763 } 8764 //////////////////////////////////// 8765 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: 8766 { 8767 uint64_t flit = 0; 8768 DspinDhccpParam::dspin_set( flit, 8769 r_xram_rsp_to_cc_send_nline.read(), 8770 DspinDhccpParam::BROADCAST_NLINE); 8771 p_dspin_m2p.write = true; 8772 p_dspin_m2p.eop = true; 8773 p_dspin_m2p.data = flit; 8774 break; 8775 } 8776 ////////////////////////////////// 8777 case CC_SEND_CONFIG_BRDCAST_NLINE: 8778 { 8779 uint64_t flit = 0; 8780 DspinDhccpParam::dspin_set( flit, 8781 r_config_to_cc_send_nline.read(), 8782 DspinDhccpParam::BROADCAST_NLINE); 8783 p_dspin_m2p.write = true; 8784 p_dspin_m2p.eop = true; 8785 p_dspin_m2p.data = flit; 8786 break; 8787 } 8788 ///////////////////////////////// 8789 case CC_SEND_WRITE_BRDCAST_NLINE: 8790 { 8791 uint64_t flit = 0; 8792 DspinDhccpParam::dspin_set( flit, 8793 r_write_to_cc_send_nline.read(), 8794 
DspinDhccpParam::BROADCAST_NLINE); 8795 p_dspin_m2p.write = true; 8796 p_dspin_m2p.eop = true; 8797 p_dspin_m2p.data = flit; 8798 break; 8799 } 8800 /////////////////////////////// 8801 case CC_SEND_CAS_BRDCAST_NLINE: 8802 { 8803 uint64_t flit = 0; 8804 DspinDhccpParam::dspin_set( flit, 8805 r_cas_to_cc_send_nline.read(), 8806 DspinDhccpParam::BROADCAST_NLINE); 8807 p_dspin_m2p.write = true; 8808 p_dspin_m2p.eop = true; 8809 p_dspin_m2p.data = flit; 8810 break; 8811 } 8812 /////////////////////////////// 8813 case CC_SEND_WRITE_UPDT_HEADER: 8814 { 8815 if(not m_write_to_cc_send_inst_fifo.rok()) break; 8816 8817 uint8_t multi_updt_type; 8818 if(m_write_to_cc_send_inst_fifo.read()) 8819 { 8820 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 8821 } 8822 else 8823 { 8824 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 8825 } 8826 8827 uint64_t flit = 0; 8828 uint64_t dest = 8829 m_write_to_cc_send_srcid_fifo.read() << 8830 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8831 8832 DspinDhccpParam::dspin_set( 8833 flit, 8834 dest, 8835 DspinDhccpParam::MULTI_UPDT_DEST); 8836 8837 DspinDhccpParam::dspin_set( 8838 flit, 8839 m_cc_global_id, 8840 DspinDhccpParam::MULTI_UPDT_SRCID); 8841 8842 DspinDhccpParam::dspin_set( 8843 flit, 8844 r_write_to_cc_send_trdid.read(), 8845 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 8846 8847 DspinDhccpParam::dspin_set( 8848 flit, 8849 multi_updt_type, 8850 DspinDhccpParam::M2P_TYPE); 8851 8852 p_dspin_m2p.write = true; 8853 p_dspin_m2p.data = flit; 8854 8855 break; 8856 } 8857 ////////////////////////////// 8858 case CC_SEND_WRITE_UPDT_NLINE: 8859 { 8860 uint64_t flit = 0; 8861 8862 DspinDhccpParam::dspin_set( 8863 flit, 8864 r_write_to_cc_send_index.read(), 8865 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 8866 8867 DspinDhccpParam::dspin_set( 8868 flit, 8869 r_write_to_cc_send_nline.read(), 8870 DspinDhccpParam::MULTI_UPDT_NLINE); 8871 8872 p_dspin_m2p.write = true; 8873 p_dspin_m2p.data = flit; 8874 8875 break; 8876 } 8877 ///////////////////////////// 8878 case CC_SEND_WRITE_UPDT_DATA: 8879 { 8880 8881 uint8_t multi_updt_cpt = 8882 r_cc_send_cpt.read() + r_write_to_cc_send_index.read(); 8883 8884 uint8_t multi_updt_be = r_write_to_cc_send_be[multi_updt_cpt].read(); 8885 uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read(); 8886 8887 uint64_t flit = 0; 8888 8889 DspinDhccpParam::dspin_set( 8890 flit, 8891 multi_updt_be, 8892 DspinDhccpParam::MULTI_UPDT_BE); 8893 8894 DspinDhccpParam::dspin_set( 8895 flit, 8896 multi_updt_data, 8897 DspinDhccpParam::MULTI_UPDT_DATA); 8898 8899 p_dspin_m2p.write = true; 8900 p_dspin_m2p.eop = (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)); 8901 p_dspin_m2p.data = flit; 8902 8903 break; 8904 } 8905 //////////////////////////// 8906 case CC_SEND_CAS_UPDT_HEADER: 8907 { 8908 if (not m_cas_to_cc_send_inst_fifo.rok()) break; 8909 8910 uint8_t multi_updt_type; 8911 if(m_cas_to_cc_send_inst_fifo.read()) 8912 { 8913 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST; 8914 } 8915 else 8916 { 8917 multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA; 8918 } 8919 8920 uint64_t flit = 0; 8921 uint64_t dest = 8922 m_cas_to_cc_send_srcid_fifo.read() << 8923 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8924 8925 DspinDhccpParam::dspin_set( 8926 flit, 8927 dest, 8928 DspinDhccpParam::MULTI_UPDT_DEST); 8929 8930 DspinDhccpParam::dspin_set( 8931 flit, 8932 m_cc_global_id, 8933 DspinDhccpParam::MULTI_UPDT_SRCID); 8934 8935 DspinDhccpParam::dspin_set( 8936 flit, 8937 
r_cas_to_cc_send_trdid.read(), 8938 DspinDhccpParam::MULTI_UPDT_UPDT_INDEX); 8939 8940 DspinDhccpParam::dspin_set( 8941 flit, 8942 multi_updt_type, 8943 DspinDhccpParam::M2P_TYPE); 8944 8945 p_dspin_m2p.write = true; 8946 p_dspin_m2p.data = flit; 8947 8948 break; 8949 } 8950 //////////////////////////// 8951 case CC_SEND_CAS_UPDT_NLINE: 8952 { 8953 uint64_t flit = 0; 8954 8955 DspinDhccpParam::dspin_set( 8956 flit, 8957 r_cas_to_cc_send_index.read(), 8958 DspinDhccpParam::MULTI_UPDT_WORD_INDEX); 8959 8960 DspinDhccpParam::dspin_set( 8961 flit, 8962 r_cas_to_cc_send_nline.read(), 8963 DspinDhccpParam::MULTI_UPDT_NLINE); 8964 8965 p_dspin_m2p.write = true; 8966 p_dspin_m2p.data = flit; 8967 8968 break; 8969 } 8970 /////////////////////////// 8971 case CC_SEND_CAS_UPDT_DATA: 8972 { 8973 uint64_t flit = 0; 8974 8975 DspinDhccpParam::dspin_set( 8976 flit, 8977 0xF, 8978 DspinDhccpParam::MULTI_UPDT_BE); 8979 8980 DspinDhccpParam::dspin_set( 8981 flit, 8982 r_cas_to_cc_send_wdata.read(), 8983 DspinDhccpParam::MULTI_UPDT_DATA); 8984 8985 p_dspin_m2p.write = true; 8986 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 8987 p_dspin_m2p.data = flit; 8988 8989 break; 8990 } 8991 //////////////////////////////// 8992 case CC_SEND_CAS_UPDT_DATA_HIGH: 8993 { 8994 uint64_t flit = 0; 8995 8996 DspinDhccpParam::dspin_set( 8997 flit, 8998 0xF, 8999 DspinDhccpParam::MULTI_UPDT_BE); 9000 9001 DspinDhccpParam::dspin_set( 9002 flit, 9003 r_cas_to_cc_send_wdata_high.read(), 9004 DspinDhccpParam::MULTI_UPDT_DATA); 9005 9006 p_dspin_m2p.write = true; 9007 p_dspin_m2p.eop = true; 9008 p_dspin_m2p.data = flit; 9009 9010 break; 9011 } 3350 9012 } 3351 m_trt.set(r_write_trt_index.read(), 3352 true, // read request to XRAM 3353 m_nline[(addr_t)(r_write_address.read())], 3354 r_write_srcid.read(), 3355 r_write_trdid.read(), 3356 r_write_pktid.read(), 3357 false, // not a processor read 3358 0, // not a single word 3359 0, // word index 3360 be_vector, 3361 data_vector); 3362 r_write_fsm = WRITE_MISS_XRAM_REQ; 3363 3364 #if DEBUG_MEMC_WRITE 3365 if(m_debug) 3366 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 3367 #endif 3368 } 3369 break; 3370 } 3371 3372 ///////////////////////// 3373 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 3374 { 3375 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3376 { 3377 std::vector<be_t> be_vector; 3378 std::vector<data_t> data_vector; 3379 be_vector.clear(); 3380 data_vector.clear(); 3381 for(size_t i=0; i<m_words; i++) 9013 9014 //////////////////////////////////////////////////////////////////// 9015 // p_dspin_clack port (CLEANUP FSM) 9016 //////////////////////////////////////////////////////////////////// 9017 9018 if ( r_cleanup_fsm.read() == CLEANUP_SEND_CLACK ) 3382 9019 { 3383 be_vector.push_back(r_write_be[i]); 3384 data_vector.push_back(r_write_data[i]); 3385 } 3386 m_trt.write_data_mask( r_write_trt_index.read(), 3387 be_vector, 3388 data_vector ); 3389 r_write_fsm = WRITE_RSP; 3390 3391 #if DEBUG_MEMC_WRITE 3392 if(m_debug) 3393 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 3394 #endif 3395 } 3396 break; 3397 } 3398 ///////////////////////// 3399 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3400 { 3401 if( not r_write_to_ixr_cmd_req.read() ) 3402 { 3403 r_write_to_ixr_cmd_req = true; 3404 r_write_to_ixr_cmd_put = false; 3405 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3406 r_write_fsm = WRITE_RSP; 3407 3408 
#if DEBUG_MEMC_WRITE 3409 if(m_debug) 3410 std::cout << " <MEMC " << name() << " WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 3411 #endif 3412 } 3413 break; 3414 } 3415 /////////////////////// 3416 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3417 // the cache line must be erased in mem-cache, and written 3418 // into XRAM. we read the cache and complete the buffer 3419 { 3420 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3421 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3422 3423 // update local buffer 3424 size_t set = m_y[(addr_t)(r_write_address.read())]; 3425 size_t way = r_write_way.read(); 3426 for(size_t word=0 ; word<m_words ; word++) 3427 { 3428 data_t mask = 0; 3429 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3430 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3431 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3432 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3433 3434 // complete only if mask is not null (for energy consumption) 3435 r_write_data[word] = (r_write_data[word].read() & mask) | 3436 (m_cache_data.read(way, set, word) & ~mask); 3437 } // end for 3438 3439 r_write_fsm = WRITE_BC_TRT_LOCK; 3440 3441 #if DEBUG_MEMC_WRITE 3442 if(m_debug) 3443 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3444 << " Read the cache to complete local buffer" << std::endl; 3445 #endif 3446 break; 3447 } 3448 /////////////////////// 3449 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3450 { 3451 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3452 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3453 3454 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3455 { 3456 size_t wok_index = 0; 3457 bool wok = not m_trt.full(wok_index); 3458 if( wok ) 9020 uint8_t cleanup_ack_type; 9021 if(r_cleanup_inst.read()) 3459 9022 { 3460 r_write_trt_index = wok_index; 3461 r_write_fsm = WRITE_BC_IVT_LOCK; 3462 } 3463 else // wait an empty slot in TRT 3464 { 3465 r_write_fsm = WRITE_WAIT; 3466 } 3467 3468 #if DEBUG_MEMC_WRITE 3469 if(m_debug) 3470 std::cout << " <MEMC " << name() << " WRITE_BC_TRT_LOCK> Check TRT" 3471 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3472 #endif 3473 m_cpt_write_fsm_n_trt_lock++; 3474 } 3475 3476 m_cpt_write_fsm_trt_lock++; 3477 3478 break; 3479 } 3480 ////////////////////// 3481 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3482 { 3483 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3484 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3485 3486 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3487 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3488 3489 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3490 { 3491 bool wok = false; 3492 size_t index = 0; 3493 size_t srcid = r_write_srcid.read(); 3494 size_t trdid = r_write_trdid.read(); 3495 size_t pktid = r_write_pktid.read(); 3496 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3497 size_t nb_copies = r_write_count.read(); 3498 3499 wok = m_ivt.set(false, // it's an inval transaction 3500 true, // it's a broadcast 3501 true, // response required 3502 false, // no acknowledge required 3503 srcid, 3504 trdid, 3505 pktid, 3506 nline, 3507 nb_copies, 3508 index); 3509 #if DEBUG_MEMC_WRITE 3510 if( m_debug and wok ) 3511 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 3512 << " / nb_copies = " << 
r_write_count.read() << std::endl; 3513 #endif 3514 r_write_upt_index = index; 3515 3516 if( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3517 else r_write_fsm = WRITE_WAIT; 3518 } 3519 break; 3520 } 3521 //////////////////////// 3522 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3523 // and invalidate the line in directory 3524 { 3525 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3526 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3527 3528 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3529 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3530 3531 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3532 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3533 3534 // register PUT request in TRT 3535 std::vector<data_t> data_vector; 3536 data_vector.clear(); 3537 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3538 m_trt.set( r_write_trt_index.read(), 3539 false, // PUT request 3540 m_nline[(addr_t)(r_write_address.read())], 3541 0, // unused 3542 0, // unused 3543 0, // unused 3544 false, // not a processor read 3545 0, // unused 3546 0, // unused 3547 std::vector<be_t> (m_words,0), 3548 data_vector ); 3549 3550 // invalidate directory entry 3551 DirectoryEntry entry; 3552 entry.valid = false; 3553 entry.dirty = false; 3554 entry.tag = 0; 3555 entry.is_cnt = false; 3556 entry.lock = false; 3557 entry.owner.srcid = 0; 3558 entry.owner.inst = false; 3559 entry.ptr = 0; 3560 entry.count = 0; 3561 size_t set = m_y[(addr_t)(r_write_address.read())]; 3562 size_t way = r_write_way.read(); 3563 3564 m_cache_directory.write(set, way, entry); 3565 3566 #if DEBUG_MEMC_WRITE 3567 if(m_debug) 3568 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3569 << " address = " << r_write_address.read() << std::endl; 3570 #endif 3571 r_write_fsm = WRITE_BC_CC_SEND; 3572 break; 3573 } 3574 3575 ////////////////////// 3576 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3577 { 3578 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3579 { 3580 r_write_to_cc_send_multi_req = false; 3581 r_write_to_cc_send_brdcast_req = true; 3582 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3583 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3584 r_write_to_cc_send_index = 0; 3585 r_write_to_cc_send_count = 0; 3586 3587 for(size_t i=0; i<m_words ; i++) // what is this for loop for? (AG) 3588 { 3589 r_write_to_cc_send_be[i]=0; 3590 r_write_to_cc_send_data[i] = 0; 3591 } 3592 r_write_fsm = WRITE_BC_XRAM_REQ; 3593 3594 #if DEBUG_MEMC_WRITE 3595 if(m_debug) 3596 std::cout << " <MEMC " << name() 3597 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3598 #endif 3599 } 3600 break; 3601 } 3602 3603 /////////////////////// 3604 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3605 { 3606 if( not r_write_to_ixr_cmd_req.read() ) 3607 { 3608 r_write_to_ixr_cmd_req = true; 3609 r_write_to_ixr_cmd_put = true; 3610 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3611 r_write_fsm = WRITE_IDLE; 3612 3613 #if DEBUG_MEMC_WRITE 3614 if(m_debug) 3615 std::cout << " <MEMC " << name() 3616 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3617 #endif 3618 } 3619 break; 3620 } 3621 } // end switch r_write_fsm 3622 3623 /////////////////////////////////////////////////////////////////////// 3624 // IXR_CMD FSM 3625 /////////////////////////////////////////////////////////////////////// 3626 // The IXR_CMD fsm controls the command packets to the XRAM: 3627 // It handles requests from six client FSMs with a round-robin priority: 3628 // READ > WRITE > CAS > XRAM_RSP > CLEANUP > CONFIG 3629 // 3630 // - It sends a single flit VCI read to the XRAM in case of 3631 // GET request posted by the READ, WRITE or CAS FSMs. 3632 // - It sends a multi-flit VCI write in case of PUT request posted by 3633 // the XRAM_RSP, WRITE, CAS, CLEANUP, or CONFIG FSMs. 3634 // 3635 // For each client, there are three steps: 3636 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3637 // - IXR_CMD_*_TRT : access to TRT for address and data 3638 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3639 // 3640 // The address and data to be written (for a PUT) are stored in TRT. 3641 // The trdid field always contains the TRT entry index.
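// Illustrative sketch (not part of the generated code): because trdid always carries the
// TRT index, the response path can recover the transaction with a single table lookup.
// This assumes p_vci_ixr.trdid is driven from r_ixr_cmd_trdid in the output section of
// this model (the read side below is taken from the IXR_RSP FSM further down):
//
//     // command side (IXR_CMD FSM)
//     p_vci_ixr.trdid = r_ixr_cmd_trdid.read();        // TRT entry index
//
//     // response side (IXR_RSP FSM)
//     size_t trt_index = p_vci_ixr.rtrdid.read();      // same index comes back
//     TransactionTabEntry entry = m_trt.read(trt_index);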
3642 //////////////////////////////////////////////////////////////////////// 3643 3644 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3645 3646 switch(r_ixr_cmd_fsm.read()) 3647 { 3648 /////////////////////// 3649 case IXR_CMD_READ_IDLE: 3650 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3651 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3652 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3653 #if ODCCP_NON_INCLUSIVE 3654 else if(r_cleanup_to_ixr_cmd_req.read()) 3655 { 3656 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3657 r_ixr_cmd_word = 0; 3658 } 3659 #else 3660 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3661 #endif 3662 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3663 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3664 break; 3665 //////////////////////// 3666 case IXR_CMD_WRITE_IDLE: 3667 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3668 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3669 #if ODCCP_NON_INCLUSIVE 3670 else if(r_cleanup_to_ixr_cmd_req.read()) 3671 { 3672 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3673 r_ixr_cmd_word = 0; 3674 } 3675 #else 3676 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3677 #endif 3678 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3679 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3680 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3681 break; 3682 ////////////////////// 3683 case IXR_CMD_CAS_IDLE: 3684 if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3685 #if ODCCP_NON_INCLUSIVE 3686 else if(r_cleanup_to_ixr_cmd_req.read()) 3687 { 3688 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3689 r_ixr_cmd_word = 0; 3690 } 3691 #else 3692 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3693 #endif 3694 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3695 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3696 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3697 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3698 break; 3699 /////////////////////// 3700 case IXR_CMD_XRAM_IDLE: 3701 #if ODCCP_NON_INCLUSIVE 3702 if(r_cleanup_to_ixr_cmd_req.read()) 3703 { 3704 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3705 r_ixr_cmd_word = 0; 3706 } 3707 #else 3708 if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3709 #endif 3710 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3711 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3712 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3713 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3714 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3715 break; 3716 //////////////////////// 3717 case IXR_CMD_CLEANUP_IDLE: 3718 if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3719 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3720 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3721 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3722 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3723 #if ODCCP_NON_INCLUSIVE 3724 
else if(r_cleanup_to_ixr_cmd_req.read()) 3725 { 3726 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3727 r_ixr_cmd_word = 0; 3728 } 3729 #else 3730 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3731 #endif 3732 break; 3733 ///////////////////////// 3734 case IXR_CMD_CONFIG_IDLE: 3735 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3736 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3737 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3738 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3739 #if ODCCP_NON_INCLUSIVE 3740 else if(r_cleanup_to_ixr_cmd_req.read()) 3741 { 3742 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3743 r_ixr_cmd_word = 0; 3744 } 3745 #else 3746 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3747 #endif 3748 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3749 break; 3750 3751 3752 ////////////////////// 3753 case IXR_CMD_READ_TRT: // access TRT for a GET 3754 { 3755 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3756 { 3757 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 3758 r_ixr_cmd_address = entry.nline * (m_words<<2); 3759 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3760 r_ixr_cmd_get = true; 3761 r_ixr_cmd_word = 0; 3762 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3763 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3764 3765 #if DEBUG_MEMC_IXR_CMD 3766 if(m_debug) 3767 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 3768 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3769 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3770 #endif 3771 } 3772 break; 3773 } 3774 /////////////////////// 3775 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3776 { 3777 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3778 { 3779 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 3780 r_ixr_cmd_address = entry.nline * (m_words<<2); 3781 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3782 r_ixr_cmd_get = entry.xram_read; 3783 r_ixr_cmd_word = 0; 3784 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3785 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3786 3787 #if DEBUG_MEMC_IXR_CMD 3788 if(m_debug) 3789 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3790 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3791 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3792 #endif 3793 } 3794 break; 3795 } 3796 ///////////////////// 3797 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3798 { 3799 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3800 { 3801 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 3802 r_ixr_cmd_address = entry.nline * (m_words<<2); 3803 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3804 r_ixr_cmd_get = entry.xram_read; 3805 r_ixr_cmd_word = 0; 3806 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3807 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3808 3809 #if DEBUG_MEMC_IXR_CMD 3810 if(m_debug) 3811 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3812 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3813 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3814 #endif 3815 } 3816 break; 3817 } 3818 ////////////////////// 3819 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3820 
{ 3821 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3822 { 3823 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 3824 r_ixr_cmd_address = entry.nline * (m_words<<2); 3825 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3826 r_ixr_cmd_get = false; 3827 r_ixr_cmd_word = 0; 3828 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3829 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3830 3831 #if DEBUG_MEMC_IXR_CMD 3832 if(m_debug) 3833 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3834 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3835 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3836 #endif 3837 } 3838 break; 3839 } 3840 ////////////////////// 3841 case IXR_CMD_CLEANUP_TRT: // access TRT for a PUT 3842 { 3843 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3844 { 3845 3846 TransactionTabEntry entry = m_trt.read( r_cleanup_to_ixr_cmd_index.read() ); 3847 r_ixr_cmd_address = entry.nline * (m_words<<2); 3848 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 3849 r_ixr_cmd_get = false; 3850 r_ixr_cmd_word = 0; 3851 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3852 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3853 3854 #if DEBUG_MEMC_IXR_CMD 3855 if(m_debug) 3856 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 3857 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 3858 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3859 #endif 3860 } 3861 break; 3862 } 3863 //////////////////////// 3864 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3865 { 3866 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3867 { 3868 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 3869 r_ixr_cmd_address = entry.nline * (m_words<<2); 3870 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3871 r_ixr_cmd_get = false; 3872 r_ixr_cmd_word = 0; 3873 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3874 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3875 3876 #if DEBUG_MEMC_IXR_CMD 3877 if(m_debug) 3878 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3879 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3880 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3881 #endif 3882 } 3883 break; 3884 } 3885 3886 /////////////////////// 3887 case IXR_CMD_READ_SEND: // send a get from READ FSM 3888 { 3889 if(p_vci_ixr.cmdack) 3890 { 3891 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3892 r_read_to_ixr_cmd_req = false; 3893 3894 #if DEBUG_MEMC_IXR_CMD 3895 if(m_debug) 3896 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3897 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3898 #endif 3899 } 3900 break; 3901 } 3902 //////////////////////// 3903 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3904 { 3905 if(p_vci_ixr.cmdack) 3906 { 3907 if(r_write_to_ixr_cmd_put.read()) // PUT 3908 { 3909 if(r_ixr_cmd_word.read() == (m_words - 2)) 3910 { 3911 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3912 r_write_to_ixr_cmd_req = false; 3913 } 3914 else 3915 { 3916 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3917 } 3918 3919 #if DEBUG_MEMC_IXR_CMD 3920 if(m_debug) 3921 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3922 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3923 #endif 3924 } 3925 else 
// GET 3926 { 3927 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3928 r_write_to_ixr_cmd_req = false; 3929 3930 #if DEBUG_MEMC_IXR_CMD 3931 if(m_debug) 3932 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3933 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3934 #endif 3935 } 3936 } 3937 break; 3938 } 3939 ////////////////////// 3940 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3941 { 3942 if(p_vci_ixr.cmdack) 3943 { 3944 if(r_cas_to_ixr_cmd_put.read()) // PUT 3945 { 3946 if(r_ixr_cmd_word.read() == (m_words - 2)) 3947 { 3948 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3949 r_cas_to_ixr_cmd_req = false; 3950 } 3951 else 3952 { 3953 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3954 } 3955 3956 #if DEBUG_MEMC_IXR_CMD 3957 if(m_debug) 3958 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3959 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3960 #endif 3961 } 3962 else // GET 3963 { 3964 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3965 r_cas_to_ixr_cmd_req = false; 3966 3967 #if DEBUG_MEMC_IXR_CMD 3968 if(m_debug) 3969 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3970 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3971 #endif 3972 } 3973 } 3974 break; 3975 } 3976 /////////////////////// 3977 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3978 { 3979 if(p_vci_ixr.cmdack.read()) 3980 { 3981 if(r_ixr_cmd_word.read() == (m_words - 2)) 3982 { 3983 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3984 r_xram_rsp_to_ixr_cmd_req = false; 9023 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST; 3985 9024 } 3986 9025 else 3987 9026 { 3988 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2;9027 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA; 3989 9028 } 3990 #if DEBUG_MEMC_IXR_CMD 3991 if(m_debug) 3992 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3993 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3994 #endif 3995 } 3996 break; 3997 } 3998 3999 //////////////////////// 4000 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4001 { 4002 if(p_vci_ixr.cmdack.read()) 4003 { 4004 if(r_ixr_cmd_word.read() == (m_words - 2)) 4005 { 4006 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 4007 r_cleanup_to_ixr_cmd_req = false; 4008 //r_ixr_cmd_word = 0; 4009 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 9029 9030 uint64_t flit = 0; 9031 uint64_t dest = r_cleanup_srcid.read() << 9032 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 9033 9034 DspinDhccpParam::dspin_set( 9035 flit, 9036 dest, 9037 DspinDhccpParam::CLACK_DEST); 9038 9039 DspinDhccpParam::dspin_set( 9040 flit, 9041 r_cleanup_nline.read() & 0xFFFF, 9042 DspinDhccpParam::CLACK_SET); 9043 9044 DspinDhccpParam::dspin_set( 9045 flit, 9046 r_cleanup_way_index.read(), 9047 DspinDhccpParam::CLACK_WAY); 9048 9049 DspinDhccpParam::dspin_set( 9050 flit, 9051 cleanup_ack_type, 9052 DspinDhccpParam::CLACK_TYPE); 9053 9054 p_dspin_clack.eop = true; 9055 p_dspin_clack.write = true; 9056 p_dspin_clack.data = flit; 4010 9057 } 4011 9058 else 4012 9059 { 4013 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 9060 p_dspin_clack.write = false; 9061 p_dspin_clack.eop = false; 9062 p_dspin_clack.data = 0; 4014 9063 } 4015 9064 4016 #if DEBUG_MEMC_IXR_CMD 4017 if(m_debug) 9065 /////////////////////////////////////////////////////////////////// 9066 // p_dspin_p2m port (CC_RECEIVE FSM) 9067 
/////////////////////////////////////////////////////////////////// 9068 // 9069 switch(r_cc_receive_fsm.read()) 4018 9070 { 4019 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::endl; 9071 case CC_RECEIVE_IDLE: 9072 { 9073 p_dspin_p2m.read = false; 9074 break; 9075 } 9076 case CC_RECEIVE_CLEANUP: 9077 case CC_RECEIVE_CLEANUP_EOP: 9078 { 9079 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok(); 9080 break; 9081 } 9082 case CC_RECEIVE_MULTI_ACK: 9083 { 9084 p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok(); 9085 break; 9086 } 4020 9087 } 4021 #endif 4022 } 4023 break; 4024 } 4025 4026 ///////////////////////// 4027 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 4028 { 4029 if(p_vci_ixr.cmdack.read()) 4030 { 4031 if(r_ixr_cmd_word.read() == (m_words - 2)) 4032 { 4033 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 4034 r_config_to_ixr_cmd_req = false; 4035 } 4036 else 4037 { 4038 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4039 } 4040 4041 #if DEBUG_MEMC_IXR_CMD 4042 if(m_debug) 4043 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 4044 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4045 #endif 4046 } 4047 break; 4048 } 4049 } // end switch r_ixr_cmd_fsm 4050 4051 //////////////////////////////////////////////////////////////////////////// 4052 // IXR_RSP FSM 4053 //////////////////////////////////////////////////////////////////////////// 4054 // The IXR_RSP FSM receives the response packets from the XRAM, 4055 // for both PUT transaction, and GET transaction. 4056 // 4057 // - A response to a PUT request is a single-cell VCI packet. 4058 // The TRT index is contained in the RTRDID field. 4059 // The FSM takes the lock protecting the TRT, and the corresponding 4060 // entry is erased. If an acknowledge was required (in case of software SYNC) 4061 // the r_config_rsp_lines counter is decremented. 4062 // 4063 // - A response to a GET request is a multi-cell VCI packet. 4064 // The TRT index is contained in the RTRDID field. 4065 // The N cells contain the N words of the cache line in the RDATA field. 4066 // The FSM takes the lock protecting the TRT to store the line in the TRT 4067 // (taking into account the write requests already stored in the TRT). 4068 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 4069 // signal is set to inform the XRAM_RSP FSM. 
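// Illustrative summary (sketch only, using the signal names of this file): the rok[]
// flip-flops implement a one-slot-per-TRT-entry handshake between IXR_RSP (producer)
// and XRAM_RSP (consumer):
//
//     // IXR_RSP side: after the last flit of a GET has been written into the TRT
//     if (eop) r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()] = true;
//
//     // XRAM_RSP side (XRAM_RSP_IDLE): round-robin scan, consume and clear the flag
//     size_t index = (i + old + 1) % m_trt_lines;
//     if (r_ixr_rsp_to_xram_rsp_rok[index])
//     {
//         r_xram_rsp_trt_index             = index;
//         r_ixr_rsp_to_xram_rsp_rok[index] = false;
//     }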
4070 /////////////////////////////////////////////////////////////////////////////// 4071 4072 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 4073 4074 switch(r_ixr_rsp_fsm.read()) 4075 { 4076 ////////////////// 4077 case IXR_RSP_IDLE: // test transaction type: PUT/GET 4078 { 4079 if(p_vci_ixr.rspval.read()) 4080 { 4081 r_ixr_rsp_cpt = 0; 4082 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 4083 4084 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 4085 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 4086 4087 if(p_vci_ixr.reop.read()) // PUT 4088 { 4089 #if ODCCP_NON_INCLUSIVE 4090 if (p_vci_ixr.rtrdid.read() == m_trt_lines) 4091 r_ixr_rsp_fsm = IXR_RSP_ACK; 4092 else 4093 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4094 #else 4095 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4096 #endif 4097 4098 #if DEBUG_MEMC_IXR_RSP 4099 if(m_debug) 4100 std::cout << " <MEMC " << name() 4101 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 4102 #endif 4103 } 4104 else // GET 4105 { 4106 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 4107 4108 #if DEBUG_MEMC_IXR_RSP 4109 if(m_debug) 4110 std::cout << " <MEMC " << name() 4111 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 4112 #endif 4113 } 4114 } 4115 break; 4116 } 4117 //////////////////////// 4118 case IXR_RSP_ACK: // Acknowledge PUT transaction 4119 { 4120 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4121 break; 4122 } 4123 //////////////////////// 4124 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 4125 // decrease the line counter if config request 4126 { 4127 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4128 { 4129 size_t index = r_ixr_rsp_trt_index.read(); 4130 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4131 m_trt.erase(index); 4132 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4133 4134 #if DEBUG_MEMC_IXR_RSP 4135 if(m_debug) 4136 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 4137 << r_ixr_rsp_trt_index.read() << std::endl; 4138 #endif 4139 } 4140 break; 4141 } 4142 ////////////////////// 4143 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 4144 { 4145 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4146 { 4147 size_t index = r_ixr_rsp_trt_index.read(); 4148 size_t word = r_ixr_rsp_cpt.read(); 4149 bool eop = p_vci_ixr.reop.read(); 4150 wide_data_t data = p_vci_ixr.rdata.read(); 4151 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4152 4153 assert(((eop == (word == (m_words-2))) or error) and 4154 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 4155 4156 m_trt.write_rsp( index, 4157 word, 4158 data ); 4159 4160 r_ixr_rsp_cpt = word + 2; 4161 4162 if(eop) 4163 { 4164 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 4165 /*if(p_vci_ixr.rpktid.read()&0xF == 0x9) 4166 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = true; 4167 else 4168 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = false;*/ 4169 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4170 } 4171 4172 #if DEBUG_MEMC_IXR_RSP 4173 if(m_debug) 4174 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 4175 << " index = " << std::dec << index 4176 << " / word = " << word 4177 << " / data = " << std::hex << data << std::endl; 4178 #endif 4179 } 4180 break; 4181 } 4182 } // end swich r_ixr_rsp_fsm 4183 4184 //////////////////////////////////////////////////////////////////////////// 4185 // XRAM_RSP FSM 4186 //////////////////////////////////////////////////////////////////////////// 4187 // The 
XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 4188 // The cache line has been written into the TRT by the IXR_RSP FSM. 4189 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 4190 // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 4191 // of entries in the TRT; they are handled with a round-robin priority. 4192 // 4193 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 4194 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 4195 // It selects a cache slot and saves the victim line in another local buffer 4196 // r_xram_rsp_victim_***. 4197 // It writes the line extracted from TRT in the cache. 4198 // If it was a read MISS, the XRAM_RSP FSM sends a request to the TGT_RSP 4199 // FSM to return the cache line to the registered processor. 4200 // If there is no empty slot, a victim line is evicted, and 4201 // invalidate requests are sent to the L1 caches containing copies. 4202 // If this line is dirty, the XRAM_RSP FSM sends a request to the IXR_CMD 4203 // FSM to save the victim line to the XRAM, and registers the write transaction 4204 // in the TRT (using the entry previously used by the read transaction). 4205 /////////////////////////////////////////////////////////////////////////////// 4206 4207 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 4208 4209 switch(r_xram_rsp_fsm.read()) 4210 { 4211 /////////////////// 4212 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 4213 { 4214 size_t old = r_xram_rsp_trt_index.read(); 4215 size_t lines = m_trt_lines; 4216 for(size_t i=0 ; i<lines ; i++) 4217 { 4218 size_t index = (i+old+1) %lines; 4219 if(r_ixr_rsp_to_xram_rsp_rok[index]) 4220 { 4221 r_xram_rsp_trt_index = index; 4222 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4223 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4224 4225 #if DEBUG_MEMC_XRAM_RSP 4226 if(m_debug) 4227 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 4228 << " Available cache line in TRT:" 4229 << " index = " << std::dec << index << std::endl; 4230 #endif 4231 break; 4232 } 4233 } 4234 break; 4235 } 4236 /////////////////////// 4237 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 4238 // Copy the TRT entry in a local buffer 4239 { 4240 if( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4241 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 4242 { 4243 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4244 size_t index = r_xram_rsp_trt_index.read(); 4245 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 4246 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4247 4248 #if DEBUG_MEMC_XRAM_RSP 4249 if(m_debug) 4250 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 4251 << " Get access to DIR and TRT" << std::endl; 4252 #endif 4253 } 4254 break; 4255 } 4256 /////////////////////// 4257 case XRAM_RSP_TRT_COPY: // Select a victim cache line 4258 // and copy it in a local buffer 4259 { 4260 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4261 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 4262 4263 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4264 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 4265 4266 // selects & extracts a victim line from cache 4267 size_t way = 0; 4268 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 4269 4270 DirectoryEntry victim(m_cache_directory.select(set, way)); 4271 4272 #if ODCCP_NON_INCLUSIVE 4273 bool inval = (victim.count and victim.valid and victim.coherent) ; 4274
#else 4275 bool inval = (victim.count and victim.valid) ; 4276 #endif 4277 4278 // copy the victim line in a local buffer (both data dir) 4279 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 4280 4281 r_xram_rsp_victim_copy = victim.owner.srcid; 4282 4283 r_xram_rsp_victim_coherent = victim.coherent; 4284 r_xram_rsp_victim_copy_inst = victim.owner.inst; 4285 r_xram_rsp_victim_count = victim.count; 4286 r_xram_rsp_victim_ptr = victim.ptr; 4287 r_xram_rsp_victim_way = way; 4288 r_xram_rsp_victim_set = set; 4289 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 4290 r_xram_rsp_victim_is_cnt = victim.is_cnt; 4291 r_xram_rsp_victim_inval = inval ; 4292 r_xram_rsp_victim_dirty = victim.dirty; 4293 4294 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 4295 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4296 4297 #if DEBUG_MEMC_XRAM_RSP 4298 if(m_debug) 4299 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 4300 << " Select a victim slot: " 4301 << " way = " << std::dec << way 4302 << " / set = " << set 4303 << " / victim coherent = " << victim.coherent 4304 << " / victim owner id = " << victim.owner.srcid 4305 << " / inval_required = " << inval << std::endl; 4306 #endif 4307 break; 4308 } 4309 /////////////////////// 4310 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 4311 // to check a possible pending inval 4312 { 4313 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4314 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 4315 4316 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4317 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 4318 4319 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 4320 { 4321 size_t index = 0; 4322 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 4323 { 4324 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4325 4326 #if DEBUG_MEMC_XRAM_RSP 4327 if(m_debug) 4328 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4329 << " Get acces to IVT, but line invalidation registered" 4330 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4331 << " / index = " << std::dec << index << std::endl; 4332 #endif 4333 4334 } 4335 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 4336 { 4337 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4338 4339 #if DEBUG_MEMC_XRAM_RSP 4340 if(m_debug) 4341 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4342 << " Get acces to IVT, but inval required and IVT full" << std::endl; 4343 #endif 4344 } 4345 else 4346 { 4347 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 4348 4349 #if DEBUG_MEMC_XRAM_RSP 4350 if(m_debug) 4351 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4352 << " Get acces to IVT / no pending inval request" << std::endl; 4353 #endif 4354 } 4355 } 4356 break; 4357 } 4358 ///////////////////////// 4359 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 4360 { 4361 4362 #if DEBUG_MEMC_XRAM_RSP 4363 if(m_debug) 4364 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 4365 << " Release all locks and retry" << std::endl; 4366 #endif 4367 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4368 break; 4369 } 4370 /////////////////////// 4371 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4372 // erases the TRT entry if victim not dirty, 4373 // and set inval request in IVT if required 4374 { 4375 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4376 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4377 4378 assert( 
(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4379 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4380 4381 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4382 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4383 4384 // check if this is an instruction read, this means pktid is either 4385 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4386 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4387 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4388 4389 // check if this is a cached read, this means pktid is either 4390 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4391 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4392 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4393 4394 bool dirty = false; 4395 4396 // update cache data 4397 size_t set = r_xram_rsp_victim_set.read(); 4398 size_t way = r_xram_rsp_victim_way.read(); 4399 4400 for(size_t word=0; word<m_words ; word++) 4401 { 4402 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4403 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4404 } 4405 4406 // update cache directory 4407 DirectoryEntry entry; 4408 entry.valid = true; 4409 entry.is_cnt = false; 4410 entry.lock = false; 4411 entry.dirty = dirty; 4412 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4413 entry.ptr = 0; 4414 if(cached_read) 4415 { 4416 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4417 #if L1_MULTI_CACHE 4418 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 4419 #endif 4420 entry.owner.inst = inst_read; 4421 entry.count = 1; 4422 4423 } 4424 else 4425 { 4426 entry.owner.srcid = 0; 4427 #if L1_MULTI_CACHE 4428 entry.owner.cache_id = 0; 4429 #endif 4430 entry.owner.inst = 0; 4431 entry.count = 0; 4432 } 4433 4434 /*ODCCP*/ //if pktid = 0x9 that means line no coherent 4435 if(r_xram_rsp_trt_buf.pktid == 0x9){ 4436 entry.coherent = false; 4437 } 4438 else{ 4439 entry.coherent = true; 4440 } 4441 4442 m_cache_directory.write(set, way, entry); 4443 4444 // register invalid request in IVT for victim line if required 4445 if(r_xram_rsp_victim_inval.read()) 4446 { 4447 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4448 size_t index = 0; 4449 size_t count_copies = r_xram_rsp_victim_count.read(); 4450 4451 bool wok = m_ivt.set(false, // it's an inval transaction 4452 broadcast, // set broadcast bit 4453 false, // no response required 4454 false, // no acknowledge required 4455 0, // srcid 4456 0, // trdid 4457 0, // pktid 4458 r_xram_rsp_victim_nline.read(), 4459 count_copies, 4460 index); 4461 4462 r_xram_rsp_ivt_index = index; 4463 4464 assert( wok and 4465 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4466 } 4467 4468 #if DEBUG_MEMC_XRAM_RSP 4469 if(m_debug) 4470 { 4471 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 4472 << " Cache update: " 4473 << " way = " << std::dec << way 4474 << " / set = " << set 4475 << " / owner_id = " << std::hex << entry.owner.srcid 4476 << " / owner_ins = " << std::dec << entry.owner.inst 4477 << " / count = " << entry.count 4478 << " / is_cnt = " << entry.is_cnt << std::endl; 4479 if(r_xram_rsp_victim_inval.read()) 4480 std::cout << " Invalidation request for address " 4481 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 4482 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4483 } 4484 #endif 4485 4486 #if ODCCP_NON_INCLUSIVE 4487 if (!r_xram_rsp_victim_dirty.read()) m_trt.erase(r_xram_rsp_trt_index.read()); 4488 4489 if 
(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4490 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4491 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4492 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4493 #else 4494 // If the victim is not dirty and coherent or victim's count egal 0 , we don't need another XRAM put transaction, 4495 // and we can erase the TRT entry 4496 if(!r_xram_rsp_victim_dirty.read() and (r_xram_rsp_victim_coherent.read() or (r_xram_rsp_victim_count.read() == 0))) m_trt.erase(r_xram_rsp_trt_index.read()); 4497 4498 // Next state 4499 if(r_xram_rsp_victim_dirty.read() or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) 4500 r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4501 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4502 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4503 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4504 #endif 4505 break; 4506 } 4507 //////////////////////// 4508 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 4509 { 4510 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4511 { 4512 std::vector<data_t> data_vector; 4513 data_vector.clear(); 4514 for(size_t i=0; i<m_words; i++) 4515 { 4516 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4517 } 4518 m_trt.set( r_xram_rsp_trt_index.read(), 4519 false, // PUT 4520 r_xram_rsp_victim_nline.read(), // line index 4521 0, // unused 4522 0, // unused 4523 0, // unused 4524 false, // not proc_read 4525 0, // unused 4526 0, // unused 4527 std::vector<be_t>(m_words,0xF), 4528 data_vector); 4529 4530 #if DEBUG_MEMC_XRAM_RSP 4531 if(m_debug) 4532 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4533 << " Set TRT entry for the put transaction" 4534 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4535 #endif 4536 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4537 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4538 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4539 } 4540 break; 4541 } 4542 ////////////////////// 4543 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4544 { 4545 if ( not r_xram_rsp_to_tgt_rsp_req.read() ) 4546 { 4547 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4548 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4549 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4550 for(size_t i=0; i < m_words; i++) 4551 { 4552 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4553 } 4554 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4555 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4556 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4557 r_xram_rsp_to_tgt_rsp_rerror = false; 4558 r_xram_rsp_to_tgt_rsp_req = true; 4559 4560 #if ODCCP_NON_INCLUSIVE 4561 if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4562 else if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4563 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4564 #else 4565 if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4566 else if(r_xram_rsp_victim_dirty.read() or 4567 (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) 4568 r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4569 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4570 #endif 4571 4572 #if DEBUG_MEMC_XRAM_RSP 4573 if(m_debug) 4574 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 
4575 << " Request the TGT_RSP FSM to return data:" 4576 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 4577 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4578 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4579 #endif 4580 } 4581 break; 4582 } 4583 //////////////////// 4584 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 4585 { 4586 if(!r_xram_rsp_to_cc_send_multi_req.read() and 4587 !r_xram_rsp_to_cc_send_brdcast_req.read()) 4588 { 4589 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 4590 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 4591 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4592 4593 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 4594 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 4595 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 4596 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 4597 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4598 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4599 xram_rsp_to_cc_send_fifo_put = multi_req; 4600 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4601 4602 #if ODCCP_NON_INCLUSIVE 4603 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4604 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4605 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4606 #else 4607 if(r_xram_rsp_victim_dirty or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) 4608 r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4609 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4610 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4611 #endif 4612 4613 #if DEBUG_MEMC_XRAM_RSP 4614 if(m_debug) 4615 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4616 << " Send an inval request to CC_SEND FSM" 4617 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4618 #endif 4619 } 4620 break; 4621 } 4622 ////////////////////////// 4623 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4624 { 4625 if((!r_xram_rsp_to_ixr_cmd_req.read()) /*and (!r_xram_rsp_to_ixr_cmd_inval_ncc_pending.read())*/) 4626 { 4627 4628 r_xram_rsp_to_ixr_cmd_req = true; 4629 //r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 4630 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4631 /*for(size_t i=0; i<m_words ; i++) 4632 { 4633 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 4634 }*/ 4635 #if (ODCCP_NON_INCLUSIVE == 0) 4636 // if victim is no coherent, we dont request a ixr command 4637 if( (!r_xram_rsp_victim_coherent.read()) and (r_xram_rsp_victim_count.read() == 1) ) 4638 { 4639 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = true; // inval no coherent pending 4640 r_xram_rsp_to_ixr_cmd_req = false; 4641 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4642 break; 4643 } 4644 #endif 4645 4646 m_cpt_write_dirty++; 4647 4648 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4649 r_xram_rsp_victim_inval.read(); 4650 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4651 4652 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4653 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4654 4655 #if DEBUG_MEMC_XRAM_RSP 4656 if(m_debug) 4657 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4658 << " Send the put request to IXR_CMD FSM" 4659 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4660 #endif 4661 } 4662 break; 4663 } 
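// For readability, the test that routes the FSM toward XRAM_RSP_WRITE_DIRTY in the
// inclusive variant (ODCCP_NON_INCLUSIVE == 0), restated as a single predicate; the
// states above (XRAM_RSP_DIR_UPDT, XRAM_RSP_DIR_RSP, XRAM_RSP_INVAL) inline this test:
//
//     bool to_write_dirty = r_xram_rsp_victim_dirty.read() or
//                           (not r_xram_rsp_victim_coherent.read() and
//                            (r_xram_rsp_victim_count.read() == 1));
//
// Note that in XRAM_RSP_WRITE_DIRTY itself the second (non-coherent) case does not
// actually post an IXR_CMD request, presumably because the up-to-date data is expected
// to arrive with the L1 cleanup (see the CLEANUP FSM below).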
4664 ///////////////////////// 4665 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4666 { 4667 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4668 { 4669 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4670 } 4671 4672 #if DEBUG_MEMC_XRAM_RSP 4673 if(m_debug) 4674 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 4675 << " Requesting HEAP lock" << std::endl; 4676 #endif 4677 break; 4678 } 4679 ///////////////////////// 4680 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 4681 { 4682 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4683 { 4684 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 4685 4686 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4687 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4688 xram_rsp_to_cc_send_fifo_put = true; 4689 if(m_xram_rsp_to_cc_send_inst_fifo.wok()) 4690 { 4691 r_xram_rsp_next_ptr = entry.next; 4692 if(entry.next == r_xram_rsp_next_ptr.read()) // last copy 4693 { 4694 r_xram_rsp_to_cc_send_multi_req = true; 4695 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 4696 } 4697 else 4698 { 4699 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4700 } 4701 } 4702 else 4703 { 4704 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4705 } 4706 4707 #if DEBUG_MEMC_XRAM_RSP 4708 if(m_debug) 4709 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 4710 << " Erase copy:" 4711 << " srcid = " << std::hex << entry.owner.srcid 4712 << " / inst = " << std::dec << entry.owner.inst << std::endl; 4713 #endif 4714 } 4715 break; 4716 } 4717 ///////////////////////// 4718 case XRAM_RSP_HEAP_LAST: // last copy 4719 { 4720 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 4721 { 4722 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 4723 << " bad HEAP allocation" << std::endl; 4724 exit(0); 4725 } 4726 size_t free_pointer = m_heap.next_free_ptr(); 4727 4728 HeapEntry last_entry; 4729 last_entry.owner.srcid = 0; 4730 last_entry.owner.inst = false; 4731 if(m_heap.is_full()) 4732 { 4733 last_entry.next = r_xram_rsp_next_ptr.read(); 4734 m_heap.unset_full(); 4735 } 4736 else 4737 { 4738 last_entry.next = free_pointer; 4739 } 4740 4741 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 4742 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 4743 4744 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4745 4746 #if DEBUG_MEMC_XRAM_RSP 4747 if(m_debug) 4748 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 4749 << " Heap housekeeping" << std::endl; 4750 #endif 4751 break; 4752 } 4753 ////////////////////////// 4754 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 4755 { 4756 m_trt.erase(r_xram_rsp_trt_index.read()); 4757 4758 // Next state 4759 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 4760 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4761 4762 #if DEBUG_MEMC_XRAM_RSP 4763 if(m_debug) 4764 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 4765 << " Error reported by XRAM / erase the TRT entry" << std::endl; 4766 #endif 4767 break; 4768 } 4769 //////////////////////// 4770 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 4771 { 4772 if(!r_xram_rsp_to_tgt_rsp_req.read()) 4773 { 4774 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4775 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4776 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4777 for(size_t i=0; i < m_words; i++) 4778 { 4779 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4780 } 4781 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4782 r_xram_rsp_to_tgt_rsp_length = 
r_xram_rsp_trt_buf.read_length; 4783 r_xram_rsp_to_tgt_rsp_rerror = true; 4784 r_xram_rsp_to_tgt_rsp_req = true; 4785 4786 r_xram_rsp_fsm = XRAM_RSP_IDLE; 4787 4788 #if DEBUG_MEMC_XRAM_RSP 4789 if(m_debug) 4790 std::cout << " <MEMC " << name() 4791 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 4792 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 4793 #endif 4794 } 4795 break; 4796 } 4797 } // end swich r_xram_rsp_fsm 4798 4799 //////////////////////////////////////////////////////////////////////////////////// 4800 // CLEANUP FSM 4801 //////////////////////////////////////////////////////////////////////////////////// 4802 // The CLEANUP FSM handles the cleanup request from L1 caches. 4803 // It accesses the cache directory and the heap to update the list of copies. 4804 //////////////////////////////////////////////////////////////////////////////////// 4805 4806 //std::cout << std::endl << "cleanup_fsm" << std::endl; 4807 4808 switch(r_cleanup_fsm.read()) 4809 { 4810 ////////////////// 4811 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4812 { 4813 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4814 4815 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4816 uint32_t srcid = DspinDhccpParam::dspin_get( flit, 4817 DspinDhccpParam::CLEANUP_SRCID); 4818 4819 uint8_t type = DspinDhccpParam::dspin_get( flit, 4820 DspinDhccpParam::P2M_TYPE); 4821 4822 r_cleanup_way_index = DspinDhccpParam::dspin_get( flit, 4823 DspinDhccpParam::CLEANUP_WAY_INDEX); 4824 4825 r_cleanup_nline = DspinDhccpParam::dspin_get( flit, 4826 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4827 4828 /*ODCCP*/ // Cleanup on no coherent line if 1 4829 r_cleanup_ncc = 4830 DspinDhccpParam::dspin_get( 4831 flit, 4832 DspinDhccpParam::CLEANUP_NCC); 4833 4834 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4835 r_cleanup_srcid = srcid; 4836 4837 assert( (srcid < m_initiators) and 4838 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 4839 4840 m_cpt_cleanup++; 4841 cc_receive_to_cleanup_fifo_get = true; 4842 r_cleanup_fsm = CLEANUP_GET_NLINE; 4843 4844 #if DEBUG_MEMC_CLEANUP 4845 if(m_debug) 4846 std::cout << " <MEMC " << name() 4847 << " CLEANUP_IDLE> Cleanup request:" << std::hex 4848 << " owner_id = " << srcid 4849 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) << std::endl; 4850 #endif 4851 break; 4852 } 4853 /////////////////////// 4854 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4855 { 4856 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4857 4858 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4859 4860 4861 addr_t nline = r_cleanup_nline.read() | 4862 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4863 4864 cc_receive_to_cleanup_fifo_get = true; 4865 r_cleanup_nline = nline; 4866 r_cleanup_fsm = CLEANUP_DIR_REQ; 4867 4868 bool eop = DspinDhccpParam::dspin_get(flit, DspinDhccpParam::P2M_EOP); 4869 4870 /*ODCCP*/ // if not eop (more than 2 flits) there is a cleanup no coherent with data 4871 if (!eop) 4872 { 4873 r_cleanup_contains_data = true; // this cleanup contains data 4874 r_cleanup_fsm = CLEANUP_GET_DATA; 4875 r_cleanup_data_index = 0; 4876 } 4877 else 4878 { 4879 r_cleanup_contains_data = false; 4880 r_cleanup_fsm = CLEANUP_DIR_REQ; 4881 } 4882 4883 cc_receive_to_cleanup_fifo_get = true; 4884 r_cleanup_nline = nline; 4885 4886 #if DEBUG_MEMC_CLEANUP 4887 if(m_debug) 4888 std::cout << " <MEMC " << name() 4889 << " CLEANUP_GET_NLINE> Cleanup request:" 
4890 << " / ncc = " << r_cleanup_ncc.read() 4891 << " / address = " << std::hex << nline * m_words * 4 << std::endl; 4892 #endif 4893 break; 4894 } 4895 ///////////////////// 4896 /*ODCCP*/ // We save the cleanup's data into a buffer 4897 case CLEANUP_GET_DATA : 4898 { 4899 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4900 4901 assert (r_cleanup_data_index.read() < m_words and "MEM_CACHE in CLEANUP_GET_DATA : too much flits in cleanup data updt"); 4902 4903 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4904 4905 uint32_t data = 4906 DspinDhccpParam::dspin_get (flit, DspinDhccpParam::CLEANUP_DATA_UPDT); 4907 4908 r_cleanup_data[r_cleanup_data_index.read()] = data; 4909 r_cleanup_data_index = r_cleanup_data_index.read() + 1; 4910 cc_receive_to_cleanup_fifo_get = true; 4911 m_cpt_cleanup_data++; 4912 4913 if (r_cleanup_data_index.read() == m_words - 1) // last flit 4914 { 4915 r_cleanup_fsm = CLEANUP_DIR_REQ; 4916 } 4917 break; 4918 } 4919 ///////////////////// 4920 case CLEANUP_DIR_REQ: // Get the lock to the directory 4921 { 4922 // Get the lock to the directory 4923 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4924 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4925 4926 #if DEBUG_MEMC_CLEANUP 4927 if(m_debug) 4928 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 4929 #endif 4930 break; 4931 } 4932 ////////////////////// 4933 case CLEANUP_DIR_LOCK: // test directory status 4934 { 4935 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4936 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4937 4938 // Read the directory 4939 size_t way = 0; 4940 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 4941 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 4942 r_cleanup_is_cnt = entry.is_cnt; 4943 r_cleanup_dirty = entry.dirty; 4944 r_cleanup_tag = entry.tag; 4945 r_cleanup_lock = entry.lock; 4946 r_cleanup_way = way; 4947 r_cleanup_count = entry.count; 4948 r_cleanup_ptr = entry.ptr; 4949 r_cleanup_copy = entry.owner.srcid; 4950 r_cleanup_copy_inst = entry.owner.inst; 4951 if(entry.valid) // hit : the copy must be cleared 4952 { 4953 assert( (entry.count > 0) and 4954 "MEMC ERROR in CLEANUP_DIR_LOCK state, CLEANUP on valid entry with no copies"); 4955 4956 if((entry.count == 1) or (entry.is_cnt)) // no access to the heap 4957 { 4958 r_cleanup_fsm = CLEANUP_DIR_WRITE; 4959 } 4960 else // access to the heap 4961 { 4962 r_cleanup_fsm = CLEANUP_HEAP_REQ; 4963 } 4964 } 4965 else // miss : check IVT for a pending inval 4966 { 4967 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4968 } 4969 4970 #if DEBUG_MEMC_CLEANUP 4971 if(m_debug) 4972 std::cout << " <MEMC " << name() 4973 << " CLEANUP_DIR_LOCK> Test directory status: " 4974 << std::hex << " address = " << cleanup_address 4975 << " / hit = " << entry.valid 4976 << " / dir_id = " << entry.owner.srcid 4977 << " / dir_ins = " << entry.owner.inst 4978 << " / search_id = " << r_cleanup_srcid.read() 4979 << " / search_ins = " << r_cleanup_inst.read() 4980 << " / count = " << entry.count 4981 << " / is_cnt = " << entry.is_cnt << std::endl; 4982 #endif 4983 break; 4984 } 4985 /////////////////////// 4986 case CLEANUP_DIR_WRITE: // Update the directory entry without heap access 4987 { 4988 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4989 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4990 4991 size_t way = r_cleanup_way.read(); 4992 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 4993 bool match_srcid = (r_cleanup_copy.read() == 
r_cleanup_srcid.read()); 4994 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4995 bool match = match_srcid and match_inst; 4996 4997 assert( (r_cleanup_is_cnt.read() or match) and 4998 "MEMC ERROR in CLEANUP_DIR_LOCK: illegal CLEANUP on valid entry"); 4999 5000 // update the cache directory (for the copies) 5001 DirectoryEntry entry; 5002 entry.valid = true; 5003 entry.is_cnt = r_cleanup_is_cnt.read(); 5004 entry.dirty = r_cleanup_dirty.read() or r_cleanup_contains_data.read(); 5005 entry.tag = r_cleanup_tag.read(); 5006 entry.lock = r_cleanup_lock.read(); 5007 entry.ptr = r_cleanup_ptr.read(); 5008 entry.count = r_cleanup_count.read() - 1; 5009 entry.owner.srcid = 0; 5010 entry.owner.inst = 0; 5011 /*ODCCP*/ // if cleanup contains data we update the cache data 5012 if (r_cleanup_contains_data.read()) 5013 { 5014 for (size_t word = 0; word < m_words; word ++) 5015 { 5016 m_cache_data.write(way, set, word, r_cleanup_data[word].read(), 0xF); 5017 } 5018 } 5019 5020 5021 m_cache_directory.write(set, way, entry); 5022 5023 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5024 5025 #if DEBUG_MEMC_CLEANUP 5026 if(m_debug) 5027 std::cout << " <MEMC " << name() 5028 << " CLEANUP_DIR_WRITE> Update directory:" 5029 << std::hex << " address = " << r_cleanup_nline.read() * m_words * 4 5030 << " / dir_id = " << entry.owner.srcid 5031 << " / dir_ins = " << entry.owner.inst 5032 << " / count = " << entry.count 5033 << " / is_cnt = " << entry.is_cnt << std::endl; 5034 #endif 5035 5036 break; 5037 } 5038 5039 ////////////////////// 5040 case CLEANUP_HEAP_REQ: // get the lock to the HEAP directory 5041 { 5042 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 5043 5044 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 5045 5046 #if DEBUG_MEMC_CLEANUP 5047 if(m_debug) 5048 std::cout << " <MEMC " << name() 5049 << " CLEANUP_HEAP_REQ> HEAP lock acquired " << std::endl; 5050 #endif 5051 break; 5052 } 5053 ////////////////////// 5054 case CLEANUP_HEAP_LOCK: // two cases are handled in this state : 5055 // 1. the matching copy is directly in the directory 5056 // 2. 
the matching copy is the first copy in the heap 5057 { 5058 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5059 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5060 5061 size_t way = r_cleanup_way.read(); 5062 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 5063 5064 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 5065 bool last = (heap_entry.next == r_cleanup_ptr.read()); 5066 5067 // match_dir computation 5068 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5069 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5070 bool match_dir = match_dir_srcid and match_dir_inst; 5071 5072 // match_heap computation 5073 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5074 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5075 bool match_heap = match_heap_srcid and match_heap_inst; 5076 5077 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 5078 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5079 r_cleanup_prev_inst = heap_entry.owner.inst; 5080 5081 assert( (not last or match_dir or match_heap) and 5082 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 5083 5084 assert( (not match_dir or not match_heap) and 5085 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 5086 5087 DirectoryEntry dir_entry; 5088 dir_entry.valid = true; 5089 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 5090 dir_entry.dirty = r_cleanup_dirty.read(); 5091 dir_entry.tag = r_cleanup_tag.read(); 5092 dir_entry.lock = r_cleanup_lock.read(); 5093 dir_entry.count = r_cleanup_count.read()-1; 5094 5095 // the matching copy is registered in the directory and 5096 // it must be replaced by the first copy registered in 5097 // the heap. 
The corresponding entry must be freed 5098 if(match_dir) 5099 { 5100 dir_entry.ptr = heap_entry.next; 5101 dir_entry.owner.srcid = heap_entry.owner.srcid; 5102 dir_entry.owner.inst = heap_entry.owner.inst; 5103 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5104 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5105 } 5106 5107 // the matching copy is the first copy in the heap 5108 // It must be freed and the copy registered in directory 5109 // must point to the next copy in heap 5110 else if(match_heap) 5111 { 5112 dir_entry.ptr = heap_entry.next; 5113 dir_entry.owner.srcid = r_cleanup_copy.read(); 5114 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5115 r_cleanup_next_ptr = r_cleanup_ptr.read(); 5116 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5117 } 5118 5119 // The matching copy is in the heap, but is not the first copy 5120 // The directory entry must be modified to decrement count 5121 else 5122 { 5123 dir_entry.ptr = r_cleanup_ptr.read(); 5124 dir_entry.owner.srcid = r_cleanup_copy.read(); 5125 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 5126 r_cleanup_next_ptr = heap_entry.next; 5127 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5128 } 5129 5130 m_cache_directory.write(set,way,dir_entry); 5131 5132 #if DEBUG_MEMC_CLEANUP 5133 if(m_debug) 5134 std::cout << " <MEMC " << name() 5135 << " CLEANUP_HEAP_LOCK> Checks matching:" 5136 << " address = " << r_cleanup_nline.read() * m_words * 4 5137 << " / dir_id = " << r_cleanup_copy.read() 5138 << " / dir_ins = " << r_cleanup_copy_inst.read() 5139 << " / heap_id = " << heap_entry.owner.srcid 5140 << " / heap_ins = " << heap_entry.owner.inst 5141 << " / search_id = " << r_cleanup_srcid.read() 5142 << " / search_ins = " << r_cleanup_inst.read() << std::endl; 5143 #endif 5144 break; 5145 } 5146 //////////////////////// 5147 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 5148 // is in the heap, but not the first in linked list 5149 { 5150 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5151 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5152 5153 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 5154 5155 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 5156 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 5157 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 5158 bool match_heap = match_heap_srcid and match_heap_inst; 5159 5160 assert( (not last or match_heap) and 5161 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 5162 5163 // the matching copy must be removed 5164 if(match_heap) 5165 { 5166 // re-use ressources 5167 r_cleanup_ptr = heap_entry.next; 5168 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 5169 } 5170 // test the next in the linked list 5171 else 5172 { 5173 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 5174 r_cleanup_prev_srcid = heap_entry.owner.srcid; 5175 r_cleanup_prev_inst = heap_entry.owner.inst; 5176 r_cleanup_next_ptr = heap_entry.next; 5177 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 5178 } 5179 5180 #if DEBUG_MEMC_CLEANUP 5181 if(m_debug) 5182 { 5183 if(not match_heap) 5184 { 5185 std::cout 5186 << " <MEMC " << name() 5187 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 5188 << std::endl; 5189 } 5190 else 5191 { 5192 std::cout 5193 << " <MEMC " << name() 5194 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 5195 << std::endl; 5196 } 5197 std::cout 5198 << " address = " << r_cleanup_nline.read() * m_words * 4 5199 << " / heap_id = " << heap_entry.owner.srcid 5200 << " / heap_ins = " << 
heap_entry.owner.inst 5201 << " / search_id = " << r_cleanup_srcid.read() 5202 << " / search_ins = " << r_cleanup_inst.read() 5203 << " / last = " << last 5204 << std::endl; 5205 } 5206 #endif 5207 break; 5208 } 5209 //////////////////////// 5210 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 5211 { 5212 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5213 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5214 5215 HeapEntry heap_entry; 5216 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 5217 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 5218 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 5219 5220 if (last) // this is the last entry of the list of copies 5221 { 5222 heap_entry.next = r_cleanup_prev_ptr.read(); 5223 } 5224 else // this is not the last entry 5225 { 5226 heap_entry.next = r_cleanup_ptr.read(); 5227 } 5228 5229 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 5230 5231 r_cleanup_fsm = CLEANUP_HEAP_FREE; 5232 5233 #if DEBUG_MEMC_CLEANUP 5234 if(m_debug) 5235 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 5236 << " Remove the copy in the linked list" << std::endl; 5237 #endif 5238 break; 5239 } 5240 /////////////////////// 5241 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 5242 // and becomes the head of the list of free entries 5243 { 5244 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 5245 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 5246 5247 HeapEntry heap_entry; 5248 heap_entry.owner.srcid = 0; 5249 heap_entry.owner.inst = false; 5250 5251 if(m_heap.is_full()) 5252 { 5253 heap_entry.next = r_cleanup_next_ptr.read(); 5254 } 5255 else 5256 { 5257 heap_entry.next = m_heap.next_free_ptr(); 5258 } 5259 5260 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 5261 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 5262 m_heap.unset_full(); 5263 5264 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5265 5266 #if DEBUG_MEMC_CLEANUP 5267 if(m_debug) 5268 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 5269 << " Update the list of free entries" << std::endl; 5270 #endif 5271 break; 5272 } 5273 ////////////////////// 5274 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 5275 // invalidate transaction matching the cleanup 5276 { 5277 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 5278 5279 size_t index = 0; 5280 bool match_inval; 5281 5282 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 5283 5284 if ( not match_inval ) // no pending inval 5285 { 5286 /*ODCCP*/ // If cleanup is on no coherent line we go to CLEANUP_IXR_REQ 5287 if (r_cleanup_ncc.read()) 5288 { 5289 r_cleanup_fsm = CLEANUP_IXR_REQ; 5290 } 5291 else 5292 { 5293 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5294 } 5295 5296 #if DEBUG_MEMC_CLEANUP 5297 if(m_debug) 5298 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 5299 << " Unexpected cleanup with no corresponding IVT entry:" 5300 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 5301 #endif 5302 break; 5303 } 5304 else // pending inval in IVT 5305 { 5306 r_cleanup_write_srcid = m_ivt.srcid(index); 5307 r_cleanup_write_trdid = m_ivt.trdid(index); 5308 r_cleanup_write_pktid = m_ivt.pktid(index); 5309 r_cleanup_need_rsp = m_ivt.need_rsp(index); 5310 r_cleanup_need_ack = m_ivt.need_ack(index); 5311 r_cleanup_index = index; 5312 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 5313 5314 #if DEBUG_MEMC_CLEANUP 5315 if(m_debug) 5316 std::cout << " <MEMC " << 
name() << " CLEANUP_IVT_LOCK>" 5317 << " Cleanup matching pending invalidate transaction on IVT:" 5318 << " address = " << std::hex << (r_cleanup_nline.read()*m_words*4) 5319 << " / ivt_entry = " << index << std::endl; 5320 #endif 5321 } 5322 break; 5323 } 5324 /////////////////////////// 5325 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 5326 // and test if last 5327 { 5328 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5329 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 5330 5331 size_t count = 0; 5332 m_ivt.decrement(r_cleanup_index.read(), count); 5333 5334 if(count == 0) // multi inval transaction completed 5335 { 5336 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 5337 } 5338 else // multi inval transaction not completed 5339 { 5340 /*ODCCP*/ // If cleanup is on no coherent line we go to CLEANUP_IXR_REQ 5341 if (r_cleanup_ncc.read()) 5342 { 5343 r_cleanup_fsm = CLEANUP_IXR_REQ; 5344 } 5345 else 5346 { 5347 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5348 } 5349 } 5350 5351 #if DEBUG_MEMC_CLEANUP 5352 if(m_debug) 5353 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 5354 << " Decrement response counter in IVT:" 5355 << " IVT_index = " << r_cleanup_index.read() 5356 << " / rsp_count = " << count << std::endl; 5357 #endif 5358 break; 5359 } 5360 /////////////////////// 5361 case CLEANUP_IVT_CLEAR: // Clear IVT entry 5362 // Acknowledge CONFIG FSM if required 5363 { 5364 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 5365 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 5366 5367 m_ivt.clear(r_cleanup_index.read()); 5368 5369 if ( r_cleanup_need_ack.read() ) 5370 { 5371 assert( (r_config_rsp_lines.read() > 0) and 5372 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 5373 5374 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 5375 } 5376 5377 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 5378 else if ( r_cleanup_ncc.read() ) r_cleanup_fsm = CLEANUP_IXR_REQ; 5379 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 5380 5381 #if DEBUG_MEMC_CLEANUP 5382 if(m_debug) 5383 std::cout << " <MEMC " << name() 5384 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 5385 << " IVT_index = " << r_cleanup_index.read() << std::endl; 5386 #endif 5387 break; 5388 } 5389 /////////////////////// 5390 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 5391 // wait if pending request to the TGT_RSP FSM 5392 { 5393 if(r_cleanup_to_tgt_rsp_req.read()) break; 5394 5395 // no pending request 5396 r_cleanup_to_tgt_rsp_req = true; 5397 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 5398 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 5399 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 5400 5401 /*ODCCP*/ // If cleanup is on no coherent line we go to CLEANUP_IXR_REQ 5402 if (r_cleanup_ncc.read()) 5403 { 5404 r_cleanup_fsm = CLEANUP_IXR_REQ; 5405 } 5406 else 5407 { 5408 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5409 } 5410 5411 #if DEBUG_MEMC_CLEANUP 5412 if(m_debug) 5413 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 5414 << " Send a response to a previous write request: " 5415 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 5416 << " / rtrdid = " << r_cleanup_write_trdid.read() 5417 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 5418 #endif 5419 break; 5420 } 5421 5422 /*ODCCP*/ 5423 case CLEANUP_IXR_REQ: 5424 { 5425 5426 //Send a request to the ixr to write the data in the XRAM and set an entry in the TRT. 
5427 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CLEANUP) 5428 { 5429 if(!r_cleanup_to_ixr_cmd_req.read()) 5430 { 5431 size_t index = 0; 5432 bool hit = m_trt.hit_write(r_cleanup_nline.read(), &index); // we save the index of the matching entry in TRT 5433 #if ODCCP_NON_INCLUSIVE 5434 if (!hit) 5435 { 5436 for(size_t i = 0; i < m_words; i++){ 5437 r_cleanup_to_ixr_cmd_data[i] = r_cleanup_data[i]; 5438 } 5439 r_cleanup_to_ixr_cmd_req = r_cleanup_contains_data.read(); 5440 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 5441 r_cleanup_to_ixr_cmd_index = m_trt_lines; 5442 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 5443 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 5444 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = true; 5445 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5446 } 5447 else // wait until inval done 5448 { 5449 r_cleanup_fsm = CLEANUP_WAIT; 5450 } 5451 5452 #else 5453 if (!hit) 5454 { 5455 std::cout << "assert on line " << r_cleanup_nline.read() << " | at cycle " << std::dec <<m_cpt_cycles << std::endl; 5456 for (size_t i = 0; i < m_trt_lines; i++) m_trt.print(i); 5457 } 5458 assert (hit and "CLEANUP_IXR_REQ found no matching entry in TRT"); 5459 5460 r_cleanup_to_ixr_cmd_req = true; 5461 5462 if (r_cleanup_contains_data.read()) 5463 { 5464 std::vector<data_t> data_vector; 5465 data_vector.clear(); 5466 5467 for(size_t i=0; i<m_words; i++) 5468 { 5469 data_vector.push_back(r_cleanup_data[i]); 5470 } 5471 5472 m_trt.set(index, 5473 false, // write to XRAM 5474 r_cleanup_nline.read(), // line index 5475 0, 5476 0, 5477 0, 5478 false, 5479 0, 5480 0, 5481 std::vector<be_t> (m_words,0), 5482 data_vector); 5483 } 5484 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 5485 r_cleanup_to_ixr_cmd_index = index; 5486 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 5487 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 5488 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = r_cleanup_contains_data.read(); 5489 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5490 #endif 5491 5492 #if DEBUG_MEMC_CLEANUP 5493 if(m_debug) 5494 { 5495 std::cout 5496 << " <MEMC " << name() 5497 << " CLEANUP_IXR_REQ> Send a put request to the ixr:" 5498 << " contains data ? = " << std::dec << r_cleanup_contains_data.read() 5499 << " srcid = " << std::dec << r_cleanup_srcid.read() 5500 << " pktid = " << std::dec << r_cleanup_pktid.read() 5501 << " trdid = " << std::dec << index 5502 << " nline = " << std::hex << r_cleanup_nline.read() << std::dec 5503 << std::endl; 5504 } 5505 #endif 5506 } 5507 else 5508 { 5509 r_cleanup_fsm = CLEANUP_WAIT; 5510 } 5511 } 5512 break; 5513 } 5514 5515 case CLEANUP_WAIT : 5516 { 5517 r_cleanup_fsm = CLEANUP_IXR_REQ; 5518 break; 5519 } 5520 5521 //////////////////////// 5522 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 5523 // on the coherence CLACK network. 
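                             // The FSM stays in this state until the CLACK network accepts
                             // the flit (p_dspin_clack.read asserted by the target), then
                             // returns to CLEANUP_IDLE.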
5524 { 5525 if(not p_dspin_clack.read) break; 5526 5527 r_cleanup_fsm = CLEANUP_IDLE; 5528 5529 #if DEBUG_MEMC_CLEANUP 5530 if(m_debug) 5531 std::cout << " <MEMC " << name() 5532 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 5533 << " address = " << std::hex << r_cleanup_nline.read()*m_words*4 5534 << " / way = " << std::dec << r_cleanup_way.read() 5535 << " / srcid = " << std::dec << r_cleanup_srcid.read() 5536 << std::endl; 5537 #endif 5538 break; 5539 } 5540 } // end switch cleanup fsm 5541 5542 //////////////////////////////////////////////////////////////////////////////////// 5543 // CAS FSM 5544 //////////////////////////////////////////////////////////////////////////////////// 5545 // The CAS FSM handles the CAS (Compare And Swap) atomic commands. 5546 // 5547 // This command contains two or four flits: 5548 // - In case of 32 bits atomic access, the first flit contains the value read 5549 // by a previous READ instruction, the second flit contains the value to be writen. 5550 // - In case of 64 bits atomic access, the 2 first flits contains the value read 5551 // by a previous READ instruction, the 2 next flits contains the value to be writen. 5552 // 5553 // The target address is cachable. If it is replicated in other L1 caches 5554 // than the writer, a coherence operation is done. 5555 // 5556 // It access the directory to check hit / miss. 5557 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 5558 // If a read transaction to the XRAM for this line already exists, 5559 // or if the transaction table is full, it goes to the WAIT state 5560 // to release the locks and try again. When the GET transaction has been 5561 // launched, it goes to the WAIT state and try again. 5562 // The CAS request is not consumed in the FIFO until a HIT is obtained. 5563 // - In case of hit... 5564 /////////////////////////////////////////////////////////////////////////////////// 5565 5566 //std::cout << std::endl << "cas_fsm" << std::endl; 5567 5568 switch(r_cas_fsm.read()) 5569 { 5570 //////////// 5571 case CAS_IDLE: // fill the local rdata buffers 5572 { 5573 if (m_cmd_cas_addr_fifo.rok() ) 5574 { 5575 5576 #if DEBUG_MEMC_CAS 5577 if(m_debug) 5578 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 5579 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 5580 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 5581 << " wdata = " << m_cmd_cas_wdata_fifo.read() 5582 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 5583 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 5584 #endif 5585 if(m_cmd_cas_eop_fifo.read()) 5586 { 5587 m_cpt_cas++; 5588 r_cas_fsm = CAS_DIR_REQ; 5589 } 5590 else // we keep the last word in the FIFO 5591 { 5592 cmd_cas_fifo_get = true; 5593 } 5594 5595 // We fill the two buffers 5596 if(r_cas_cpt.read() < 2) // 32 bits access 5597 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 5598 5599 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 5600 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5601 5602 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits... 
5603 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 5604 5605 if(r_cas_cpt.read() ==2) 5606 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5607 5608 r_cas_cpt = r_cas_cpt.read() +1; 5609 } 5610 break; 5611 } 5612 ///////////////// 5613 case CAS_DIR_REQ: 5614 { 5615 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 5616 { 5617 r_cas_fsm = CAS_DIR_LOCK; 5618 } 5619 5620 #if DEBUG_MEMC_CAS 5621 if(m_debug) 5622 std::cout << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " << std::endl; 5623 #endif 5624 break; 5625 } 5626 ///////////////// 5627 case CAS_DIR_LOCK: // Read the directory 5628 { 5629 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5630 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 5631 5632 size_t way = 0; 5633 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 5634 5635 r_cas_is_cnt = entry.is_cnt; 5636 r_cas_dirty = entry.dirty; 5637 r_cas_tag = entry.tag; 5638 r_cas_way = way; 5639 r_cas_copy = entry.owner.srcid; 5640 r_cas_copy_inst = entry.owner.inst; 5641 r_cas_ptr = entry.ptr; 5642 r_cas_count = entry.count; 5643 5644 if(entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 5645 else r_cas_fsm = CAS_MISS_TRT_LOCK; 5646 5647 #if DEBUG_MEMC_CAS 5648 if(m_debug) 5649 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 5650 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5651 << " / hit = " << std::dec << entry.valid 5652 << " / count = " << entry.count 5653 << " / is_cnt = " << entry.is_cnt << std::endl; 5654 #endif 5655 5656 break; 5657 } 5658 ///////////////////// 5659 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 5660 // and check data change in cache 5661 { 5662 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5663 "MEMC ERROR in CAS_DIR_HIT_READ: Bad DIR allocation"); 5664 5665 size_t way = r_cas_way.read(); 5666 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5667 5668 // update directory (lock & dirty bits) 5669 DirectoryEntry entry; 5670 entry.valid = true; 5671 entry.is_cnt = r_cas_is_cnt.read(); 5672 entry.dirty = true; 5673 entry.lock = true; 5674 entry.tag = r_cas_tag.read(); 5675 entry.owner.srcid = r_cas_copy.read(); 5676 entry.owner.inst = r_cas_copy_inst.read(); 5677 entry.count = r_cas_count.read(); 5678 entry.ptr = r_cas_ptr.read(); 5679 5680 m_cache_directory.write(set, way, entry); 5681 5682 // Store data from cache in buffer to do the comparison in next state 5683 m_cache_data.read_line(way, set, r_cas_data); 5684 5685 r_cas_fsm = CAS_DIR_HIT_COMPARE; 5686 5687 #if DEBUG_MEMC_CAS 5688 if(m_debug) 5689 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 5690 << " cache and store it in buffer" << std::endl; 5691 #endif 5692 break; 5693 } 5694 //////////////////////// 5695 case CAS_DIR_HIT_COMPARE: 5696 { 5697 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5698 5699 // check data change 5700 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 5701 5702 if(r_cas_cpt.read() == 4) // 64 bits CAS 5703 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 5704 5705 // to avoid livelock, force the atomic access to fail pseudo-randomly 5706 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 5707 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 5708 5709 if(ok and not forced_fail) r_cas_fsm = CAS_DIR_HIT_WRITE; 5710 else r_cas_fsm = CAS_RSP_FAIL; 5711 5712 #if DEBUG_MEMC_CAS 5713 if(m_debug) 5714 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare old and new data" 5715 << " / expected 
value = " << std::hex << r_cas_rdata[0].read() 5716 << " / actual value = " << std::hex << r_cas_data[word].read() 5717 << " / forced_fail = " << std::dec << forced_fail << std::endl; 5718 #endif 5719 break; 5720 } 5721 ////////////////////// 5722 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 5723 // write data in cache if no CC request 5724 { 5725 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5726 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 5727 5728 // The CAS is a success => sw access to the llsc_global_table 5729 m_llsc_table.sw( m_nline[(addr_t)m_cmd_cas_addr_fifo.read()], 5730 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())], 5731 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())] ); 5732 5733 // test coherence request 5734 if(r_cas_count.read()) // replicated line 5735 { 5736 if(r_cas_is_cnt.read()) 5737 { 5738 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 5739 5740 #if DEBUG_MEMC_CAS 5741 if(m_debug) 5742 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5743 << " Broacast Inval required" 5744 << " / copies = " << r_cas_count.read() << std::endl; 5745 #endif 5746 } 5747 else if( not r_cas_to_cc_send_multi_req.read() and 5748 not r_cas_to_cc_send_brdcast_req.read() ) 5749 { 5750 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5751 5752 #if DEBUG_MEMC_CAS 5753 if(m_debug) 5754 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5755 << " Multi Inval required" 5756 << " / copies = " << r_cas_count.read() << std::endl; 5757 #endif 5758 } 5759 else 5760 { 5761 r_cas_fsm = CAS_WAIT; 5762 5763 #if DEBUG_MEMC_CAS 5764 if(m_debug) 5765 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5766 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 5767 #endif 5768 } 5769 } 5770 else // no copies 5771 { 5772 size_t way = r_cas_way.read(); 5773 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5774 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5775 5776 // cache update 5777 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5778 if(r_cas_cpt.read() == 4) 5779 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5780 5781 r_cas_fsm = CAS_RSP_SUCCESS; 5782 5783 #if DEBUG_MEMC_CAS 5784 if(m_debug) 5785 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 5786 << " way = " << std::dec << way 5787 << " / set = " << set 5788 << " / word = " << word 5789 << " / value = " << r_cas_wdata.read() 5790 << " / count = " << r_cas_count.read() 5791 << " / global_llsc_table access" << std::endl; 5792 #endif 5793 } 5794 break; 5795 } 5796 ///////////////// 5797 case CAS_UPT_LOCK: // try to register the transaction in UPT 5798 // and write data in cache if successful registration 5799 // releases locks to retry later if UPT full 5800 { 5801 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 5802 { 5803 bool wok = false; 5804 size_t index = 0; 5805 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5806 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5807 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5808 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5809 size_t nb_copies = r_cas_count.read(); 5810 5811 wok = m_upt.set( true, // it's an update transaction 5812 false, // it's not a broadcast 5813 true, // response required 5814 false, // no acknowledge required 5815 srcid, 5816 trdid, 5817 pktid, 5818 nline, 5819 nb_copies, 5820 index); 5821 if(wok) // coherence transaction registered in UPT 5822 { 5823 // cache update 5824 size_t way = r_cas_way.read(); 5825 size_t set = 
m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5826 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5827 5828 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5829 if(r_cas_cpt.read() ==4) 5830 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5831 5832 r_cas_upt_index = index; 5833 r_cas_fsm = CAS_UPT_HEAP_LOCK; 5834 } 5835 else // releases the locks protecting UPT and DIR UPT full 5836 { 5837 r_cas_fsm = CAS_WAIT; 5838 } 5839 5840 #if DEBUG_MEMC_CAS 5841 if(m_debug) 5842 std::cout << " <MEMC " << name() 5843 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 5844 << " / wok = " << wok 5845 << " / address = " << std::hex << nline*m_words*4 5846 << " / count = " << nb_copies << std::endl; 5847 #endif 5848 } 5849 break; 5850 } 5851 ///////////// 5852 case CAS_WAIT: // release all locks and retry from beginning 5853 { 5854 5855 #if DEBUG_MEMC_CAS 5856 if(m_debug) 5857 std::cout << " <MEMC " << name() << " CAS_WAIT> Release all locks" << std::endl; 5858 #endif 5859 r_cas_fsm = CAS_DIR_REQ; 5860 break; 5861 } 5862 ////////////////////// 5863 case CAS_UPT_HEAP_LOCK: // lock the heap 5864 { 5865 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5866 { 5867 5868 #if DEBUG_MEMC_CAS 5869 if(m_debug) 5870 { 5871 std::cout << " <MEMC " << name() 5872 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 5873 } 5874 #endif 5875 r_cas_fsm = CAS_UPT_REQ; 5876 m_cpt_cas_fsm_n_heap_lock++; 5877 } 5878 5879 m_cpt_cas_fsm_heap_lock++; 5880 5881 break; 5882 } 5883 //////////////// 5884 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 5885 { 5886 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 5887 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5888 5889 if(!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 5890 { 5891 r_cas_to_cc_send_brdcast_req = false; 5892 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5893 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5894 r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5895 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 5896 5897 if(r_cas_cpt.read() == 4) 5898 { 5899 r_cas_to_cc_send_is_long = true; 5900 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 5901 } 5902 else 5903 { 5904 r_cas_to_cc_send_is_long = false; 5905 r_cas_to_cc_send_wdata_high = 0; 5906 } 5907 5908 // We put the first copy in the fifo 5909 cas_to_cc_send_fifo_put = true; 5910 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 5911 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 5912 if(r_cas_count.read() == 1) // one single copy 5913 { 5914 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5915 // update responses 5916 cmd_cas_fifo_get = true; 5917 r_cas_to_cc_send_multi_req = true; 5918 r_cas_cpt = 0; 5919 } 5920 else // several copies 5921 { 5922 r_cas_fsm = CAS_UPT_NEXT; 5923 } 5924 5925 #if DEBUG_MEMC_CAS 5926 if(m_debug) 5927 { 5928 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 5929 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5930 << " / wdata = " << std::hex << r_cas_wdata.read() 5931 << " / srcid = " << std::dec << r_cas_copy.read() 5932 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 5933 } 5934 #endif 5935 } 5936 break; 5937 } 5938 ///////////////// 5939 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 5940 { 5941 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 5942 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 5943 5944 
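         // Walk the linked list of copies stored in the heap: one update request is
         // pushed into the cas_to_cc_send fifo for each copy. The end of the list is
         // detected when an entry points to itself (entry.next == r_cas_ptr); the
         // multi_req flag is then raised and the CAS command is consumed from the
         // input fifo.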
HeapEntry entry = m_heap.read(r_cas_ptr.read()); 5945 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 5946 cas_to_cc_send_fifo_inst = entry.owner.inst; 5947 cas_to_cc_send_fifo_put = true; 5948 5949 if(m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 5950 { 5951 r_cas_ptr = entry.next; 5952 if(entry.next == r_cas_ptr.read()) // last copy 5953 { 5954 r_cas_to_cc_send_multi_req = true; 5955 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 5956 // all update responses 5957 cmd_cas_fifo_get = true; 5958 r_cas_cpt = 0; 5959 } 5960 } 5961 5962 #if DEBUG_MEMC_CAS 5963 if(m_debug) 5964 { 5965 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 5966 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5967 << " / wdata = " << std::hex << r_cas_wdata.read() 5968 << " / srcid = " << std::dec << entry.owner.srcid 5969 << " / inst = " << std::dec << entry.owner.inst << std::endl; 5970 } 5971 #endif 5972 break; 5973 } 5974 ///////////////////// 5975 case CAS_BC_TRT_LOCK: // get TRT lock to check TRT not full 5976 { 5977 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5978 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 5979 5980 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5981 { 5982 size_t wok_index = 0; 5983 bool wok = !m_trt.full(wok_index); 5984 if( wok ) 5985 { 5986 r_cas_trt_index = wok_index; 5987 r_cas_fsm = CAS_BC_IVT_LOCK; 5988 } 5989 else 5990 { 5991 r_cas_fsm = CAS_WAIT; 5992 } 5993 5994 #if DEBUG_MEMC_CAS 5995 if(m_debug) 5996 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 5997 << " : wok = " << wok << " / index = " << wok_index << std::endl; 5998 #endif 5999 } 6000 break; 6001 } 6002 ///////////////////// 6003 case CAS_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 6004 { 6005 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6006 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas DIR allocation"); 6007 6008 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6009 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas TRT allocation"); 6010 6011 if( r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS ) 6012 { 6013 // register broadcast inval transaction in IVT 6014 bool wok = false; 6015 size_t index = 0; 6016 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6017 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6018 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6019 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6020 size_t nb_copies = r_cas_count.read(); 6021 6022 wok = m_ivt.set( false, // it's an inval transaction 6023 true, // it's a broadcast 6024 true, // response required 6025 false, // no acknowledge required 6026 srcid, 6027 trdid, 6028 pktid, 6029 nline, 6030 nb_copies, 6031 index); 6032 #if DEBUG_MEMC_CAS 6033 if( m_debug and wok ) 6034 std::cout << " <MEMC " << name() << " CAS_BC_IVT_LOCK> Register broadcast inval in IVT" 6035 << " / copies = " << r_cas_count.read() << std::endl; 6036 #endif 6037 r_cas_upt_index = index; 6038 if( wok ) r_cas_fsm = CAS_BC_DIR_INVAL; 6039 else r_cas_fsm = CAS_WAIT; 6040 } 6041 break; 6042 } 6043 ////////////////////// 6044 case CAS_BC_DIR_INVAL: // Register PUT transaction in TRT, 6045 // and inval the DIR entry 6046 { 6047 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6048 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 6049 6050 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6051 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 6052 6053 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 
6054 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 6055 6056 // set TRT 6057 std::vector<data_t> data_vector; 6058 data_vector.clear(); 6059 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6060 for(size_t i=0; i<m_words; i++) 6061 { 6062 if(i == word) // first modified word 6063 data_vector.push_back( r_cas_wdata.read() ); 6064 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 6065 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 6066 else // unmodified words 6067 data_vector.push_back( r_cas_data[i].read() ); 6068 } 6069 m_trt.set( r_cas_trt_index.read(), 6070 false, // PUT request 6071 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 6072 0, 6073 0, 6074 0, 6075 false, // not a processor read 6076 0, 6077 0, 6078 std::vector<be_t> (m_words,0), 6079 data_vector ); 6080 6081 // invalidate directory entry 6082 DirectoryEntry entry; 6083 entry.valid = false; 6084 entry.dirty = false; 6085 entry.tag = 0; 6086 entry.is_cnt = false; 6087 entry.lock = false; 6088 entry.count = 0; 6089 entry.owner.srcid = 0; 6090 entry.owner.inst = false; 6091 entry.ptr = 0; 6092 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6093 size_t way = r_cas_way.read(); 6094 6095 m_cache_directory.write(set, way, entry); 6096 6097 r_cas_fsm = CAS_BC_CC_SEND; 6098 6099 #if DEBUG_MEMC_CAS 6100 if(m_debug) 6101 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 6102 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 6103 #endif 6104 break; 6105 } 6106 /////////////////// 6107 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 6108 { 6109 if( not r_cas_to_cc_send_multi_req.read() and 6110 not r_cas_to_cc_send_brdcast_req.read() ) 6111 { 6112 r_cas_to_cc_send_multi_req = false; 6113 r_cas_to_cc_send_brdcast_req = true; 6114 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6115 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6116 r_cas_to_cc_send_index = 0; 6117 r_cas_to_cc_send_wdata = 0; 6118 6119 r_cas_fsm = CAS_BC_XRAM_REQ; 6120 6121 #if DEBUG_MEMC_CAS 6122 if(m_debug) 6123 std::cout << " <MEMC " << name() 6124 << " CAS_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 6125 #endif 6126 } 6127 break; 6128 } 6129 //////////////////// 6130 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a PUT transaction 6131 { 6132 if( not r_cas_to_ixr_cmd_req.read() ) 6133 { 6134 r_cas_to_ixr_cmd_req = true; 6135 r_cas_to_ixr_cmd_put = true; 6136 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6137 r_cas_fsm = CAS_IDLE; 6138 cmd_cas_fifo_get = true; 6139 r_cas_cpt = 0; 6140 6141 #if DEBUG_MEMC_CAS 6142 if(m_debug) 6143 std::cout << " <MEMC " << name() 6144 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 6145 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 6146 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 6147 #endif 6148 } 6149 break; 6150 } 6151 ///////////////// 6152 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 6153 { 6154 if( not r_cas_to_tgt_rsp_req.read() ) 6155 { 6156 cmd_cas_fifo_get = true; 6157 r_cas_cpt = 0; 6158 r_cas_to_tgt_rsp_req = true; 6159 r_cas_to_tgt_rsp_data = 1; 6160 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6161 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6162 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6163 r_cas_fsm = CAS_IDLE; 6164 6165 #if DEBUG_MEMC_CAS 6166 if(m_debug) 6167 std::cout << " <MEMC " << name() 6168 << " CAS_RSP_FAIL> Request 
TGT_RSP to send a failure response" << std::endl; 6169 #endif 6170 } 6171 break; 6172 } 6173 //////////////////// 6174 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 6175 { 6176 if( not r_cas_to_tgt_rsp_req.read() ) 6177 { 6178 cmd_cas_fifo_get = true; 6179 r_cas_cpt = 0; 6180 r_cas_to_tgt_rsp_req = true; 6181 r_cas_to_tgt_rsp_data = 0; 6182 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 6183 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 6184 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 6185 r_cas_fsm = CAS_IDLE; 6186 6187 #if DEBUG_MEMC_CAS 6188 if(m_debug) 6189 std::cout << " <MEMC " << name() 6190 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 6191 #endif 6192 } 6193 break; 6194 } 6195 /////////////////////// 6196 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 6197 { 6198 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 6199 { 6200 size_t index = 0; 6201 bool hit_read = m_trt.hit_read( 6202 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 6203 bool hit_write = m_trt.hit_write( 6204 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 6205 bool wok = not m_trt.full(index); 6206 6207 #if DEBUG_MEMC_CAS 6208 if(m_debug) 6209 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 6210 << " / hit_read = " << hit_read 6211 << " / hit_write = " << hit_write 6212 << " / wok = " << wok 6213 << " / index = " << index << std::endl; 6214 #endif 6215 6216 if(hit_read or !wok or hit_write) // missing line already requested or TRT full 6217 { 6218 r_cas_fsm = CAS_WAIT; 6219 } 6220 else 6221 { 6222 r_cas_trt_index = index; 6223 r_cas_fsm = CAS_MISS_TRT_SET; 6224 } 6225 } 6226 break; 6227 } 6228 ////////////////////// 6229 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 6230 { 6231 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 6232 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 6233 6234 std::vector<be_t> be_vector; 6235 std::vector<data_t> data_vector; 6236 be_vector.clear(); 6237 data_vector.clear(); 6238 for(size_t i=0; i<m_words; i++) 6239 { 6240 be_vector.push_back(0); 6241 data_vector.push_back(0); 6242 } 6243 6244 m_trt.set( r_cas_trt_index.read(), 6245 true, // GET 6246 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 6247 m_cmd_cas_srcid_fifo.read(), 6248 m_cmd_cas_trdid_fifo.read(), 6249 m_cmd_cas_pktid_fifo.read(), 6250 false, // write request from processor 6251 0, 6252 0, 6253 std::vector<be_t>(m_words,0), 6254 std::vector<data_t>(m_words,0) ); 6255 6256 r_cas_fsm = CAS_MISS_XRAM_REQ; 6257 6258 #if DEBUG_MEMC_CAS 6259 if(m_debug) 6260 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register GET transaction in TRT" 6261 << " / address = " << std::hex << (addr_t)m_cmd_cas_addr_fifo.read() 6262 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 6263 #endif 6264 break; 6265 } 6266 ////////////////////// 6267 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM a GET request 6268 { 6269 if( not r_cas_to_ixr_cmd_req.read() ) 6270 { 6271 r_cas_to_ixr_cmd_req = true; 6272 r_cas_to_ixr_cmd_put = false; 6273 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 6274 r_cas_fsm = CAS_WAIT; 6275 6276 #if DEBUG_MEMC_CAS 6277 if(m_debug) 6278 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 6279 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 6280 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 6281 #endif 6282 } 6283 break; 6284 } 6285 } // end 
switch r_cas_fsm 6286 6287 6288 ////////////////////////////////////////////////////////////////////////////// 6289 // CC_SEND FSM 6290 ////////////////////////////////////////////////////////////////////////////// 6291 // The CC_SEND fsm controls the DSPIN initiator port on the coherence 6292 // network, used to update or invalidate cache lines in L1 caches. 6293 // 6294 // It implements a round-robin priority between the four possible client FSMs 6295 // XRAM_RSP > CAS > WRITE > CONFIG 6296 // 6297 // Each FSM can request the next services: 6298 // - r_xram_rsp_to_cc_send_multi_req : multi-inval 6299 // r_xram_rsp_to_cc_send_brdcast_req : broadcast-inval 6300 // - r_write_to_cc_send_multi_req : multi-update 6301 // r_write_to_cc_send_brdcast_req : broadcast-inval 6302 // - r_cas_to_cc_send_multi_req : multi-update 6303 // r_cas_to_cc_send_brdcast_req : broadcast-inval 6304 // - r_config_to_cc_send_multi_req : multi-inval 6305 // r_config_to_cc_send_brdcast_req : broadcast-inval 6306 // 6307 // An inval request is a double DSPIN flit command containing: 6308 // 1. the index of the line to be invalidated. 6309 // 6310 // An update request is a multi-flit DSPIN command containing: 6311 // 1. the index of the cache line to be updated. 6312 // 2. the index of the first modified word in the line. 6313 // 3. the data to update 6314 /////////////////////////////////////////////////////////////////////////////// 6315 6316 //std::cout << std::endl << "cc_send_fsm" << std::endl; 6317 6318 switch(r_cc_send_fsm.read()) 6319 { 6320 ///////////////////////// 6321 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 6322 { 6323 // XRAM_RSP 6324 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6325 r_xram_rsp_to_cc_send_multi_req.read()) 6326 { 6327 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6328 m_cpt_inval++; 6329 break; 6330 } 6331 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6332 { 6333 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6334 m_cpt_inval++; 6335 break; 6336 } 6337 // CAS 6338 if(m_cas_to_cc_send_inst_fifo.rok() or 6339 r_cas_to_cc_send_multi_req.read()) 6340 { 6341 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6342 m_cpt_update++; 6343 break; 6344 } 6345 if(r_cas_to_cc_send_brdcast_req.read()) 6346 { 6347 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6348 m_cpt_inval++; 6349 break; 6350 } 6351 // WRITE 6352 if(m_write_to_cc_send_inst_fifo.rok() or 6353 r_write_to_cc_send_multi_req.read()) 6354 { 6355 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6356 m_cpt_update++; 6357 break; 6358 } 6359 if(r_write_to_cc_send_brdcast_req.read()) 6360 { 6361 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6362 m_cpt_inval++; 6363 break; 6364 } 6365 // CONFIG 6366 if(r_config_to_cc_send_multi_req.read()) 6367 { 6368 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6369 m_cpt_inval++; 6370 break; 6371 } 6372 if(r_config_to_cc_send_brdcast_req.read()) 6373 { 6374 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6375 m_cpt_inval++; 6376 break; 6377 } 6378 break; 6379 } 6380 //////////////////////// 6381 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 6382 { 6383 // CONFIG 6384 if(r_config_to_cc_send_multi_req.read()) 6385 { 6386 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6387 m_cpt_inval++; 6388 break; 6389 } 6390 if(r_config_to_cc_send_brdcast_req.read()) 6391 { 6392 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6393 m_cpt_inval++; 6394 break; 6395 } 6396 // XRAM_RSP 6397 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6398 r_xram_rsp_to_cc_send_multi_req.read()) 6399 { 6400 r_cc_send_fsm = 
CC_SEND_XRAM_RSP_INVAL_HEADER; 6401 m_cpt_inval++; 6402 break; 6403 } 6404 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6405 { 6406 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6407 m_cpt_inval++; 6408 break; 6409 } 6410 // CAS 6411 if(m_cas_to_cc_send_inst_fifo.rok() or 6412 r_cas_to_cc_send_multi_req.read()) 6413 { 6414 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6415 m_cpt_update++; 6416 break; 6417 } 6418 if(r_cas_to_cc_send_brdcast_req.read()) 6419 { 6420 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6421 m_cpt_inval++; 6422 break; 6423 } 6424 // WRITE 6425 if(m_write_to_cc_send_inst_fifo.rok() or 6426 r_write_to_cc_send_multi_req.read()) 6427 { 6428 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6429 m_cpt_update++; 6430 break; 6431 } 6432 if(r_write_to_cc_send_brdcast_req.read()) 6433 { 6434 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6435 m_cpt_inval++; 6436 break; 6437 } 6438 break; 6439 } 6440 /////////////////////////// 6441 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 6442 { 6443 // CAS 6444 if(m_cas_to_cc_send_inst_fifo.rok() or 6445 r_cas_to_cc_send_multi_req.read()) 6446 { 6447 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6448 m_cpt_update++; 6449 break; 6450 } 6451 if(r_cas_to_cc_send_brdcast_req.read()) 6452 { 6453 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6454 m_cpt_inval++; 6455 break; 6456 } 6457 // WRITE 6458 if(m_write_to_cc_send_inst_fifo.rok() or 6459 r_write_to_cc_send_multi_req.read()) 6460 { 6461 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6462 m_cpt_update++; 6463 break; 6464 } 6465 6466 if(r_write_to_cc_send_brdcast_req.read()) 6467 { 6468 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6469 m_cpt_inval++; 6470 break; 6471 } 6472 // CONFIG 6473 if(r_config_to_cc_send_multi_req.read()) 6474 { 6475 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6476 m_cpt_inval++; 6477 break; 6478 } 6479 if(r_config_to_cc_send_brdcast_req.read()) 6480 { 6481 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6482 m_cpt_inval++; 6483 break; 6484 } 6485 // XRAM_RSP 6486 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6487 r_xram_rsp_to_cc_send_multi_req.read()) 6488 { 6489 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6490 m_cpt_inval++; 6491 break; 6492 } 6493 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6494 { 6495 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6496 m_cpt_inval++; 6497 break; 6498 } 6499 break; 6500 } 6501 ////////////////////// 6502 case CC_SEND_CAS_IDLE: // CLEANUP FSM has highest priority 6503 { 6504 if(m_write_to_cc_send_inst_fifo.rok() or 6505 r_write_to_cc_send_multi_req.read()) 6506 { 6507 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6508 m_cpt_update++; 6509 break; 6510 } 6511 if(r_write_to_cc_send_brdcast_req.read()) 6512 { 6513 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 6514 m_cpt_inval++; 6515 break; 6516 } 6517 // CONFIG 6518 if(r_config_to_cc_send_multi_req.read()) 6519 { 6520 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6521 m_cpt_inval++; 6522 break; 6523 } 6524 if(r_config_to_cc_send_brdcast_req.read()) 6525 { 6526 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 6527 m_cpt_inval++; 6528 break; 6529 } 6530 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 6531 r_xram_rsp_to_cc_send_multi_req.read()) 6532 { 6533 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6534 m_cpt_inval++; 6535 break; 6536 } 6537 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 6538 { 6539 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 6540 m_cpt_inval++; 6541 break; 6542 } 6543 if(m_cas_to_cc_send_inst_fifo.rok() or 6544 r_cas_to_cc_send_multi_req.read()) 6545 { 6546 r_cc_send_fsm = 
CC_SEND_CAS_UPDT_HEADER; 6547 m_cpt_update++; 6548 break; 6549 } 6550 if(r_cas_to_cc_send_brdcast_req.read()) 6551 { 6552 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 6553 m_cpt_inval++; 6554 break; 6555 } 6556 break; 6557 } 6558 ///////////////////////////////// 6559 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 6560 { 6561 if(m_config_to_cc_send_inst_fifo.rok()) 6562 { 6563 if(not p_dspin_m2p.read) break; 6564 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 6565 break; 6566 } 6567 if(r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 6568 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 6569 break; 6570 } 6571 //////////////////////////////// 6572 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 6573 { 6574 if(not p_dspin_m2p.read) break; 6575 m_cpt_inval_mult++; 6576 config_to_cc_send_fifo_get = true; 6577 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 6578 6579 #if DEBUG_MEMC_CC_SEND 6580 if(m_debug) 6581 std::cout << " <MEMC " << name() 6582 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 6583 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 6584 #endif 6585 break; 6586 } 6587 /////////////////////////////////// 6588 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 6589 { 6590 if(not p_dspin_m2p.read) break; 6591 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 6592 break; 6593 } 6594 ////////////////////////////////// 6595 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 6596 { 6597 if(not p_dspin_m2p.read) break; 6598 m_cpt_inval_brdcast++; 6599 r_config_to_cc_send_brdcast_req = false; 6600 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 6601 6602 #if DEBUG_MEMC_CC_SEND 6603 if(m_debug) 6604 std::cout << " <MEMC " << name() 6605 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 6606 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 6607 #endif 6608 break; 6609 } 6610 /////////////////////////////////// 6611 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 6612 { 6613 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 6614 { 6615 if(not p_dspin_m2p.read) break; 6616 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 6617 break; 6618 } 6619 if(r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 6620 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6621 break; 6622 } 6623 ////////////////////////////////// 6624 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 6625 { 6626 if(not p_dspin_m2p.read) break; 6627 m_cpt_inval_mult++; 6628 xram_rsp_to_cc_send_fifo_get = true; 6629 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 6630 6631 #if DEBUG_MEMC_CC_SEND 6632 if(m_debug) 6633 std::cout << " <MEMC " << name() 6634 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 6635 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6636 #endif 6637 break; 6638 } 6639 ///////////////////////////////////// 6640 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 6641 { 6642 if(not p_dspin_m2p.read) break; 6643 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 6644 break; 6645 } 6646 //////////////////////////////////// 6647 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 6648 { 6649 if(not p_dspin_m2p.read) break; 6650 m_cpt_inval_brdcast++; 6651 r_xram_rsp_to_cc_send_brdcast_req = false; 6652 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 6653 6654 #if 
DEBUG_MEMC_CC_SEND 6655 if(m_debug) 6656 std::cout << " <MEMC " << name() 6657 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 6658 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 6659 #endif 6660 break; 6661 } 6662 ////////////////////////////////// 6663 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 6664 { 6665 if(not p_dspin_m2p.read) break; 6666 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 6667 break; 6668 } 6669 ///////////////////////////////// 6670 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 6671 { 6672 if(not p_dspin_m2p.read) break; 6673 6674 m_cpt_inval_brdcast++; 6675 6676 r_write_to_cc_send_brdcast_req = false; 6677 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6678 6679 #if DEBUG_MEMC_CC_SEND 6680 if(m_debug) 6681 std::cout << " <MEMC " << name() 6682 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 6683 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 6684 #endif 6685 break; 6686 } 6687 /////////////////////////////// 6688 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 6689 { 6690 if(m_write_to_cc_send_inst_fifo.rok()) 6691 { 6692 if(not p_dspin_m2p.read) break; 6693 6694 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 6695 break; 6696 } 6697 6698 if(r_write_to_cc_send_multi_req.read()) 6699 { 6700 r_write_to_cc_send_multi_req = false; 6701 } 6702 6703 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 6704 break; 6705 } 6706 ////////////////////////////// 6707 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 6708 { 6709 if(not p_dspin_m2p.read) break; 6710 m_cpt_update_mult++; 6711 6712 r_cc_send_cpt = 0; 6713 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 6714 6715 #if DEBUG_MEMC_CC_SEND 6716 if(m_debug) 6717 std::cout << " <MEMC " << name() 6718 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for address " 6719 << r_write_to_cc_send_nline.read()*m_words*4 << std::endl; 6720 #endif 6721 break; 6722 } 6723 ///////////////////////////// 6724 case CC_SEND_WRITE_UPDT_DATA: // send data flits for multi-update (from WRITE FSM) 6725 { 6726 if(not p_dspin_m2p.read) break; 6727 if(r_cc_send_cpt.read() == (r_write_to_cc_send_count.read()-1)) 6728 { 6729 write_to_cc_send_fifo_get = true; 6730 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 6731 break; 6732 } 6733 6734 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 6735 break; 6736 } 6737 //////////////////////////////// 6738 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 6739 { 6740 if(not p_dspin_m2p.read) break; 6741 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 6742 break; 6743 } 6744 /////////////////////////////// 6745 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 6746 { 6747 if(not p_dspin_m2p.read) break; 6748 m_cpt_inval_brdcast++; 6749 6750 r_cas_to_cc_send_brdcast_req = false; 6751 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6752 6753 #if DEBUG_MEMC_CC_SEND 6754 if(m_debug) 6755 std::cout << " <MEMC " << name() 6756 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for address: " 6757 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6758 #endif 6759 break; 6760 } 6761 ///////////////////////////// 6762 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 6763 { 6764 if(m_cas_to_cc_send_inst_fifo.rok()) 6765 { 6766 if(not p_dspin_m2p.read) break; 6767 6768 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 6769 break; 6770 } 6771 6772 // no more packets to send for the multi-update 6773 
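            // (the inst fifo is empty): clear the multi_req flag to signal the CAS FSM
            // that the whole multi-update has been sent, and return to CC_SEND_CAS_IDLE.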
if(r_cas_to_cc_send_multi_req.read()) 6774 { 6775 r_cas_to_cc_send_multi_req = false; 6776 } 6777 6778 r_cc_send_fsm = CC_SEND_CAS_IDLE; 6779 break; 6780 } 6781 //////////////////////////// 6782 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 6783 { 6784 if(not p_dspin_m2p.read) break; 6785 6786 m_cpt_update_mult++; 6787 6788 r_cc_send_cpt = 0; 6789 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 6790 6791 #if DEBUG_MEMC_CC_SEND 6792 if(m_debug) 6793 std::cout << " <MEMC " << name() 6794 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for address " 6795 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6796 #endif 6797 break; 6798 } 6799 /////////////////////////// 6800 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 6801 { 6802 if(not p_dspin_m2p.read) break; 6803 6804 if(r_cas_to_cc_send_is_long.read()) 6805 { 6806 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 6807 break; 6808 } 6809 6810 cas_to_cc_send_fifo_get = true; 6811 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6812 break; 6813 } 6814 //////////////////////////////// 6815 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for multi-update (from CAS FSM) 6816 { 6817 if(not p_dspin_m2p.read) break; 6818 cas_to_cc_send_fifo_get = true; 6819 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 6820 break; 6821 } 6822 } 6823 // end switch r_cc_send_fsm 6824 6825 ////////////////////////////////////////////////////////////////////////////// 6826 // CC_RECEIVE FSM 6827 ////////////////////////////////////////////////////////////////////////////// 6828 // The CC_RECEIVE fsm controls the DSPIN target port on the coherence 6829 // network. 6830 ////////////////////////////////////////////////////////////////////////////// 6831 6832 //std::cout << std::endl << "cc_receive_fsm" << std::endl; 6833 6834 switch(r_cc_receive_fsm.read()) 6835 { 6836 ///////////////////// 6837 case CC_RECEIVE_IDLE: 6838 { 6839 if(not p_dspin_p2m.write) break; 6840 6841 uint8_t type = 6842 DspinDhccpParam::dspin_get( 6843 p_dspin_p2m.data.read(), 6844 DspinDhccpParam::P2M_TYPE); 6845 6846 if((type == DspinDhccpParam::TYPE_CLEANUP_DATA) or 6847 (type == DspinDhccpParam::TYPE_CLEANUP_INST)) 6848 { 6849 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 6850 break; 6851 } 6852 6853 if(type == DspinDhccpParam::TYPE_MULTI_ACK) 6854 { 6855 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 6856 break; 6857 } 6858 6859 assert(false and 6860 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6861 "Illegal type in coherence request"); 6862 6863 break; 6864 } 6865 //////////////////////// 6866 case CC_RECEIVE_CLEANUP: 6867 { 6868 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 6869 6870 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6871 break; 6872 6873 assert(not p_dspin_p2m.eop.read() and 6874 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6875 "CLEANUP command must have two flits"); 6876 6877 cc_receive_to_cleanup_fifo_put = true; 6878 r_cc_receive_fsm = CC_RECEIVE_CLEANUP_EOP; 6879 6880 break; 6881 } 6882 //////////////////////////// 6883 case CC_RECEIVE_CLEANUP_EOP: 6884 { 6885 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo or more in case of cleanup data (ODCCP) 6886 6887 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 6888 break; 6889 6890 cc_receive_to_cleanup_fifo_put = true; 6891 if(p_dspin_p2m.eop.read()) 6892 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6893 6894 break; 6895 } 6896 6897 ////////////////////////// 6898 case CC_RECEIVE_MULTI_ACK: 6899 { 6900 // write MULTI_ACK flit in CC_RECEIVE to 
MULTI_ACK fifo 6901 6902 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 6903 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 6904 break; 6905 6906 assert(p_dspin_p2m.eop.read() and 6907 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 6908 "MULTI_ACK command must have one flit"); 6909 6910 cc_receive_to_multi_ack_fifo_put = true; 6911 r_cc_receive_fsm = CC_RECEIVE_IDLE; 6912 break; 6913 } 6914 } 6915 6916 ////////////////////////////////////////////////////////////////////////// 6917 // TGT_RSP FSM 6918 ////////////////////////////////////////////////////////////////////////// 6919 // The TGT_RSP fsm sends the responses on the VCI target port 6920 // with a round robin priority between eigth requests : 6921 // - r_config_to_tgt_rsp_req 6922 // - r_tgt_cmd_to_tgt_rsp_req 6923 // - r_read_to_tgt_rsp_req 6924 // - r_write_to_tgt_rsp_req 6925 // - r_cas_to_tgt_rsp_req 6926 // - r_cleanup_to_tgt_rsp_req 6927 // - r_xram_rsp_to_tgt_rsp_req 6928 // - r_multi_ack_to_tgt_rsp_req 6929 // 6930 // The ordering is : 6931 // config >tgt_cmd > read > write > cas > xram > multi_ack > cleanup 6932 ////////////////////////////////////////////////////////////////////////// 6933 6934 //std::cout << std::endl << "tgt_rsp_fsm" << std::endl; 6935 6936 switch(r_tgt_rsp_fsm.read()) 6937 { 6938 ///////////////////////// 6939 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 6940 { 6941 if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6942 else if(r_read_to_tgt_rsp_req) 6943 { 6944 r_tgt_rsp_fsm = TGT_RSP_READ; 6945 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6946 } 6947 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6948 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6949 else if(r_xram_rsp_to_tgt_rsp_req) 6950 { 6951 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6952 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6953 } 6954 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6955 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6956 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6957 break; 6958 } 6959 ////////////////////////// 6960 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 6961 { 6962 if(r_read_to_tgt_rsp_req) 6963 { 6964 r_tgt_rsp_fsm = TGT_RSP_READ; 6965 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6966 } 6967 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6968 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6969 else if(r_xram_rsp_to_tgt_rsp_req) 6970 { 6971 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6972 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6973 } 6974 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6975 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6976 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 6977 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6978 break; 6979 } 6980 /////////////////////// 6981 case TGT_RSP_READ_IDLE: // write requests have the highest priority 6982 { 6983 if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 6984 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 6985 else if(r_xram_rsp_to_tgt_rsp_req) 6986 { 6987 r_tgt_rsp_fsm = TGT_RSP_XRAM; 6988 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 6989 } 6990 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 6991 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 6992 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = 
TGT_RSP_CONFIG; 6993 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 6994 else if(r_read_to_tgt_rsp_req) 6995 { 6996 r_tgt_rsp_fsm = TGT_RSP_READ; 6997 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 6998 } 6999 break; 7000 } 7001 //////////////////////// 7002 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 7003 { 7004 if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 7005 else if(r_xram_rsp_to_tgt_rsp_req) 7006 { 7007 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7008 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7009 } 7010 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 7011 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7012 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7013 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7014 else if(r_read_to_tgt_rsp_req) 7015 { 7016 r_tgt_rsp_fsm = TGT_RSP_READ; 7017 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7018 } 7019 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7020 break; 7021 } 7022 /////////////////////// 7023 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 7024 { 7025 if(r_xram_rsp_to_tgt_rsp_req) 7026 { 7027 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7028 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7029 } 7030 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7031 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7032 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7033 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7034 else if(r_read_to_tgt_rsp_req) 7035 { 7036 r_tgt_rsp_fsm = TGT_RSP_READ; 7037 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7038 } 7039 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7040 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7041 break; 7042 } 7043 /////////////////////// 7044 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 7045 { 7046 7047 if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7048 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7049 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7050 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7051 else if(r_read_to_tgt_rsp_req) 7052 { 7053 r_tgt_rsp_fsm = TGT_RSP_READ; 7054 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7055 } 7056 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7057 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7058 else if(r_xram_rsp_to_tgt_rsp_req) 7059 { 7060 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7061 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7062 } 7063 break; 7064 } 7065 //////////////////////////// 7066 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 7067 { 7068 if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7069 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7070 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7071 else if(r_read_to_tgt_rsp_req) 7072 { 7073 r_tgt_rsp_fsm = TGT_RSP_READ; 7074 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7075 } 7076 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7077 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7078 else if(r_xram_rsp_to_tgt_rsp_req) 7079 { 7080 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7081 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7082 } 7083 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = 
TGT_RSP_MULTI_ACK; 7084 break; 7085 } 7086 ////////////////////////// 7087 case TGT_RSP_CLEANUP_IDLE: // tgt cmd requests have the highest priority 7088 { 7089 if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 7090 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 7091 else if(r_read_to_tgt_rsp_req) 7092 { 7093 r_tgt_rsp_fsm = TGT_RSP_READ; 7094 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 7095 } 7096 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 7097 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS ; 7098 else if(r_xram_rsp_to_tgt_rsp_req) 7099 { 7100 r_tgt_rsp_fsm = TGT_RSP_XRAM; 7101 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 7102 } 7103 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK ; 7104 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 7105 break; 7106 } 7107 //////////////////// 7108 case TGT_RSP_CONFIG: // send the response for a config transaction 7109 { 7110 if ( p_vci_tgt.rspack ) 7111 { 7112 r_config_to_tgt_rsp_req = false; 7113 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 7114 7115 #if DEBUG_MEMC_TGT_RSP 7116 if( m_debug ) 7117 { 7118 std::cout 7119 << " <MEMC " << name() 7120 << " TGT_RSP_CONFIG> Config transaction completed response" 7121 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 7122 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 7123 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 7124 << std::endl; 7125 } 7126 #endif 7127 } 7128 break; 7129 } 7130 ///////////////////// 7131 case TGT_RSP_TGT_CMD: // send the response for a configuration access 7132 { 7133 if ( p_vci_tgt.rspack ) 7134 { 7135 r_tgt_cmd_to_tgt_rsp_req = false; 7136 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 7137 7138 #if DEBUG_MEMC_TGT_RSP 7139 if( m_debug ) 7140 { 7141 std::cout 7142 << " <MEMC " << name() 7143 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 7144 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 7145 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 7146 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 7147 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 7148 << std::endl; 7149 } 7150 #endif 7151 } 7152 break; 7153 } 7154 ////////////////// 7155 case TGT_RSP_READ: // send the response to a read 7156 { 7157 if ( p_vci_tgt.rspack ) 7158 { 7159 7160 #if DEBUG_MEMC_TGT_RSP 7161 if( m_debug ) 7162 { 7163 std::cout 7164 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 7165 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 7166 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 7167 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 7168 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 7169 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 7170 } 7171 #endif 7172 7173 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 7174 r_read_to_tgt_rsp_length.read() - 1; 7175 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 7176 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 7177 7178 if ((is_last_word and not is_ll) or 7179 (r_tgt_rsp_key_sent.read() and is_ll)) 7180 { 7181 // Last word in case of READ or second flit in case if LL 7182 r_tgt_rsp_key_sent = false; 7183 r_read_to_tgt_rsp_req = false; 7184 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 7185 } 7186 else 7187 { 7188 if (is_ll) 7189 { 7190 r_tgt_rsp_key_sent = true; // Send second flit of ll 7191 } 7192 else 7193 { 7194 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 
7195 }
7196 }
7197 }
7198 break;
7199 }
7200 //////////////////
7201 case TGT_RSP_WRITE: // send the write acknowledge
7202 {
7203 if(p_vci_tgt.rspack)
7204 {
7205
7206 #if DEBUG_MEMC_TGT_RSP
7207 if(m_debug)
7208 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response"
7209 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read()
7210 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read()
7211 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl;
7212 #endif
7213 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE;
7214 r_write_to_tgt_rsp_req = false;
7215 }
7216 break;
7217 }
7218 /////////////////////
7219 case TGT_RSP_CLEANUP: // not clear to me (AG)
7220 {
7221 if(p_vci_tgt.rspack)
7222 {
7223
7224 #if DEBUG_MEMC_TGT_RSP
7225 if(m_debug)
7226 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response"
7227 << " / rsrcid = " << std::hex << r_cleanup_to_tgt_rsp_srcid.read()
7228 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read()
7229 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl;
7230 #endif
7231 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE;
7232 r_cleanup_to_tgt_rsp_req = false;
7233 }
7234 break;
7235 }
7236 /////////////////
7237 case TGT_RSP_CAS: // send one atomic word response
7238 {
7239 if(p_vci_tgt.rspack)
7240 {
7241
7242 #if DEBUG_MEMC_TGT_RSP
7243 if(m_debug)
7244 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response"
7245 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read()
7246 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read()
7247 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl;
7248 #endif
7249 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE;
7250 r_cas_to_tgt_rsp_req = false;
7251 }
7252 break;
7253 }
7254 //////////////////
7255 case TGT_RSP_XRAM: // send the response after XRAM access
7256 {
7257 if ( p_vci_tgt.rspack )
7258 {
7259
7260 #if DEBUG_MEMC_TGT_RSP
7261 if( m_debug )
7262 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access"
7263 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read()
7264 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read()
7265 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read()
7266 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read()
7267 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl;
7268 #endif
7269 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() +
7270 r_xram_rsp_to_tgt_rsp_length.read() - 1;
7271 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx);
7272 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL);
7273 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read();
7274
7275 if (((is_last_word or is_error) and not is_ll) or
7276 (r_tgt_rsp_key_sent.read() and is_ll))
7277 {
7278 // Last word sent in case of READ or second flit sent in case of LL
7279 r_tgt_rsp_key_sent = false;
7280 r_xram_rsp_to_tgt_rsp_req = false;
7281 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE;
7282 }
7283 else
7284 {
7285 if (is_ll)
7286 {
7287 r_tgt_rsp_key_sent = true; // Send second flit of ll
7288 }
7289 else
7290 {
7291 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read
7292 }
7293 }
7294 }
7295 break;
7296 }
7297 ///////////////////////
7298 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction
7299 {
7300 if(p_vci_tgt.rspack)
7301 {
7302
7303 #if DEBUG_MEMC_TGT_RSP
7304 if(m_debug)
7305 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction"
7306 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read()
7307 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read()
7308 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl;
7309 #endif
7310 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE;
7311 r_multi_ack_to_tgt_rsp_req = false;
7312 }
7313 break;
7314 }
7315 } // end switch tgt_rsp_fsm
7316
7317 ////////////////////////////////////////////////////////////////////////////////////
7318 // ALLOC_UPT FSM
7319 ////////////////////////////////////////////////////////////////////////////////////
7320 // The ALLOC_UPT FSM allocates the access to the Update Table (UPT),
7321 // with a round robin priority between three FSMs, with the following order:
7322 // WRITE -> CAS -> MULTI_ACK
7323 // - The WRITE FSM initiates an update transaction and sets a new entry in the UPT.
7324 // - The CAS FSM does the same thing as the WRITE FSM.
7325 // - The MULTI_ACK FSM completes those transactions and erases the UPT entry.
7326 // The resource is always allocated.
7327 /////////////////////////////////////////////////////////////////////////////////////
7328
7329 //std::cout << std::endl << "alloc_upt_fsm" << std::endl;
7330
7331 switch(r_alloc_upt_fsm.read())
7332 {
7333 /////////////////////////
7334 case ALLOC_UPT_WRITE: // allocated to WRITE FSM
7335 if (r_write_fsm.read() != WRITE_UPT_LOCK)
7336 {
7337 if (r_cas_fsm.read() == CAS_UPT_LOCK)
7338 r_alloc_upt_fsm = ALLOC_UPT_CAS;
7339
7340 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK)
7341 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK;
7342 }
7343 break;
7344
7345 /////////////////////////
7346 case ALLOC_UPT_CAS: // allocated to CAS FSM
7347 if (r_cas_fsm.read() != CAS_UPT_LOCK)
7348 {
7349 if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK)
7350 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK;
7351
7352 else if (r_write_fsm.read() == WRITE_UPT_LOCK)
7353 r_alloc_upt_fsm = ALLOC_UPT_WRITE;
7354 }
7355 break;
7356
7357 /////////////////////////
7358 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM
7359 if ((r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK ) and
7360 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR))
7361 {
7362 if (r_write_fsm.read() == WRITE_UPT_LOCK)
7363 r_alloc_upt_fsm = ALLOC_UPT_WRITE;
7364
7365 else if (r_cas_fsm.read() == CAS_UPT_LOCK)
7366 r_alloc_upt_fsm = ALLOC_UPT_CAS;
7367 }
7368 break;
7369 } // end switch r_alloc_upt_fsm
7370
7371 ////////////////////////////////////////////////////////////////////////////////////
7372 // ALLOC_IVT FSM
7373 ////////////////////////////////////////////////////////////////////////////////////
7374 // The ALLOC_IVT FSM allocates the access to the Invalidate Table (IVT),
7375 // with a round robin priority between five FSMs, with the following order:
7376 // WRITE -> XRAM_RSP -> CLEANUP -> CAS -> CONFIG
7377 // - The WRITE FSM initiates broadcast invalidate transactions and sets a new entry
7378 // in the IVT.
7379 // - The CAS FSM does the same thing as the WRITE FSM.
7380 // - The XRAM_RSP FSM initiates broadcast/multicast invalidate transactions and sets
7381 // a new entry in the IVT.
7382 // - The CONFIG FSM does the same thing as the XRAM_RSP FSM.
7383 // - The CLEANUP FSM completes those transactions and erases the IVT entry.
7384 // The resource is always allocated.
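// Note on the allocator pattern: ALLOC_UPT above and the ALLOC_IVT, ALLOC_DIR,
// ALLOC_TRT and ALLOC_HEAP FSMs below all follow the same rotating-priority
// policy: the current owner keeps the lock as long as its client FSM is in a
// state that uses the resource, and when the lock is released the other
// requesters are scanned in cyclic order starting from the owner's successor.
// The stand-alone sketch below (generic names, not the registers of this
// component) summarises that policy; it is illustrative only and is kept out
// of the build.
#if 0
static size_t round_robin_grant( size_t owner,         // index of current owner
                                 const bool request[], // request[i]: FSM i wants the lock
                                 size_t n )            // number of competing FSMs
{
    for ( size_t k = 1; k <= n; k++ )                  // scan successors in cyclic order
    {
        size_t candidate = (owner + k) % n;
        if ( request[candidate] ) return candidate;    // first requester after the owner wins
    }
    return owner;                                      // no other request: keep the owner
}
#endif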
7385 ///////////////////////////////////////////////////////////////////////////////////// 7386 7387 //std::cout << std::endl << "alloc_ivt_fsm" << std::endl; 7388 7389 switch(r_alloc_ivt_fsm.read()) 7390 { 7391 ///////////////////// 7392 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 7393 if (r_write_fsm.read() != WRITE_BC_IVT_LOCK) 7394 { 7395 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7396 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7397 7398 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7399 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7400 7401 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7402 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7403 7404 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7405 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7406 7407 else 7408 m_cpt_ivt_unused++; 7409 } 7410 break; 7411 7412 //////////////////////// 7413 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 7414 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 7415 { 7416 if(r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7417 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7418 7419 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7420 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7421 7422 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7423 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7424 7425 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7426 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7427 7428 else 7429 m_cpt_ivt_unused++; 7430 } 7431 break; 7432 7433 /////////////////////// 7434 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 7435 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 7436 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT)) 7437 { 7438 if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7439 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7440 7441 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7442 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7443 7444 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7445 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7446 7447 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7448 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7449 } 7450 break; 7451 7452 ////////////////////////// 7453 case ALLOC_IVT_CAS: // allocated to CAS FSM 7454 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 7455 { 7456 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 7457 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 7458 7459 else if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7460 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7461 7462 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7463 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7464 7465 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7466 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7467 7468 } 7469 break; 7470 7471 ////////////////////////// 7472 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 7473 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 7474 { 7475 if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7476 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7477 7478 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7479 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7480 7481 else if (r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) 7482 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 7483 7484 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 7485 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 7486 7487 } 7488 break; 7489 7490 } // end switch r_alloc_ivt_fsm 7491 7492 //////////////////////////////////////////////////////////////////////////////////// 7493 // ALLOC_DIR FSM 7494 //////////////////////////////////////////////////////////////////////////////////// 7495 // The ALLOC_DIR FSM allocates the access to the directory and 7496 // the data cache with a round robin priority between 6 user FSMs : 7497 // The cyclic 
ordering is CONFIG > READ > WRITE > CAS > CLEANUP > XRAM_RSP 7498 // The ressource is always allocated. 7499 ///////////////////////////////////////////////////////////////////////////////////// 7500 7501 //std::cout << std::endl << "alloc_dir_fsm" << std::endl; 7502 7503 switch(r_alloc_dir_fsm.read()) 7504 { 7505 ///////////////////// 7506 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 7507 // All the WAYS of a SET initialized in parallel 7508 7509 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 7510 7511 if(r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 7512 { 7513 m_cache_directory.init(); 7514 r_alloc_dir_fsm = ALLOC_DIR_READ; 7515 } 7516 break; 7517 7518 ////////////////////// 7519 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 7520 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 7521 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 7522 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 7523 (r_config_fsm.read() != CONFIG_TRT_SET) and 7524 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 7525 { 7526 if(r_read_fsm.read() == READ_DIR_REQ) 7527 r_alloc_dir_fsm = ALLOC_DIR_READ; 7528 7529 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7530 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7531 7532 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7533 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7534 7535 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7536 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7537 7538 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7539 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7540 } 7541 break; 7542 7543 //////////////////// 7544 case ALLOC_DIR_READ: // allocated to READ FSM 7545 if( ((r_read_fsm.read() != READ_DIR_REQ) and 7546 (r_read_fsm.read() != READ_DIR_LOCK) and 7547 (r_read_fsm.read() != READ_TRT_LOCK) and 7548 (r_read_fsm.read() != READ_HEAP_REQ)) 7549 or 7550 ((r_read_fsm.read() == READ_TRT_LOCK) and 7551 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ)) ) 7552 { 7553 if(r_write_fsm.read() == WRITE_DIR_REQ) 7554 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7555 7556 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7557 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7558 7559 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7560 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7561 7562 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7563 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7564 7565 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7566 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7567 7568 else 7569 m_cpt_dir_unused++; 7570 } 7571 else 7572 m_cpt_read_fsm_dir_used++; 7573 break; 7574 7575 ///////////////////// 7576 case ALLOC_DIR_WRITE: // allocated to WRITE FSM 7577 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 7578 (r_write_fsm.read() != WRITE_DIR_LOCK) and 7579 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 7580 (r_write_fsm.read() != WRITE_DIR_HIT) and 7581 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7582 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 7583 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7584 (r_write_fsm.read() != WRITE_UPT_LOCK) and 7585 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK)) 7586 or 7587 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 7588 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 7589 or 7590 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 7591 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 7592 { 7593 if(r_cas_fsm.read() == CAS_DIR_REQ) 7594 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7595 7596 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7597 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7598 7599 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7600 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7601 
7602 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7603 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7604 7605 else if(r_read_fsm.read() == READ_DIR_REQ) 7606 r_alloc_dir_fsm = ALLOC_DIR_READ; 7607 7608 else 7609 m_cpt_dir_unused++; 7610 } 7611 else 7612 m_cpt_write_fsm_dir_used++; 7613 break; 7614 7615 /////////////////// 7616 case ALLOC_DIR_CAS: // allocated to CAS FSM 7617 if(((r_cas_fsm.read() != CAS_DIR_REQ) and 7618 (r_cas_fsm.read() != CAS_DIR_LOCK) and 7619 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 7620 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 7621 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 7622 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7623 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 7624 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7625 (r_cas_fsm.read() != CAS_UPT_LOCK) and 7626 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 7627 or 7628 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 7629 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 7630 or 7631 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 7632 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 7633 { 7634 if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7635 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7636 7637 else if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7638 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7639 7640 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7641 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7642 7643 else if(r_read_fsm.read() == READ_DIR_REQ) 7644 r_alloc_dir_fsm = ALLOC_DIR_READ; 7645 7646 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7647 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7648 7649 else 7650 m_cpt_dir_unused++; 7651 } 7652 else 7653 m_cpt_cas_fsm_dir_used++; 7654 break; 7655 7656 /////////////////////// 7657 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 7658 if((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 7659 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 7660 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 7661 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK)) 7662 { 7663 if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 7664 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 7665 7666 else if(r_config_fsm.read() == CONFIG_DIR_REQ) 7667 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7668 7669 else if(r_read_fsm.read() == READ_DIR_REQ) 7670 r_alloc_dir_fsm = ALLOC_DIR_READ; 7671 7672 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7673 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7674 7675 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7676 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7677 7678 else 7679 m_cpt_dir_unused++; 7680 } 7681 else 7682 m_cpt_cleanup_fsm_dir_used++; 7683 break; 7684 7685 //////////////////////// 7686 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 7687 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 7688 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7689 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7690 { 7691 if(r_config_fsm.read() == CONFIG_DIR_REQ) 7692 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 7693 7694 else if(r_read_fsm.read() == READ_DIR_REQ) 7695 r_alloc_dir_fsm = ALLOC_DIR_READ; 7696 7697 else if(r_write_fsm.read() == WRITE_DIR_REQ) 7698 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 7699 7700 else if(r_cas_fsm.read() == CAS_DIR_REQ) 7701 r_alloc_dir_fsm = ALLOC_DIR_CAS; 7702 7703 else if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 7704 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 7705 7706 else 7707 m_cpt_dir_unused++; 7708 } 7709 else 7710 m_cpt_xram_rsp_fsm_dir_used++; 7711 break; 7712 7713 } // end switch alloc_dir_fsm 7714 7715 //////////////////////////////////////////////////////////////////////////////////// 7716 // ALLOC_TRT FSM 7717 
//////////////////////////////////////////////////////////////////////////////////// 7718 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 7719 // with a round robin priority between 7 user FSMs : 7720 // The priority is READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG 7721 // The ressource is always allocated. 7722 /////////////////////////////////////////////////////////////////////////////////// 7723 7724 //std::cout << std::endl << "alloc_trt_fsm" << std::endl; 7725 7726 switch(r_alloc_trt_fsm.read()) 7727 { 7728 //////////////////// 7729 case ALLOC_TRT_READ: 7730 if(r_read_fsm.read() != READ_TRT_LOCK) 7731 { 7732 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7733 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7734 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7735 7736 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7737 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7738 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7739 7740 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7741 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7742 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7743 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7744 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7745 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7746 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7747 7748 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7749 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7750 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7751 7752 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7753 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7754 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7755 7756 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7757 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7758 7759 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7760 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7761 } 7762 break; 7763 7764 ///////////////////// 7765 case ALLOC_TRT_WRITE: 7766 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7767 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7768 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7769 { 7770 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7771 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7772 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7773 7774 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7775 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7776 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7777 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7778 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7779 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7780 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7781 7782 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7783 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7784 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7785 7786 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7787 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7788 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7789 7790 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7791 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7792 7793 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7794 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7795 7796 else if(r_read_fsm.read() == READ_TRT_LOCK) 7797 r_alloc_trt_fsm = ALLOC_TRT_READ; 7798 } 7799 break; 7800 /////////////////// 7801 case ALLOC_TRT_CAS: 7802 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7803 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7804 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7805 { 7806 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7807 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7808 (r_ixr_cmd_fsm.read() == 
IXR_CMD_CAS_TRT) or 7809 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7810 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7811 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7812 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7813 7814 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7815 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7816 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7817 7818 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7819 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7820 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7821 7822 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7823 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7824 7825 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7826 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7827 7828 else if(r_read_fsm.read() == READ_TRT_LOCK) 7829 r_alloc_trt_fsm = ALLOC_TRT_READ; 7830 7831 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7832 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7833 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7834 } 7835 break; 7836 /////////////////////// 7837 case ALLOC_TRT_IXR_CMD: 7838 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 7839 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 7840 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 7841 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 7842 (r_ixr_cmd_fsm.read() != IXR_CMD_CLEANUP_TRT) and 7843 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 7844 { 7845 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7846 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7847 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7848 7849 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7850 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7851 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7852 7853 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7854 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7855 7856 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7857 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7858 7859 else if(r_read_fsm.read() == READ_TRT_LOCK) 7860 r_alloc_trt_fsm = ALLOC_TRT_READ; 7861 7862 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7863 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7864 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7865 7866 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7867 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7868 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7869 } 7870 break; 7871 //////////////////////// 7872 case ALLOC_TRT_XRAM_RSP: 7873 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 7874 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 7875 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7876 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 7877 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7878 { 7879 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7880 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7881 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7882 7883 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7884 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7885 7886 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7887 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7888 7889 else if(r_read_fsm.read() == READ_TRT_LOCK) 7890 r_alloc_trt_fsm = ALLOC_TRT_READ; 7891 7892 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7893 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7894 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7895 7896 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7897 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7898 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7899 7900 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7901 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7902 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7903 
(r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7904 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7905 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7906 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7907 7908 } 7909 break; 7910 /////////////////////// 7911 case ALLOC_TRT_IXR_RSP: 7912 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 7913 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 7914 { 7915 if(r_config_fsm.read() == CONFIG_TRT_LOCK) 7916 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7917 7918 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7919 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7920 7921 else if(r_read_fsm.read() == READ_TRT_LOCK) 7922 r_alloc_trt_fsm = ALLOC_TRT_READ; 7923 7924 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7925 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7926 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7927 7928 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7929 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7930 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7931 7932 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7933 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7934 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7935 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7936 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7937 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7938 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7939 7940 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7941 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7942 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7943 } 7944 break; 7945 ////////////////////// 7946 case ALLOC_TRT_CONFIG: 7947 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 7948 (r_config_fsm.read() != CONFIG_TRT_SET)) 7949 { 7950 if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 7951 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 7952 7953 else if(r_read_fsm.read() == READ_TRT_LOCK) 7954 r_alloc_trt_fsm = ALLOC_TRT_READ; 7955 7956 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7957 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7958 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7959 7960 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7961 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7962 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7963 7964 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7965 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7966 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7967 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7968 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 7969 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7970 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7971 7972 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7973 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7974 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7975 7976 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7977 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7978 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7979 7980 } 7981 break; 7982 7983 //////////////////////// 7984 case ALLOC_TRT_CLEANUP: 7985 if(r_cleanup_fsm.read() != CLEANUP_IXR_REQ) 7986 { 7987 if(r_read_fsm.read() == READ_TRT_LOCK) 7988 r_alloc_trt_fsm = ALLOC_TRT_READ; 7989 7990 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7991 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7992 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7993 7994 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7995 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7996 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7997 7998 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7999 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 8000 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 8001 
(r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 8002 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 8003 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 8004 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 8005 8006 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 8007 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 8008 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 8009 8010 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 8011 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 8012 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 8013 8014 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 8015 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 8016 } 8017 break; 8018 8019 8020 } // end switch alloc_trt_fsm 8021 8022 //////////////////////////////////////////////////////////////////////////////////// 8023 // ALLOC_HEAP FSM 8024 //////////////////////////////////////////////////////////////////////////////////// 8025 // The ALLOC_HEAP FSM allocates the access to the heap 8026 // with a round robin priority between 6 user FSMs : 8027 // The cyclic ordering is READ > WRITE > CAS > CLEANUP > XRAM_RSP > CONFIG 8028 // The ressource is always allocated. 8029 ///////////////////////////////////////////////////////////////////////////////////// 8030 8031 //std::cout << std::endl << "alloc_heap_fsm" << std::endl; 8032 8033 switch(r_alloc_heap_fsm.read()) 8034 { 8035 //////////////////// 8036 case ALLOC_HEAP_RESET: 8037 // Initializes the heap one ENTRY each cycle. 8038 8039 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 8040 8041 if(r_alloc_heap_reset_cpt.read() == (m_heap_size-1)) 8042 { 8043 m_heap.init(); 8044 8045 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8046 } 8047 break; 8048 8049 //////////////////// 8050 case ALLOC_HEAP_READ: 8051 if((r_read_fsm.read() != READ_HEAP_REQ) and 8052 (r_read_fsm.read() != READ_HEAP_LOCK) and 8053 (r_read_fsm.read() != READ_HEAP_ERASE)) 8054 { 8055 if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8056 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8057 8058 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8059 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8060 8061 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8062 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8063 8064 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8065 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8066 8067 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8068 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8069 else 8070 m_cpt_heap_unused++; 8071 } 8072 else 8073 m_cpt_read_fsm_heap_used++; 8074 break; 8075 8076 ///////////////////// 8077 case ALLOC_HEAP_WRITE: 8078 if((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 8079 (r_write_fsm.read() != WRITE_UPT_REQ) and 8080 (r_write_fsm.read() != WRITE_UPT_NEXT)) 8081 { 8082 if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8083 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8084 8085 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8086 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8087 8088 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8089 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8090 8091 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8092 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8093 8094 else if(r_read_fsm.read() == READ_HEAP_REQ) 8095 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8096 8097 else 8098 m_cpt_heap_unused++; 8099 } 8100 else 8101 m_cpt_write_fsm_heap_used++; 8102 break; 8103 8104 //////////////////// 8105 case ALLOC_HEAP_CAS: 8106 if((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 8107 (r_cas_fsm.read() != CAS_UPT_REQ) and 8108 (r_cas_fsm.read() != CAS_UPT_NEXT)) 8109 { 8110 if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8111 
r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8112 8113 else if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8114 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8115 8116 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8117 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8118 8119 else if(r_read_fsm.read() == READ_HEAP_REQ) 8120 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8121 8122 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8123 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8124 8125 else 8126 m_cpt_heap_unused++; 8127 } 8128 else 8129 m_cpt_cas_fsm_heap_used++; 8130 break; 8131 8132 /////////////////////// 8133 case ALLOC_HEAP_CLEANUP: 8134 if((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 8135 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 8136 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 8137 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 8138 { 8139 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8140 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8141 8142 else if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8143 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8144 8145 else if(r_read_fsm.read() == READ_HEAP_REQ) 8146 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8147 8148 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8149 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8150 8151 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8152 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8153 8154 else 8155 m_cpt_heap_unused++; 8156 } 8157 else 8158 m_cpt_cleanup_fsm_heap_used++; 8159 break; 8160 8161 //////////////////////// 8162 case ALLOC_HEAP_XRAM_RSP: 8163 if((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 8164 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 8165 { 8166 if(r_config_fsm.read() == CONFIG_HEAP_REQ) 8167 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 8168 8169 else if(r_read_fsm.read() == READ_HEAP_REQ) 8170 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8171 8172 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8173 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8174 8175 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8176 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8177 8178 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8179 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8180 8181 } 8182 break; 8183 8184 /////////////////////// 8185 case ALLOC_HEAP_CONFIG: 8186 if((r_config_fsm.read() != CONFIG_HEAP_REQ) and 8187 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 8188 { 8189 if(r_read_fsm.read() == READ_HEAP_REQ) 8190 r_alloc_heap_fsm = ALLOC_HEAP_READ; 8191 8192 else if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 8193 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 8194 8195 else if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 8196 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 8197 8198 else if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 8199 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 8200 8201 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 8202 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 8203 else 8204 m_cpt_heap_unused++; 8205 } 8206 else 8207 m_cpt_xram_rsp_fsm_heap_used++; 8208 break; 8209 8210 } // end switch alloc_heap_fsm 8211 8212 //std::cout << std::endl << "fifo_update" << std::endl; 8213 8214 ///////////////////////////////////////////////////////////////////// 8215 // TGT_CMD to READ FIFO 8216 ///////////////////////////////////////////////////////////////////// 8217 8218 m_cmd_read_addr_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8219 p_vci_tgt.address.read() ); 8220 m_cmd_read_length_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8221 p_vci_tgt.plen.read()>>2 ); 8222 m_cmd_read_srcid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put, 8223 p_vci_tgt.srcid.read() ); 8224 m_cmd_read_trdid_fifo.update( 
cmd_read_fifo_get, cmd_read_fifo_put,
8225 p_vci_tgt.trdid.read() );
8226 m_cmd_read_pktid_fifo.update( cmd_read_fifo_get, cmd_read_fifo_put,
8227 p_vci_tgt.pktid.read() );
8228
8229 /////////////////////////////////////////////////////////////////////
8230 // TGT_CMD to WRITE FIFO
8231 /////////////////////////////////////////////////////////////////////
8232
8233 m_cmd_write_addr_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8234 (addr_t)p_vci_tgt.address.read() );
8235 m_cmd_write_eop_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8236 p_vci_tgt.eop.read() );
8237 m_cmd_write_srcid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8238 p_vci_tgt.srcid.read() );
8239 m_cmd_write_trdid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8240 p_vci_tgt.trdid.read() );
8241 m_cmd_write_pktid_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8242 p_vci_tgt.pktid.read() );
8243 m_cmd_write_data_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8244 p_vci_tgt.wdata.read() );
8245 m_cmd_write_be_fifo.update( cmd_write_fifo_get, cmd_write_fifo_put,
8246 p_vci_tgt.be.read() );
8247
8248 ////////////////////////////////////////////////////////////////////////////////////
8249 // TGT_CMD to CAS FIFO
8250 ////////////////////////////////////////////////////////////////////////////////////
8251
8252 m_cmd_cas_addr_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put,
8253 (addr_t)p_vci_tgt.address.read() );
8254 m_cmd_cas_eop_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put,
8255 p_vci_tgt.eop.read() );
8256 m_cmd_cas_srcid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put,
8257 p_vci_tgt.srcid.read() );
8258 m_cmd_cas_trdid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put,
8259 p_vci_tgt.trdid.read() );
8260 m_cmd_cas_pktid_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put,
8261 p_vci_tgt.pktid.read() );
8262 m_cmd_cas_wdata_fifo.update( cmd_cas_fifo_get, cmd_cas_fifo_put,
8263 p_vci_tgt.wdata.read() );
8264
8265 ////////////////////////////////////////////////////////////////////////////////////
8266 // CC_RECEIVE to CLEANUP FIFO
8267 ////////////////////////////////////////////////////////////////////////////////////
8268
8269 /*
8270 if(cc_receive_to_cleanup_fifo_put)
8271 {
8272 if(cc_receive_to_cleanup_fifo_get)
8273 {
8274 m_cc_receive_to_cleanup_fifo.put_and_get( ( (uint64_t)(p_dspin_in.eop.read() & 0x1 ) << 32 ) | p_dspin_in.data.read());
8275 }
8276 else // TODO: do not hard-code 32 !!!!
8277 { 8278 m_cc_receive_to_cleanup_fifo.simple_put( ( (uint64_t)(p_dspin_in.eop.read() & 0x1 ) << 32 ) | p_dspin_in.data.read()); 8279 //m_cc_receive_to_cleanup_fifo.simple_put(p_dspin_in.data.read()); 8280 } 8281 } 8282 else 8283 { 8284 if(cc_receive_to_cleanup_fifo_get) 8285 { 8286 m_cc_receive_to_cleanup_fifo.simple_get(); 8287 } 8288 } 8289 */ 8290 m_cc_receive_to_cleanup_fifo.update( cc_receive_to_cleanup_fifo_get, 8291 cc_receive_to_cleanup_fifo_put, 8292 ( (uint64_t)(p_dspin_p2m.eop.read() & 0x1 ) << 32 ) | p_dspin_p2m.data.read() ); 8293 8294 //////////////////////////////////////////////////////////////////////////////////// 8295 // CC_RECEIVE to MULTI_ACK FIFO 8296 //////////////////////////////////////////////////////////////////////////////////// 8297 8298 m_cc_receive_to_multi_ack_fifo.update( cc_receive_to_multi_ack_fifo_get, 8299 cc_receive_to_multi_ack_fifo_put, 8300 p_dspin_p2m.data.read() ); 8301 8302 //////////////////////////////////////////////////////////////////////////////////// 8303 // WRITE to CC_SEND FIFO 8304 //////////////////////////////////////////////////////////////////////////////////// 8305 8306 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, 8307 write_to_cc_send_fifo_put, 8308 write_to_cc_send_fifo_inst ); 8309 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, 8310 write_to_cc_send_fifo_put, 8311 write_to_cc_send_fifo_srcid ); 8312 8313 //////////////////////////////////////////////////////////////////////////////////// 8314 // CONFIG to CC_SEND FIFO 8315 //////////////////////////////////////////////////////////////////////////////////// 8316 8317 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, 8318 config_to_cc_send_fifo_put, 8319 config_to_cc_send_fifo_inst ); 8320 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, 8321 config_to_cc_send_fifo_put, 8322 config_to_cc_send_fifo_srcid ); 8323 8324 //////////////////////////////////////////////////////////////////////////////////// 8325 // XRAM_RSP to CC_SEND FIFO 8326 //////////////////////////////////////////////////////////////////////////////////// 8327 8328 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, 8329 xram_rsp_to_cc_send_fifo_put, 8330 xram_rsp_to_cc_send_fifo_inst ); 8331 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, 8332 xram_rsp_to_cc_send_fifo_put, 8333 xram_rsp_to_cc_send_fifo_srcid ); 8334 8335 //////////////////////////////////////////////////////////////////////////////////// 8336 // CAS to CC_SEND FIFO 8337 //////////////////////////////////////////////////////////////////////////////////// 8338 8339 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, 8340 cas_to_cc_send_fifo_put, 8341 cas_to_cc_send_fifo_inst ); 8342 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, 8343 cas_to_cc_send_fifo_put, 8344 cas_to_cc_send_fifo_srcid ); 8345 m_cpt_cycles++; 8346 8347 } // end transition() 8348 8349 ///////////////////////////// 8350 tmpl(void)::genMoore() 8351 ///////////////////////////// 8352 { 8353 //////////////////////////////////////////////////////////// 8354 // Command signals on the p_vci_ixr port 8355 //////////////////////////////////////////////////////////// 8356 8357 // DATA width is 8 bytes 8358 // The following values are not transmitted to XRAM 8359 // p_vci_ixr.be 8360 // p_vci_ixr.pktid 8361 // p_vci_ixr.cons 8362 // p_vci_ixr.wrap 8363 // p_vci_ixr.contig 8364 // p_vci_ixr.clen 8365 // p_vci_ixr.cfixed 8366 8367 p_vci_ixr.plen = 64; 8368 
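// The external RAM interface carries 64-bit data flits: on a PUT, each flit
// packs two consecutive 32-bit cache words (least-significant word first),
// and the last flit is emitted when the word index reaches m_words - 2.
// Stand-alone illustration of that packing (generic arguments, not the
// component's registers), kept out of the build:
#if 0
static uint64_t pack_wide_flit( const uint32_t* words, size_t index )
{
    return (uint64_t) words[index]                 // low  32 bits: word 'index'
         | ((uint64_t) words[index + 1] << 32);    // high 32 bits: word 'index + 1'
}
#endif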
p_vci_ixr.srcid = m_srcid_x; 8369 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 8370 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 8371 p_vci_ixr.be = 0xFF; 8372 p_vci_ixr.pktid = 0; 8373 p_vci_ixr.cons = false; 8374 p_vci_ixr.wrap = false; 8375 p_vci_ixr.contig = true; 8376 p_vci_ixr.clen = 0; 8377 p_vci_ixr.cfixed = false; 8378 8379 if ( (r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 8380 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 8381 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 8382 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 8383 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND) ) 8384 { 8385 p_vci_ixr.cmdval = true; 8386 8387 if ( r_ixr_cmd_get.read() ) // GET 8388 { 8389 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 8390 p_vci_ixr.wdata = 0; 8391 p_vci_ixr.eop = true; 8392 } 8393 else // PUT 8394 { 8395 size_t word = r_ixr_cmd_word.read(); 8396 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 8397 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 8398 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 8399 p_vci_ixr.eop = (word == (m_words-2)); 8400 } 8401 } 8402 /*ODCCP*/ 8403 else if (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_DATA_SEND) 8404 { 8405 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 8406 p_vci_ixr.cmdval = true; 8407 #if ODCCP_NON_INCLUSIVE 8408 p_vci_ixr.address = (addr_t)((r_cleanup_to_ixr_cmd_nline.read() * m_words + 8409 r_ixr_cmd_word.read()) * 4); 8410 p_vci_ixr.wdata = ((wide_data_t)(r_cleanup_to_ixr_cmd_data[r_ixr_cmd_word.read()].read()) | 8411 ((wide_data_t)(r_cleanup_to_ixr_cmd_data[r_ixr_cmd_word.read() + 1].read()) << 32)); 8412 #else 8413 /*p_vci_ixr.address = (addr_t)((r_cleanup_to_ixr_cmd_nline.read() * m_words + 8414 r_ixr_cmd_word.read()) * 4);*/ 8415 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 8416 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read()].read()) | 8417 ((wide_data_t)(r_ixr_cmd_wdata[r_ixr_cmd_word.read() + 1].read()) << 32)); 8418 #endif 8419 8420 p_vci_ixr.trdid = r_cleanup_to_ixr_cmd_index.read(); 8421 p_vci_ixr.eop = (r_ixr_cmd_word == (m_words - 2)); 8422 } 8423 else 8424 { 8425 p_vci_ixr.cmdval = false; 8426 } 8427 8428 //////////////////////////////////////////////////// 8429 // Response signals on the p_vci_ixr port 8430 //////////////////////////////////////////////////// 8431 8432 if( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 8433 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ) 8434 { 8435 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 8436 } 8437 else if (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) 8438 { 8439 p_vci_ixr.rspack = true; 8440 } 8441 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 8442 { 8443 p_vci_ixr.rspack = false; 8444 } 8445 8446 //////////////////////////////////////////////////// 8447 // Command signals on the p_vci_tgt port 8448 //////////////////////////////////////////////////// 8449 8450 switch((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 8451 { 8452 case TGT_CMD_IDLE: 8453 p_vci_tgt.cmdack = false; 8454 break; 8455 8456 case TGT_CMD_CONFIG: 8457 case TGT_CMD_ERROR: 8458 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 8459 break; 8460 8461 case TGT_CMD_READ: 8462 p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); 8463 break; 8464 8465 case TGT_CMD_WRITE: 8466 p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); 8467 break; 8468 8469 case TGT_CMD_CAS: 8470 p_vci_tgt.cmdack = m_cmd_cas_addr_fifo.wok(); 8471 break; 8472 } 8473 8474 //////////////////////////////////////////////////// 8475 // Response signals 
on the p_vci_tgt port 8476 //////////////////////////////////////////////////// 8477 8478 switch(r_tgt_rsp_fsm.read()) 8479 { 8480 case TGT_RSP_CONFIG_IDLE: 8481 case TGT_RSP_TGT_CMD_IDLE: 8482 case TGT_RSP_READ_IDLE: 8483 case TGT_RSP_WRITE_IDLE: 8484 case TGT_RSP_CAS_IDLE: 8485 case TGT_RSP_XRAM_IDLE: 8486 case TGT_RSP_MULTI_ACK_IDLE: 8487 case TGT_RSP_CLEANUP_IDLE: 8488 { 8489 p_vci_tgt.rspval = false; 8490 p_vci_tgt.rsrcid = 0; 8491 p_vci_tgt.rdata = 0; 8492 p_vci_tgt.rpktid = 0; 8493 p_vci_tgt.rtrdid = 0; 8494 p_vci_tgt.rerror = 0; 8495 p_vci_tgt.reop = false; 8496 break; 8497 } 8498 case TGT_RSP_CONFIG: 8499 { 8500 p_vci_tgt.rspval = true; 8501 p_vci_tgt.rdata = 0; 8502 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 8503 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 8504 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 8505 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 8506 p_vci_tgt.reop = true; 8507 8508 break; 8509 } 8510 case TGT_RSP_TGT_CMD: 8511 { 8512 p_vci_tgt.rspval = true; 8513 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 8514 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 8515 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 8516 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 8517 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 8518 p_vci_tgt.reop = true; 8519 8520 break; 8521 } 8522 case TGT_RSP_READ: 8523 { 8524 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 8525 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8526 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8527 8528 p_vci_tgt.rspval = true; 8529 8530 if ( is_ll and not r_tgt_rsp_key_sent.read() ) 8531 { 8532 // LL response first flit 8533 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 8534 } 8535 else 8536 { 8537 // LL response second flit or READ response 8538 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 8539 } 8540 8541 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 8542 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); 8543 p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); 8544 p_vci_tgt.rerror = 0; 8545 p_vci_tgt.reop = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 8546 break; 8547 } 8548 8549 case TGT_RSP_WRITE: 8550 p_vci_tgt.rspval = true; 8551 if(((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read()) 8552 p_vci_tgt.rdata = 1; 8553 else 8554 p_vci_tgt.rdata = 0; 8555 p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 8556 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 8557 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 8558 p_vci_tgt.rerror = 0; 8559 p_vci_tgt.reop = true; 8560 break; 8561 8562 case TGT_RSP_CLEANUP: 8563 p_vci_tgt.rspval = true; 8564 p_vci_tgt.rdata = 0; 8565 p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); 8566 p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); 8567 p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); 8568 p_vci_tgt.rerror = 0; // Can be a CAS rsp 8569 p_vci_tgt.reop = true; 8570 break; 8571 8572 case TGT_RSP_CAS: 8573 p_vci_tgt.rspval = true; 8574 p_vci_tgt.rdata = r_cas_to_tgt_rsp_data.read(); 8575 p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read(); 8576 p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read(); 8577 p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read(); 8578 p_vci_tgt.rerror = 0; 8579 p_vci_tgt.reop = true; 8580 break; 8581 8582 case TGT_RSP_XRAM: 8583 { 8584 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 
r_xram_rsp_to_tgt_rsp_length.read() - 1; 8585 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8586 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8587 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 8588 8589 p_vci_tgt.rspval = true; 8590 8591 if( is_ll and not r_tgt_rsp_key_sent.read() ) { 8592 // LL response first flit 8593 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read(); 8594 } 8595 else { 8596 // LL response second flit or READ response 8597 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 8598 } 8599 8600 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 8601 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); 8602 p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); 8603 p_vci_tgt.rerror = is_error; 8604 p_vci_tgt.reop = (((is_last_word or is_error) and not is_ll) or 8605 (r_tgt_rsp_key_sent.read() and is_ll)); 8606 break; 8607 } 8608 8609 case TGT_RSP_MULTI_ACK: 8610 p_vci_tgt.rspval = true; 8611 p_vci_tgt.rdata = 0; // Can be a CAS or SC rsp 8612 p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read(); 8613 p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read(); 8614 p_vci_tgt.rpktid = r_multi_ack_to_tgt_rsp_pktid.read(); 8615 p_vci_tgt.rerror = 0; 8616 p_vci_tgt.reop = true; 8617 break; 8618 } // end switch r_tgt_rsp_fsm 8619 8620 //////////////////////////////////////////////////////////////////// 8621 // p_dspin_m2p port (CC_SEND FSM) 8622 //////////////////////////////////////////////////////////////////// 8623 8624 p_dspin_m2p.write = false; 8625 p_dspin_m2p.eop = false; 8626 p_dspin_m2p.data = 0; 8627 8628 switch(r_cc_send_fsm.read()) 8629 { 8630 /////////////////////////// 8631 case CC_SEND_CONFIG_IDLE: 8632 case CC_SEND_XRAM_RSP_IDLE: 8633 case CC_SEND_WRITE_IDLE: 8634 case CC_SEND_CAS_IDLE: 8635 { 8636 break; 8637 } 8638 //////////////////////////////// 8639 case CC_SEND_CONFIG_INVAL_HEADER: 8640 { 8641 uint8_t multi_inval_type; 8642 if(m_config_to_cc_send_inst_fifo.read()) 8643 { 8644 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 8645 } 8646 else 8647 { 8648 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA; 8649 } 8650 8651 uint64_t flit = 0; 8652 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 8653 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8654 8655 DspinDhccpParam::dspin_set( flit, 8656 dest, 8657 DspinDhccpParam::MULTI_INVAL_DEST); 8658 8659 DspinDhccpParam::dspin_set( flit, 8660 m_cc_global_id, 8661 DspinDhccpParam::MULTI_INVAL_SRCID); 8662 8663 DspinDhccpParam::dspin_set( flit, 8664 r_config_to_cc_send_trdid.read(), 8665 DspinDhccpParam::MULTI_INVAL_UPDT_INDEX); 8666 8667 DspinDhccpParam::dspin_set( flit, 8668 multi_inval_type, 8669 DspinDhccpParam::M2P_TYPE); 8670 p_dspin_m2p.write = true; 8671 p_dspin_m2p.data = flit; 8672 break; 8673 } 8674 //////////////////////////////// 8675 case CC_SEND_CONFIG_INVAL_NLINE: 8676 { 8677 uint64_t flit = 0; 8678 DspinDhccpParam::dspin_set( flit, 8679 r_config_to_cc_send_nline.read(), 8680 DspinDhccpParam::MULTI_INVAL_NLINE); 8681 p_dspin_m2p.eop = true; 8682 p_dspin_m2p.write = true; 8683 p_dspin_m2p.data = flit; 8684 break; 8685 } 8686 /////////////////////////////////// 8687 case CC_SEND_XRAM_RSP_INVAL_HEADER: 8688 { 8689 if(not m_xram_rsp_to_cc_send_inst_fifo.rok()) break; 8690 8691 uint8_t multi_inval_type; 8692 if(m_xram_rsp_to_cc_send_inst_fifo.read()) 8693 { 8694 multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST; 8695 } 8696 else 8697 { 8698 multi_inval_type = 
        ///////////////////////////////////
        case CC_SEND_XRAM_RSP_INVAL_HEADER:
        {
            if(not m_xram_rsp_to_cc_send_inst_fifo.rok()) break;

            uint8_t multi_inval_type;
            if(m_xram_rsp_to_cc_send_inst_fifo.read())
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_INST;
            }
            else
            {
                multi_inval_type = DspinDhccpParam::TYPE_MULTI_INVAL_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set( flit,
                                        dest,
                                        DspinDhccpParam::MULTI_INVAL_DEST);

            DspinDhccpParam::dspin_set( flit,
                                        m_cc_global_id,
                                        DspinDhccpParam::MULTI_INVAL_SRCID);

            DspinDhccpParam::dspin_set( flit,
                                        r_xram_rsp_to_cc_send_trdid.read(),
                                        DspinDhccpParam::MULTI_INVAL_UPDT_INDEX);

            DspinDhccpParam::dspin_set( flit,
                                        multi_inval_type,
                                        DspinDhccpParam::M2P_TYPE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }

        //////////////////////////////////
        case CC_SEND_XRAM_RSP_INVAL_NLINE:
        {
            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        r_xram_rsp_to_cc_send_nline.read(),
                                        DspinDhccpParam::MULTI_INVAL_NLINE);
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }

        /////////////////////////////////////
        case CC_SEND_CONFIG_BRDCAST_HEADER:
        case CC_SEND_XRAM_RSP_BRDCAST_HEADER:
        case CC_SEND_WRITE_BRDCAST_HEADER:
        case CC_SEND_CAS_BRDCAST_HEADER:
        {
            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        m_broadcast_boundaries,
                                        DspinDhccpParam::BROADCAST_BOX);

            DspinDhccpParam::dspin_set( flit,
                                        m_cc_global_id,
                                        DspinDhccpParam::BROADCAST_SRCID);

            DspinDhccpParam::dspin_set( flit,
                                        1ULL,
                                        DspinDhccpParam::M2P_BC);
            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ////////////////////////////////////
        case CC_SEND_XRAM_RSP_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set( flit,
                                        r_xram_rsp_to_cc_send_nline.read(),
                                        DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        //////////////////////////////////
        case CC_SEND_CONFIG_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set( flit,
                                        r_config_to_cc_send_nline.read(),
                                        DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        /////////////////////////////////
        case CC_SEND_WRITE_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set( flit,
                                        r_write_to_cc_send_nline.read(),
                                        DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
        ///////////////////////////////
        case CC_SEND_CAS_BRDCAST_NLINE:
        {
            uint64_t flit = 0;
            DspinDhccpParam::dspin_set( flit,
                                        r_cas_to_cc_send_nline.read(),
                                        DspinDhccpParam::BROADCAST_NLINE);
            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;
            break;
        }
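        // Note: multicast updates are longer packets than invalidates: a
        // header flit (destination, source global id, update-table index,
        // M2P type), a flit carrying the first word index and the line
        // number, then one data flit per word to update, each with its byte
        // enable; eop is raised on the last data flit only.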
        ///////////////////////////////
        case CC_SEND_WRITE_UPDT_HEADER:
        {
            if(not m_write_to_cc_send_inst_fifo.rok()) break;

            uint8_t multi_updt_type;
            if(m_write_to_cc_send_inst_fifo.read())
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST;
            }
            else
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_write_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set( flit,
                                        dest,
                                        DspinDhccpParam::MULTI_UPDT_DEST);

            DspinDhccpParam::dspin_set( flit,
                                        m_cc_global_id,
                                        DspinDhccpParam::MULTI_UPDT_SRCID);

            DspinDhccpParam::dspin_set( flit,
                                        r_write_to_cc_send_trdid.read(),
                                        DspinDhccpParam::MULTI_UPDT_UPDT_INDEX);

            DspinDhccpParam::dspin_set( flit,
                                        multi_updt_type,
                                        DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;

            break;
        }
        //////////////////////////////
        case CC_SEND_WRITE_UPDT_NLINE:
        {
            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        r_write_to_cc_send_index.read(),
                                        DspinDhccpParam::MULTI_UPDT_WORD_INDEX);

            DspinDhccpParam::dspin_set( flit,
                                        r_write_to_cc_send_nline.read(),
                                        DspinDhccpParam::MULTI_UPDT_NLINE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;

            break;
        }
        /////////////////////////////
        case CC_SEND_WRITE_UPDT_DATA:
        {
            uint8_t  multi_updt_cpt  = r_cc_send_cpt.read() + r_write_to_cc_send_index.read();
            uint8_t  multi_updt_be   = r_write_to_cc_send_be[multi_updt_cpt].read();
            uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read();

            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        multi_updt_be,
                                        DspinDhccpParam::MULTI_UPDT_BE);

            DspinDhccpParam::dspin_set( flit,
                                        multi_updt_data,
                                        DspinDhccpParam::MULTI_UPDT_DATA);

            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = (r_cc_send_cpt.read() == (r_write_to_cc_send_count.read() - 1));
            p_dspin_m2p.data  = flit;

            break;
        }
        ////////////////////////////
        case CC_SEND_CAS_UPDT_HEADER:
        {
            if (not m_cas_to_cc_send_inst_fifo.rok()) break;

            uint8_t multi_updt_type;
            if(m_cas_to_cc_send_inst_fifo.read())
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_INST;
            }
            else
            {
                multi_updt_type = DspinDhccpParam::TYPE_MULTI_UPDT_DATA;
            }

            uint64_t flit = 0;
            uint64_t dest = m_cas_to_cc_send_srcid_fifo.read() <<
                            (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

            DspinDhccpParam::dspin_set( flit,
                                        dest,
                                        DspinDhccpParam::MULTI_UPDT_DEST);

            DspinDhccpParam::dspin_set( flit,
                                        m_cc_global_id,
                                        DspinDhccpParam::MULTI_UPDT_SRCID);

            DspinDhccpParam::dspin_set( flit,
                                        r_cas_to_cc_send_trdid.read(),
                                        DspinDhccpParam::MULTI_UPDT_UPDT_INDEX);

            DspinDhccpParam::dspin_set( flit,
                                        multi_updt_type,
                                        DspinDhccpParam::M2P_TYPE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;

            break;
        }
        ////////////////////////////
        case CC_SEND_CAS_UPDT_NLINE:
        {
            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        r_cas_to_cc_send_index.read(),
                                        DspinDhccpParam::MULTI_UPDT_WORD_INDEX);

            DspinDhccpParam::dspin_set( flit,
                                        r_cas_to_cc_send_nline.read(),
                                        DspinDhccpParam::MULTI_UPDT_NLINE);

            p_dspin_m2p.write = true;
            p_dspin_m2p.data  = flit;

            break;
        }
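        // Note: for a CAS update the byte enable is always 0xF (a full
        // 32-bit word). When the CAS is a 64-bit access
        // (r_cas_to_cc_send_is_long), a second data flit carrying the high
        // word is sent and eop is only raised on that last flit.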
        ///////////////////////////
        case CC_SEND_CAS_UPDT_DATA:
        {
            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        0xF,
                                        DspinDhccpParam::MULTI_UPDT_BE);

            DspinDhccpParam::dspin_set( flit,
                                        r_cas_to_cc_send_wdata.read(),
                                        DspinDhccpParam::MULTI_UPDT_DATA);

            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = not r_cas_to_cc_send_is_long.read();
            p_dspin_m2p.data  = flit;

            break;
        }
        ////////////////////////////////
        case CC_SEND_CAS_UPDT_DATA_HIGH:
        {
            uint64_t flit = 0;

            DspinDhccpParam::dspin_set( flit,
                                        0xF,
                                        DspinDhccpParam::MULTI_UPDT_BE);

            DspinDhccpParam::dspin_set( flit,
                                        r_cas_to_cc_send_wdata_high.read(),
                                        DspinDhccpParam::MULTI_UPDT_DATA);

            p_dspin_m2p.write = true;
            p_dspin_m2p.eop   = true;
            p_dspin_m2p.data  = flit;

            break;
        }
    } // end switch r_cc_send_fsm

    ////////////////////////////////////////////////////////////////////
    // p_dspin_clack port (CLEANUP FSM)
    ////////////////////////////////////////////////////////////////////

    if ( r_cleanup_fsm.read() == CLEANUP_SEND_CLACK )
    {
        uint8_t cleanup_ack_type;
        if(r_cleanup_inst.read())
        {
            cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST;
        }
        else
        {
            cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA;
        }

        uint64_t flit = 0;
        uint64_t dest = r_cleanup_srcid.read() <<
                        (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);

        DspinDhccpParam::dspin_set( flit,
                                    dest,
                                    DspinDhccpParam::CLACK_DEST);

        DspinDhccpParam::dspin_set( flit,
                                    r_cleanup_nline.read() & 0xFFFF,
                                    DspinDhccpParam::CLACK_SET);

        DspinDhccpParam::dspin_set( flit,
                                    r_cleanup_way_index.read(),
                                    DspinDhccpParam::CLACK_WAY);

        DspinDhccpParam::dspin_set( flit,
                                    cleanup_ack_type,
                                    DspinDhccpParam::CLACK_TYPE);

        p_dspin_clack.eop   = true;
        p_dspin_clack.write = true;
        p_dspin_clack.data  = flit;
    }
    else
    {
        p_dspin_clack.write = false;
        p_dspin_clack.eop   = false;
        p_dspin_clack.data  = 0;
    }

    ///////////////////////////////////////////////////////////////////
    // p_dspin_p2m port (CC_RECEIVE FSM)
    ///////////////////////////////////////////////////////////////////
    //
    switch(r_cc_receive_fsm.read())
    {
        case CC_RECEIVE_IDLE:
        {
            p_dspin_p2m.read = false;
            break;
        }
        case CC_RECEIVE_CLEANUP:
        case CC_RECEIVE_CLEANUP_EOP:
        {
            p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok();
            break;
        }
        case CC_RECEIVE_MULTI_ACK:
        {
            p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok();
            break;
        }
    }
    // end switch r_cc_receive_fsm
} // end genMoore()

}

…

// End:

// vim: filetype=cpp:expandtab:shiftwidth=2:tabstop=2:softtabstop=2   (removed)
// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4   (added)
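For readers unfamiliar with the DSPIN helpers used throughout this file: DspinDhccpParam::dspin_set(flit, value, FIELD) packs a named field into a 64-bit flit. Its implementation is not part of this changeset, so the snippet below is only an illustration of the assumed shift-and-mask behaviour; the set_field helper, its explicit lsb/width parameters, and the field positions in the usage comment are hypothetical, not the library's API.

#include <cstdint>
#include <cassert>

// Hypothetical stand-in for a flit field setter: insert 'value' into 'flit'
// at bit position 'lsb', on 'width' bits. The previous field content is
// cleared first and 'value' is truncated to the field width.
static inline void set_field(uint64_t &flit, uint64_t value,
                             unsigned lsb, unsigned width)
{
    assert(width > 0 and width <= 64 and lsb + width <= 64);
    const uint64_t mask = (width == 64) ? ~0ULL : ((1ULL << width) - 1);
    flit = (flit & ~(mask << lsb)) | ((value & mask) << lsb);
}

// Example of building a two-field header flit, in the spirit of the M2P
// header flits above (field positions and widths are made up):
//     uint64_t flit = 0;
//     set_field(flit, dest,      40, 14);   // e.g. a DEST-like field
//     set_field(flit, global_id, 20, 14);   // e.g. a SRCID-like field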