Changeset 273 for trunk/modules/vci_mem_cache_v4
- Timestamp: Nov 28, 2012, 11:51:48 AM
- Location: trunk/modules/vci_mem_cache_v4/caba
- Files: 3 edited
trunk/modules/vci_mem_cache_v4/caba/metadata/vci_mem_cache_v4.sd
r20 -> r273: the module description is reformatted (alignment and blank lines between sections); the declared class, ports, parameters and extensions are unchanged. The new layout:

    Module('caba:vci_mem_cache_v4',
           classname = 'soclib::caba::VciMemCacheV4',

           tmpl_parameters = [ parameter.Module('vci_param', default = 'caba:vci_param') ],

           header_files = [
               '../source/include/vci_mem_cache_v4.h',
               '../source/include/xram_transaction_v4.h',
               '../source/include/mem_cache_directory_v4.h',
               '../source/include/update_tab_v4.h'
           ],

           implementation_files = [ '../source/src/vci_mem_cache_v4.cpp' ],

           uses = [
               Uses('caba:base_module'),
               Uses('common:loader'),
               Uses('common:mapping_table'),
               Uses('caba:generic_fifo'),
           ],

           ports = [
               Port( 'caba:vci_target'   , 'p_vci_tgt' ),
               Port( 'caba:vci_target'   , 'p_vci_tgt_cleanup' ),
               Port( 'caba:vci_initiator', 'p_vci_ini' ),
               Port( 'caba:vci_initiator', 'p_vci_ixr' ),
               Port( 'caba:bit_in'       , 'p_resetn' , auto = 'resetn' ),
               Port( 'caba:clock_in'     , 'p_clk'    , auto = 'clock' ),
           ],

           instance_parameters = [
               parameter.Module( 'mtp', 'common:mapping_table' ),
               parameter.Module( 'mtc', 'common:mapping_table' ),
               parameter.Module( 'mtx', 'common:mapping_table' ),
               parameter.IntTab( 'vci_ixr_index' ),
               parameter.IntTab( 'vci_ini_index' ),
               parameter.IntTab( 'vci_tgt_index' ),
               parameter.IntTab( 'vci_tgt_index_cleanup' ),
               parameter.Int   ( 'nways' ),
               parameter.Int   ( 'nsets' ),
               parameter.Int   ( 'nwords' ),
               parameter.Int   ( 'heap_size' ),
           ],

           extensions = [
               'dsx:get_ident='
               'vci_ini_index:p_vci_ini:mtc,'
               'vci_tgt_index_cleanup:p_vci_tgt_cleanup:mtc,'
               'vci_tgt_index:p_vci_tgt:mtp,'
               'vci_ixr_index:p_vci_ixr:mtx',
               'dsx:addressable=vci_tgt_index,vci_tgt_index_cleanup',
           ],
    )
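The instance parameters and ports declared above map directly onto the SystemC constructor arguments and ports of the component (see the header file below). As a purely illustrative sketch, a top-level netlist could instantiate and wire the controller as follows; the mapping tables, index values, cache geometry and signal objects are placeholders, not values taken from this changeset:

    // Hypothetical topcell fragment. 'vci_param', the mapping tables and the
    // signals are assumed to be declared elsewhere; all numeric values are
    // illustrative only.
    soclib::caba::VciMemCacheV4<vci_param> memc("memc",
        mtp, mtc, mtx,                       // primary / coherence / external-RAM mapping tables
        soclib::common::IntTab(0),           // vci_ixr_index
        soclib::common::IntTab(0),           // vci_ini_index
        soclib::common::IntTab(1),           // vci_tgt_index
        soclib::common::IntTab(2),           // vci_tgt_index_cleanup
        16, 256, 16,                         // nways, nsets, nwords
        1024);                               // heap_size

    memc.p_clk            (signal_clk);
    memc.p_resetn         (signal_resetn);
    memc.p_vci_tgt        (signal_vci_tgt_memc);          // direct read/write/SC requests
    memc.p_vci_tgt_cleanup(signal_vci_tgt_cleanup_memc);  // CLEANUP network
    memc.p_vci_ini        (signal_vci_ini_memc);          // coherence commands to the L1 caches
    memc.p_vci_ixr        (signal_vci_ixr_memc);          // transactions to the external RAM (XRAM)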
trunk/modules/vci_mem_cache_v4/caba/source/include/vci_mem_cache_v4.h
r245 -> r273. Apart from a general realignment of declarations and comments, the changes are the following.

Header comment: the maintainer block is merged into a single comment and the maintainer list gains cesar.fuguet-tortolero@lip6.fr (after alain eric.guthmuller@polytechnique.edu).

FSM state enumerations: the trailing comma after the last enumerator of every enum is removed (TGT_CMD_ATOMIC, TGT_RSP_CLEANUP, INIT_CMD_SC_UPDT_DATA_HIGH, INIT_RSP_END, READ_TRT_REQ, WRITE_WAIT, IXR_RSP_TRT_READ, XRAM_RSP_ERROR_RSP, IXR_CMD_XRAM_DATA, SC_WAIT, CLEANUP_RSP, ALLOC_DIR_XRAM_RSP, ALLOC_TRT_IXR_RSP, ALLOC_UPT_SC, ALLOC_HEAP_XRAM_RSP), and new states are added:

    read_fsm_state_e       : + READ_DIR_REQ      (before READ_DIR_LOCK)
                             + READ_HEAP_REQ     (before READ_HEAP_LOCK)
    write_fsm_state_e      : + WRITE_DIR_REQ     (before WRITE_DIR_LOCK)
    xram_rsp_fsm_state_e   : + XRAM_RSP_HEAP_REQ (before XRAM_RSP_HEAP_ERASE)
    sc_fsm_state_e         : + SC_DIR_REQ        (before SC_DIR_LOCK)
    cleanup_fsm_state_e    : + CLEANUP_DIR_REQ   (before CLEANUP_DIR_LOCK)
                             + CLEANUP_HEAP_REQ  (before CLEANUP_HEAP_LOCK)
    alloc_dir_fsm_state_e  : + ALLOC_DIR_RESET   (before ALLOC_DIR_READ)
    alloc_heap_fsm_state_e : + ALLOC_HEAP_RESET  (before ALLOC_HEAP_READ)

Instrumentation counters: the declarations are realigned and the previously uncommented counters are documented:

    uint32_t m_cpt_write_cells;    // Cumulated length for WRITE transactions
    uint32_t m_cpt_write_dirty;    // Cumulated length for WRITE transactions
    uint32_t m_cpt_trt_rb;         // Read blocked by a hit in trt
    uint32_t m_cpt_trt_full;       // Transaction blocked due to a full trt

Component attributes: m_seglist and m_cseglist are moved to the head of the attribute list, and the coherence table is removed:

    - vci_addr_t *m_coherence_table;   // address(srcid)

Allocator FSMs: a reset counter is added next to each of the directory and heap allocator state registers:

      sc_signal<int>      r_alloc_dir_fsm;
    + sc_signal<unsigned> r_alloc_dir_reset_cpt;
      ...
      sc_signal<int>      r_alloc_heap_fsm;
    + sc_signal<unsigned> r_alloc_heap_reset_cpt;

vim modeline: shiftwidth, tabstop and softtabstop change from 4 to 2.

All the other hunks (the port declarations, the constructor prototype, and the register and inter-FSM buffer declarations of the READ, WRITE, INIT_RSP, CLEANUP, SC, IXR_RSP, XRAM_RSP, IXR_CMD, TGT_RSP, INIT_CMD and allocator FSMs) only realign the declarations and their comments; no port or register is added or removed there.
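The new ALLOC_DIR_RESET and ALLOC_HEAP_RESET states, together with the r_alloc_dir_reset_cpt and r_alloc_heap_reset_cpt counters, suggest that after reset the directory and the heap are now initialised incrementally by their allocator FSMs (one set, or one heap entry, per cycle) rather than by a single init() call in the reset branch of transition(). The corresponding transition code is not part of this excerpt; a minimal sketch of what such a state could look like, under that assumption, is:

    // Hypothetical sketch only -- not the r273 code. The allocator stays in its
    // RESET state for m_sets cycles, leaving one cycle per set to invalidate the
    // directory, then resumes normal arbitration.
    case ALLOC_DIR_RESET:
        // ... invalidate the directory entries of set r_alloc_dir_reset_cpt here ...
        if (r_alloc_dir_reset_cpt.read() == (m_sets - 1))
            r_alloc_dir_fsm = ALLOC_DIR_READ;
        else
            r_alloc_dir_reset_cpt = r_alloc_dir_reset_cpt.read() + 1;
        break;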
trunk/modules/vci_mem_cache_v4/caba/source/src/vci_mem_cache_v4.cpp
r253 -> r273. Apart from comment and whitespace realignment, the changes are the following.

File header: the maintainer list gains cesar.fuguet-tortolero@lip6.fr.

Configuration macros: the DEBUG_MEMC_* definitions are realigned and RANDOMIZE_SC is now defined with an explicit value:

    - #define RANDOMIZE_SC
    + #define RANDOMIZE_SC 1

FSM state name arrays: the trailing comma after the last string of every array is removed, and the arrays are extended to match the states declared in the header:

    read_fsm_str[]       : + "READ_DIR_REQ", + "READ_HEAP_REQ"
    write_fsm_str[]      : + "WRITE_DIR_REQ"
    xram_rsp_fsm_str[]   : + "XRAM_RSP_HEAP_REQ"
    sc_fsm_str[]         : + "SC_DIR_REQ"
    cleanup_fsm_str[]    : + "CLEANUP_DIR_REQ", + "CLEANUP_HEAP_REQ"
    alloc_dir_fsm_str[]  : + "ALLOC_DIR_RESET"
    alloc_upt_fsm_str[]  : + "ALLOC_UPT_SC"   (the enum already had this state, so the array was one entry short)
    alloc_heap_fsm_str[] : + "ALLOC_HEAP_RESET"

Constructor: the parameters are documented (number of ways per set, number of cache sets, number of words in a cache line, number of heap / TRT / UPT entries), and the two new reset counters are added to the initialisation list:

      r_alloc_dir_fsm("r_alloc_dir_fsm"),
    + r_alloc_dir_reset_cpt("r_alloc_dir_reset_cpt"),
      r_alloc_trt_fsm("r_alloc_trt_fsm"),
      r_alloc_upt_fsm("r_alloc_upt_fsm"),
      r_alloc_heap_fsm("r_alloc_heap_fsm"),
    + r_alloc_heap_reset_cpt("r_alloc_heap_reset_cpt")

The constructor body (assertions, broadcast address, segment lists, cache-data and inter-FSM buffer allocations), the monitor functions and print_trace() / print_stats() are only reindented.

transition(), reset branch: the directory and heap allocators now start in their new RESET states instead of READ, and the directory and heap are no longer initialised here (only the transaction table and the update table are):

    - r_alloc_dir_fsm  = ALLOC_DIR_READ;
    - r_alloc_heap_fsm = ALLOC_HEAP_READ;
    + r_alloc_dir_fsm  = ALLOC_DIR_RESET;
    + r_alloc_heap_fsm = ALLOC_HEAP_RESET;

    - m_cache_directory.init();
    - m_transaction_tab.init();
    - m_heap.init();
    + m_transaction_tab.init();
    + m_update_tab.init();

The rest of the reset sequence (run-time debug flags, FIFOs, inter-FSM request flags) follows; the excerpt ends in the middle of it, at r_write_to_init_rsp_req.
= false; 657 658 m_write_to_init_cmd_inst_fifo.init(); 659 m_write_to_init_cmd_srcid_fifo.init(); 646 660 #if L1_MULTI_CACHE 647 m_write_to_init_cmd_cache_id_fifo.init(); 648 #endif 649 650 r_cleanup_to_tgt_rsp_req = false; 651 652 r_init_rsp_to_tgt_rsp_req = false; 653 654 r_sc_to_tgt_rsp_req = false; 655 r_sc_cpt = 0; 656 r_sc_lfsr = -1; 657 r_sc_to_ixr_cmd_req = false; 658 r_sc_to_init_cmd_multi_req = false; 659 r_sc_to_init_cmd_brdcast_req = false; 660 m_sc_to_init_cmd_inst_fifo.init(); 661 m_sc_to_init_cmd_srcid_fifo.init(); 661 m_write_to_init_cmd_cache_id_fifo.init(); 662 #endif 663 664 r_cleanup_to_tgt_rsp_req = false; 665 666 r_init_rsp_to_tgt_rsp_req = false; 667 668 r_sc_to_tgt_rsp_req = false; 669 r_sc_cpt = 0; 670 r_sc_lfsr = -1; 671 r_sc_to_ixr_cmd_req = false; 672 r_sc_to_init_cmd_multi_req = false; 673 r_sc_to_init_cmd_brdcast_req = false; 674 675 m_sc_to_init_cmd_inst_fifo.init(); 676 m_sc_to_init_cmd_srcid_fifo.init(); 662 677 #if L1_MULTI_CACHE 663 m_sc_to_init_cmd_cache_id_fifo.init(); 664 #endif 665 666 for(size_t i=0; i<m_transaction_tab_lines ; i++){ 667 r_ixr_rsp_to_xram_rsp_rok[i] = false; 668 } 669 670 r_xram_rsp_to_tgt_rsp_req = false; 671 r_xram_rsp_to_init_cmd_multi_req = false; 672 r_xram_rsp_to_init_cmd_brdcast_req = false; 673 r_xram_rsp_to_ixr_cmd_req = false; 674 r_xram_rsp_trt_index = 0; 675 m_xram_rsp_to_init_cmd_inst_fifo.init(); 676 m_xram_rsp_to_init_cmd_srcid_fifo.init(); 678 m_sc_to_init_cmd_cache_id_fifo.init(); 679 #endif 680 681 for(size_t i=0; i<m_transaction_tab_lines ; i++){ 682 r_ixr_rsp_to_xram_rsp_rok[i] = false; 683 } 684 685 r_xram_rsp_to_tgt_rsp_req = false; 686 r_xram_rsp_to_init_cmd_multi_req = false; 687 r_xram_rsp_to_init_cmd_brdcast_req = false; 688 r_xram_rsp_to_ixr_cmd_req = false; 689 r_xram_rsp_trt_index = 0; 690 691 m_xram_rsp_to_init_cmd_inst_fifo.init(); 692 m_xram_rsp_to_init_cmd_srcid_fifo.init(); 677 693 #if L1_MULTI_CACHE 678 m_xram_rsp_to_init_cmd_cache_id_fifo.init(); 679 #endif 680 681 r_ixr_cmd_cpt = 0; 682 683 r_copies_limit = 3; 684 685 // Activity counters 686 m_cpt_cycles = 0; 687 m_cpt_read = 0; 688 m_cpt_read_miss = 0; 689 m_cpt_write = 0; 690 m_cpt_write_miss = 0; 691 m_cpt_write_cells = 0; 692 m_cpt_write_dirty = 0; 693 m_cpt_update = 0; 694 m_cpt_update_mult = 0; 695 m_cpt_inval_brdcast = 0; 696 m_cpt_inval = 0; 697 m_cpt_inval_mult = 0; 698 m_cpt_cleanup = 0; 699 m_cpt_ll = 0; 700 m_cpt_sc = 0; 701 m_cpt_trt_full = 0; 702 m_cpt_trt_rb = 0; 703 704 return; 705 } 706 707 bool cmd_read_fifo_put = false; 708 bool cmd_read_fifo_get = false; 709 710 bool cmd_write_fifo_put = false; 711 bool cmd_write_fifo_get = false; 712 713 bool cmd_sc_fifo_put = false; 714 bool cmd_sc_fifo_get = false; 715 716 bool write_to_init_cmd_fifo_put = false; 717 bool write_to_init_cmd_fifo_get = false; 718 bool write_to_init_cmd_fifo_inst = false; 719 size_t write_to_init_cmd_fifo_srcid = 0; 694 m_xram_rsp_to_init_cmd_cache_id_fifo.init(); 695 #endif 696 697 r_ixr_cmd_cpt = 0; 698 r_alloc_dir_reset_cpt = 0; 699 r_alloc_heap_reset_cpt = 0; 700 701 r_copies_limit = 3; 702 703 // Activity counters 704 m_cpt_cycles = 0; 705 m_cpt_read = 0; 706 m_cpt_read_miss = 0; 707 m_cpt_write = 0; 708 m_cpt_write_miss = 0; 709 m_cpt_write_cells = 0; 710 m_cpt_write_dirty = 0; 711 m_cpt_update = 0; 712 m_cpt_update_mult = 0; 713 m_cpt_inval_brdcast = 0; 714 m_cpt_inval = 0; 715 m_cpt_inval_mult = 0; 716 m_cpt_cleanup = 0; 717 m_cpt_ll = 0; 718 m_cpt_sc = 0; 719 m_cpt_trt_full = 0; 720 m_cpt_trt_rb = 0; 721 722 return; 723 } 724 725 bool 
cmd_read_fifo_put = false; 726 bool cmd_read_fifo_get = false; 727 728 bool cmd_write_fifo_put = false; 729 bool cmd_write_fifo_get = false; 730 731 bool cmd_sc_fifo_put = false; 732 bool cmd_sc_fifo_get = false; 733 734 bool write_to_init_cmd_fifo_put = false; 735 bool write_to_init_cmd_fifo_get = false; 736 bool write_to_init_cmd_fifo_inst = false; 737 size_t write_to_init_cmd_fifo_srcid = 0; 720 738 721 739 #if L1_MULTI_CACHE 722 723 #endif 724 725 bool xram_rsp_to_init_cmd_fifo_put= false;726 bool xram_rsp_to_init_cmd_fifo_get= false;727 bool xram_rsp_to_init_cmd_fifo_inst= false;728 size_t xram_rsp_to_init_cmd_fifo_srcid= 0;740 size_t write_to_init_cmd_fifo_cache_id = 0; 741 #endif 742 743 bool xram_rsp_to_init_cmd_fifo_put = false; 744 bool xram_rsp_to_init_cmd_fifo_get = false; 745 bool xram_rsp_to_init_cmd_fifo_inst = false; 746 size_t xram_rsp_to_init_cmd_fifo_srcid = 0; 729 747 730 748 #if L1_MULTI_CACHE 731 732 #endif 733 734 bool sc_to_init_cmd_fifo_put= false;735 bool sc_to_init_cmd_fifo_get= false;736 bool sc_to_init_cmd_fifo_inst= false;737 size_t sc_to_init_cmd_fifo_srcid= 0;749 size_t xram_rsp_to_init_cmd_fifo_cache_id = 0; 750 #endif 751 752 bool sc_to_init_cmd_fifo_put = false; 753 bool sc_to_init_cmd_fifo_get = false; 754 bool sc_to_init_cmd_fifo_inst = false; 755 size_t sc_to_init_cmd_fifo_srcid = 0; 738 756 739 757 #if L1_MULTI_CACHE 740 741 #endif 742 743 m_debug_global = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;744 m_debug_tgt_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;745 m_debug_tgt_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;746 m_debug_init_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;747 m_debug_init_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;748 m_debug_read_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;749 m_debug_write_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;750 m_debug_sc_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;751 m_debug_cleanup_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;752 m_debug_ixr_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;753 m_debug_ixr_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;754 m_debug_xram_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;755 756 757 #if DEBUG_MEMC_GLOBAL 758 if( m_debug_global ) 759 { 760 std::cout << "---------------------------------------------" << std::dec << std::endl;761 std::cout<< "MEM_CACHE " << m_srcid_ini << " ; Time = " << m_cpt_cycles << std::endl758 size_t sc_to_init_cmd_fifo_cache_id = 0; 759 #endif 760 761 m_debug_global = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 762 m_debug_tgt_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 763 m_debug_tgt_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 764 m_debug_init_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 765 m_debug_init_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 766 m_debug_read_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 767 m_debug_write_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 768 m_debug_sc_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 769 m_debug_cleanup_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 770 m_debug_ixr_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 771 m_debug_ixr_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 772 m_debug_xram_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and 
m_debug_ok; 773 774 #if DEBUG_MEMC_GLOBAL 775 if( m_debug_global ) 776 { 777 std::cout 778 << "---------------------------------------------" << std::dec << std::endl 779 << "MEM_CACHE " << m_srcid_ini << " ; Time = " << m_cpt_cycles << std::endl 762 780 << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] << std::endl 763 781 << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] << std::endl … … 775 793 << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm] << std::endl 776 794 << " - ALLOC_HEAP FSM = " << alloc_heap_fsm_str[r_alloc_heap_fsm] << std::endl; 777 } 778 #endif 779 780 //////////////////////////////////////////////////////////////////////////////////// 781 // TGT_CMD FSM 782 //////////////////////////////////////////////////////////////////////////////////// 783 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors 784 // 785 // There is 3 types of accepted commands : 786 // - READ : a READ request has a length of 1 VCI cell. It can be a single word 787 // or an entire cache line, depending on the PLEN value. 788 // - WRITE : a WRITE request has a maximum length of 16 cells, and can only 789 // concern words in a same line. 790 // - SC : The SC request has a length of 2 cells or 4 cells. 791 //////////////////////////////////////////////////////////////////////////////////// 792 793 switch ( r_tgt_cmd_fsm.read() ) 795 } 796 #endif 797 798 //////////////////////////////////////////////////////////////////////////////////// 799 // TGT_CMD FSM 800 //////////////////////////////////////////////////////////////////////////////////// 801 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors 802 // 803 // There is 3 types of accepted commands : 804 // - READ : a READ request has a length of 1 VCI cell. It can be a single word 805 // or an entire cache line, depending on the PLEN value. 806 // - WRITE : a WRITE request has a maximum length of 16 cells, and can only 807 // concern words in a same line. 808 // - SC : The SC request has a length of 2 cells or 4 cells. 
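// The checks below rely on the address bit-field helpers used throughout this
// function (a sketch of the assumed decoding, not a normative definition):
//    m_x[address]     -> word index inside the cache line
//    m_y[address]     -> set index in the directory
//    m_nline[address] -> cache line number (address divided by the line size)
// e.g. TGT_CMD_READ verifies that (m_x[address] + plen/4) does not exceed 16,
// so that a read burst never runs past the end of a cache line.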
809 //////////////////////////////////////////////////////////////////////////////////// 810 811 switch ( r_tgt_cmd_fsm.read() ) 812 { 813 ////////////////// 814 case TGT_CMD_IDLE: 815 if ( p_vci_tgt.cmdval ) 816 { 817 818 #if DEBUG_MEMC_TGT_CMD 819 if( m_debug_tgt_cmd_fsm ) 820 { 821 std::cout 822 << " <MEMC " << name() << ".TGT_CMD_IDLE> Receive command from srcid " 823 << std::dec << p_vci_tgt.srcid.read() 824 << " / for address " << std::hex << p_vci_tgt.address.read() << std::endl; 825 } 826 #endif 827 // checking segmentation violation 828 vci_addr_t address = p_vci_tgt.address.read(); 829 uint32_t plen = p_vci_tgt.plen.read(); 830 bool found = false; 831 for ( size_t seg_id = 0 ; seg_id < m_nseg ; seg_id++ ) 832 { 833 if ( m_seg[seg_id]->contains(address) && 834 m_seg[seg_id]->contains(address + plen - vci_param::B) ) 835 { 836 found = true; 837 } 838 } 839 if ( not found ) 840 { 841 std::cout << "VCI_MEM_CACHE ERROR " << name() << std::endl; 842 std::cout 843 << "Out of segment VCI address in TGT_CMD_IDLE state (address = " 844 << std::hex << address << ", srcid = " << p_vci_tgt.srcid.read() 845 << std::dec << ", cycle = " << m_cpt_cycles << ")" << std::endl; 846 exit(0); 847 } 848 849 if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) 850 { 851 r_tgt_cmd_fsm = TGT_CMD_READ; 852 } 853 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) 854 { 855 r_tgt_cmd_fsm = TGT_CMD_WRITE; 856 } 857 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND ) 858 { 859 r_tgt_cmd_fsm = TGT_CMD_ATOMIC; 860 } 861 else 862 { 863 std::cout << "VCI_MEM_CACHE ERROR " << name() 864 << " TGT_CMD_IDLE state" << std::endl; 865 std::cout << " illegal VCI command type" << std::endl; 866 exit(0); 867 } 868 } 869 break; 870 871 ////////////////// 872 case TGT_CMD_READ: 873 if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) 874 { 875 std::cout 876 << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 877 << std::endl; 878 std::cout 879 << " illegal address/plen combination for VCI read command" << std::endl; 880 exit(0); 881 } 882 if ( !p_vci_tgt.eop.read() ) 883 { 884 std::cout 885 << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" 886 << std::endl; 887 std::cout 888 << " read command packets must contain one single flit" 889 << std::endl; 890 exit(0); 891 } 892 893 if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) 894 { 895 896 #if DEBUG_MEMC_TGT_CMD 897 if( m_debug_tgt_cmd_fsm ) 898 { 899 std::cout << " <MEMC " << name() << ".TGT_CMD_READ> Push into read_fifo:" 900 << " address = " << std::hex << p_vci_tgt.address.read() 901 << " srcid = " << std::dec << p_vci_tgt.srcid.read() 902 << " trdid = " << p_vci_tgt.trdid.read() 903 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 904 } 905 #endif 906 cmd_read_fifo_put = true; 907 m_cpt_read++; 908 r_tgt_cmd_fsm = TGT_CMD_IDLE; 909 } 910 break; 911 912 /////////////////// 913 case TGT_CMD_WRITE: 914 if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) 915 { 916 917 #if DEBUG_MEMC_TGT_CMD 918 if( m_debug_tgt_cmd_fsm ) 919 { 920 std::cout << " <MEMC " << name() << ".TGT_CMD_WRITE> Push into write_fifo:" 921 << " address = " << std::hex << p_vci_tgt.address.read() 922 << " srcid = " << std::dec << p_vci_tgt.srcid.read() 923 << " trdid = " << p_vci_tgt.trdid.read() 924 << " wdata = " << std::hex << p_vci_tgt.wdata.read() 925 << " be = " << p_vci_tgt.be.read() 926 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 927 } 928 #endif 929 cmd_write_fifo_put = true; 930 if( p_vci_tgt.eop 
) r_tgt_cmd_fsm = TGT_CMD_IDLE; 931 } 932 break; 933 934 //////////////////// 935 case TGT_CMD_ATOMIC: 936 if ( (p_vci_tgt.plen.read() != 8) && (p_vci_tgt.plen.read() != 16) ) 937 { 938 std::cout 939 << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_ATOMIC state" 940 << std::endl 941 << "illegal format for sc command " << std::endl; 942 943 exit(0); 944 } 945 946 if ( p_vci_tgt.cmdval && m_cmd_sc_addr_fifo.wok() ) 947 { 948 949 #if DEBUG_MEMC_TGT_CMD 950 if( m_debug_tgt_cmd_fsm ) 951 { 952 std::cout << " <MEMC " << name() << ".TGT_CMD_ATOMIC> Pushing command into cmd_sc_fifo:" 953 << " address = " << std::hex << p_vci_tgt.address.read() 954 << " srcid = " << std::dec << p_vci_tgt.srcid.read() 955 << " trdid = " << p_vci_tgt.trdid.read() 956 << " wdata = " << std::hex << p_vci_tgt.wdata.read() 957 << " be = " << p_vci_tgt.be.read() 958 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 959 } 960 #endif 961 cmd_sc_fifo_put = true; 962 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 963 } 964 break; 965 } // end switch tgt_cmd_fsm 966 967 ///////////////////////////////////////////////////////////////////////// 968 // INIT_RSP FSM 969 ///////////////////////////////////////////////////////////////////////// 970 // This FSM controls the response to the update or inval coherence 971 // requests sent by the memory cache to the L1 caches and update the UPT. 972 // 973 // It can be update or inval requests initiated by the WRITE FSM, 974 // or inval requests initiated by the XRAM_RSP FSM. 975 // It can also be a direct request from the WRITE FSM. 976 // 977 // The FSM decrements the proper entry in UPT. 978 // It sends a request to the TGT_RSP FSM to complete the pending 979 // write transaction (acknowledge response to the writer processor), 980 // and clear the UPT entry when all responses have been received. 981 // 982 // All those response packets are one word, compact 983 // packets complying with the VCI advanced format. 984 // The index in the Table is defined in the RTRDID field, and 985 // the transaction type is defined in the UPT entry. 
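// The UPT handshake implemented below boils down to the following sketch
// (lock arbitration and error checks omitted; names are those of the code below):
//
//    size_t index = p_vci_ini.rtrdid.read();   // UPT entry index carried in RTRDID
//    size_t count = 0;
//    m_update_tab.decrement(index, count);     // one expected response less
//    if ( count == 0 )                         // last response received
//    {
//        if ( m_update_tab.need_rsp(index) )   // the writer still waits for its ack
//            /* post srcid / trdid / pktid to the TGT_RSP FSM */ ;
//        m_update_tab.clear(index);            // release the UPT entry
//    }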
986 ///////////////////////////////////////////////////////////////////// 987 988 switch ( r_init_rsp_fsm.read() ) 989 { 990 /////////////////// 991 case INIT_RSP_IDLE: // wait a response for a coherence transaction 992 if ( p_vci_ini.rspval ) 993 { 994 995 #if DEBUG_MEMC_INIT_RSP 996 if( m_debug_init_rsp_fsm ) 997 { 998 std::cout << " <MEMC " << name() << ".INIT_RSP_IDLE> Response for UPT entry " 999 << p_vci_ini.rtrdid.read() << std::endl; 1000 } 1001 #endif 1002 if ( p_vci_ini.rtrdid.read() >= m_update_tab.size() ) 1003 { 1004 std::cout 1005 << "VCI_MEM_CACHE ERROR " << name() 1006 << " INIT_RSP_IDLE state" << std::endl 1007 << "index too large for UPT: " 1008 << " / rtrdid = " << std::dec << p_vci_ini.rtrdid.read() 1009 << " / UPT size = " << std::dec << m_update_tab.size() 1010 << std::endl; 1011 1012 exit(0); 1013 } 1014 if ( !p_vci_ini.reop.read() ) 1015 { 1016 std::cout 1017 << "VCI_MEM_CACHE ERROR " << name() 1018 << " INIT_RSP_IDLE state" << std::endl 1019 << "all coherence response packets must be one flit" 1020 << std::endl; 1021 1022 exit(0); 1023 } 1024 1025 r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); 1026 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 1027 } 1028 else if( r_write_to_init_rsp_req.read() ) 1029 { 1030 r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); 1031 r_write_to_init_rsp_req = false; 1032 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 1033 } 1034 break; 1035 1036 /////////////////////// 1037 case INIT_RSP_UPT_LOCK: // decrement the number of expected responses 1038 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) 1039 { 1040 size_t count = 0; 1041 bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); 1042 1043 #if DEBUG_MEMC_INIT_RSP 1044 if( m_debug_init_rsp_fsm ) 1045 { 1046 std::cout << " <MEMC " << name() << ".INIT_RSP_UPT_LOCK> Decrement the responses counter for UPT:" 1047 << " entry = " << r_init_rsp_upt_index.read() 1048 << " / rsp_count = " << std::dec << count << std::endl; 1049 } 1050 #endif 1051 if ( not valid ) 1052 { 1053 std::cout << "VCI_MEM_CACHE ERROR " << name() 1054 << " INIT_RSP_UPT_LOCK state" << std::endl 1055 << "unsuccessful access to decrement the UPT" << std::endl; 1056 1057 exit(0); 1058 } 1059 1060 if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; 1061 else r_init_rsp_fsm = INIT_RSP_IDLE; 1062 } 1063 break; 1064 1065 //////////////////////// 1066 case INIT_RSP_UPT_CLEAR: // clear the UPT entry 1067 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) 1068 { 1069 r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); 1070 r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); 1071 r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); 1072 r_init_rsp_nline = m_update_tab.nline(r_init_rsp_upt_index.read()); 1073 bool need_rsp = m_update_tab.need_rsp(r_init_rsp_upt_index.read()); 1074 1075 if ( need_rsp ) r_init_rsp_fsm = INIT_RSP_END; 1076 else r_init_rsp_fsm = INIT_RSP_IDLE; 1077 1078 m_update_tab.clear(r_init_rsp_upt_index.read()); 1079 1080 #if DEBUG_MEMC_INIT_RSP 1081 if ( m_debug_init_rsp_fsm ) 1082 { 1083 std::cout << " <MEMC " << name() << ".INIT_RSP_UPT_CLEAR> Clear UPT entry " 1084 << r_init_rsp_upt_index.read() << std::endl; 1085 } 1086 #endif 1087 } 1088 break; 1089 1090 ////////////////// 1091 case INIT_RSP_END: // Post a request to TGT_RSP FSM 1092 if ( !r_init_rsp_to_tgt_rsp_req ) 1093 { 1094 r_init_rsp_to_tgt_rsp_req = true; 1095 r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); 1096 r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); 1097 
r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); 1098 r_init_rsp_fsm = INIT_RSP_IDLE; 1099 1100 #if DEBUG_MEMC_INIT_RSP 1101 if ( m_debug_init_rsp_fsm ) 1102 { 1103 std::cout 1104 << " <MEMC " << name() 1105 << ".INIT_RSP_END> Request TGT_RSP FSM to send a response to srcid " 1106 << r_init_rsp_srcid.read() 1107 << std::endl; 1108 } 1109 #endif 1110 } 1111 break; 1112 } // end switch r_init_rsp_fsm 1113 1114 //////////////////////////////////////////////////////////////////////////////////// 1115 // READ FSM 1116 //////////////////////////////////////////////////////////////////////////////////// 1117 // The READ FSM controls the VCI read requests. 1118 // It takes the lock protecting the cache directory to check the cache line status: 1119 // - In case of HIT 1120 // The fsm copies the data (one line, or one single word) 1121 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 1122 // The requesting initiator is registered in the cache directory. 1123 // If the number of copy is larger than 1, the new copy is registered 1124 // in the HEAP. 1125 // If the number of copy is larger than the threshold, the HEAP is cleared, 1126 // and the corresponding line switches to the counter mode. 1127 // - In case of MISS 1128 // The READ fsm takes the lock protecting the transaction tab. 1129 // If a read transaction to the XRAM for this line already exists, 1130 // or if the transaction tab is full, the fsm is stalled. 1131 // If a TRT entry is free, the READ request is registered in TRT, 1132 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 1133 // The READ FSM returns in the IDLE state as the read transaction will be 1134 // completed when the missing line will be received. 1135 //////////////////////////////////////////////////////////////////////////////////// 1136 1137 switch ( r_read_fsm.read() ) 1138 { 1139 /////////////// 1140 case READ_IDLE: 1141 // waiting a read request 794 1142 { 795 ////////////////// 796 case TGT_CMD_IDLE: 797 { 798 if ( p_vci_tgt.cmdval ) 799 { 800 801 #if DEBUG_MEMC_TGT_CMD 802 if( m_debug_tgt_cmd_fsm ) 803 { 804 std::cout << " <MEMC " << name() << ".TGT_CMD_IDLE> Receive command from srcid " << std::dec << p_vci_tgt.srcid.read() 805 << " / for address " << std::hex << p_vci_tgt.address.read() << std::endl; 806 } 807 #endif 808 // checking segmentation violation 809 vci_addr_t address = p_vci_tgt.address.read(); 810 uint32_t plen = p_vci_tgt.plen.read(); 811 bool found = false; 812 for ( size_t seg_id = 0 ; seg_id < m_nseg ; seg_id++ ) 813 { 814 if ( m_seg[seg_id]->contains(address) && 815 m_seg[seg_id]->contains(address + plen - vci_param::B) ) 816 { 817 found = true; 818 } 819 } 820 if ( not found ) 821 { 822 std::cout << "VCI_MEM_CACHE ERROR " << name() << std::endl; 823 std::cout << "Out of segment VCI address in TGT_CMD_IDLE state (address = " << std::hex << address << ", srcid = " << p_vci_tgt.srcid.read() << std::dec << ", cycle = " << m_cpt_cycles << ")" << std::endl; 824 exit(0); 825 } 826 827 if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) 828 { 829 r_tgt_cmd_fsm = TGT_CMD_READ; 830 } 831 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) 832 { 833 r_tgt_cmd_fsm = TGT_CMD_WRITE; 834 } 835 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND ) 836 { 837 r_tgt_cmd_fsm = TGT_CMD_ATOMIC; 838 } 839 else 840 { 841 std::cout << "VCI_MEM_CACHE ERROR " << name() 842 << " TGT_CMD_IDLE state" << std::endl; 843 std::cout << " illegal VCI command type" << std::endl; 844 exit(0); 845 } 846 } 847 break; 
848 } 849 ////////////////// 850 case TGT_CMD_READ: 851 { 852 if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) 853 { 854 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; 855 std::cout << " illegal address/plen combination for VCI read command" << std::endl; 856 exit(0); 857 } 858 if ( !p_vci_tgt.eop.read() ) 859 { 860 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; 861 std::cout << " read command packets must contain one single flit" << std::endl; 862 exit(0); 863 } 864 865 if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) 866 { 867 868 #if DEBUG_MEMC_TGT_CMD 869 if( m_debug_tgt_cmd_fsm ) 870 { 871 std::cout << " <MEMC " << name() << ".TGT_CMD_READ> Push into read_fifo:" 872 << " address = " << std::hex << p_vci_tgt.address.read() 873 << " srcid = " << std::dec << p_vci_tgt.srcid.read() 874 << " trdid = " << p_vci_tgt.trdid.read() 875 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 876 } 877 #endif 878 cmd_read_fifo_put = true; 879 m_cpt_read++; 880 r_tgt_cmd_fsm = TGT_CMD_IDLE; 881 } 882 break; 883 } 884 /////////////////// 885 case TGT_CMD_WRITE: 886 { 887 if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) 888 { 889 890 #if DEBUG_MEMC_TGT_CMD 891 if( m_debug_tgt_cmd_fsm ) 892 { 893 std::cout << " <MEMC " << name() << ".TGT_CMD_WRITE> Push into write_fifo:" 894 << " address = " << std::hex << p_vci_tgt.address.read() 895 << " srcid = " << std::dec << p_vci_tgt.srcid.read() 896 << " trdid = " << p_vci_tgt.trdid.read() 897 << " wdata = " << std::hex << p_vci_tgt.wdata.read() 898 << " be = " << p_vci_tgt.be.read() 899 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 900 } 901 #endif 902 cmd_write_fifo_put = true; 903 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 904 } 905 break; 906 } 907 //////////////////// 908 case TGT_CMD_ATOMIC: 909 { 910 if ( (p_vci_tgt.plen.read() != 8) && (p_vci_tgt.plen.read() != 16) ) 911 { 912 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_ATOMIC state" << std::endl; 913 std::cout << "illegal format for sc command " << std::endl; 914 exit(0); 915 } 916 917 if ( p_vci_tgt.cmdval && m_cmd_sc_addr_fifo.wok() ) 918 { 919 920 #if DEBUG_MEMC_TGT_CMD 921 if( m_debug_tgt_cmd_fsm ) 922 { 923 std::cout << " <MEMC " << name() << ".TGT_CMD_ATOMIC> Pushing command into cmd_sc_fifo:" 924 << " address = " << std::hex << p_vci_tgt.address.read() 925 << " srcid = " << std::dec << p_vci_tgt.srcid.read() 926 << " trdid = " << p_vci_tgt.trdid.read() 927 << " wdata = " << std::hex << p_vci_tgt.wdata.read() 928 << " be = " << p_vci_tgt.be.read() 929 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 930 } 931 #endif 932 cmd_sc_fifo_put = true; 933 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 934 } 935 break; 936 } 937 } // end switch tgt_cmd_fsm 938 939 ///////////////////////////////////////////////////////////////////////// 940 // INIT_RSP FSM 941 ///////////////////////////////////////////////////////////////////////// 942 // This FSM controls the response to the update or inval coherence 943 // requests sent by the memory cache to the L1 caches and update the UPT. 944 // 945 // It can be update or inval requests initiated by the WRITE FSM, 946 // or inval requests initiated by the XRAM_RSP FSM. 947 // It can also be a direct request from the WRITE FSM. 948 // 949 // The FSM decrements the proper entry in UPT. 
950 // It sends a request to the TGT_RSP FSM to complete the pending 951 // write transaction (acknowledge response to the writer processor), 952 // and clear the UPT entry when all responses have been received. 953 // 954 // All those response packets are one word, compact 955 // packets complying with the VCI advanced format. 956 // The index in the Table is defined in the RTRDID field, and 957 // the transaction type is defined in the UPT entry. 958 ///////////////////////////////////////////////////////////////////// 959 960 switch ( r_init_rsp_fsm.read() ) 1143 if (m_cmd_read_addr_fifo.rok()) 1144 { 1145 1146 #if DEBUG_MEMC_READ 1147 if( m_debug_read_fsm ) 1148 { 1149 std::cout << " <MEMC " << name() << ".READ_IDLE> Read request:" 1150 << " srcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1151 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 1152 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1153 } 1154 #endif 1155 r_read_fsm = READ_DIR_REQ; 1156 } 1157 break; 1158 } 1159 1160 /////////////////// 1161 case READ_DIR_REQ: 1162 // Get the lock to the directory 961 1163 { 962 /////////////////// 963 case INIT_RSP_IDLE: // wait a response for a coherence transaction 964 { 965 if ( p_vci_ini.rspval ) 966 { 967 968 #if DEBUG_MEMC_INIT_RSP 969 if( m_debug_init_rsp_fsm ) 970 { 971 std::cout << " <MEMC " << name() << ".INIT_RSP_IDLE> Response for UPT entry " 972 << p_vci_ini.rtrdid.read() << std::endl; 973 } 974 #endif 975 if ( p_vci_ini.rtrdid.read() >= m_update_tab.size() ) 976 { 977 std::cout << "VCI_MEM_CACHE ERROR " << name() 978 << " INIT_RSP_IDLE state" << std::endl 979 << "index too large for UPT: " 980 << " / rtrdid = " << std::dec << p_vci_ini.rtrdid.read() 981 << " / UPT size = " << std::dec << m_update_tab.size() << std::endl; 982 exit(0); 983 } 984 if ( !p_vci_ini.reop.read() ) 985 { 986 std::cout << "VCI_MEM_CACHE ERROR " << name() 987 << " INIT_RSP_IDLE state" << std::endl; 988 std::cout << "all coherence response packets must be one flit" << std::endl; 989 exit(0); 990 } 991 992 r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); 993 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 994 } 995 else if( r_write_to_init_rsp_req.read() ) 996 { 997 r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); 998 r_write_to_init_rsp_req = false; 999 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 1000 } 1001 break; 1002 } 1003 /////////////////////// 1004 case INIT_RSP_UPT_LOCK: // decrement the number of expected responses 1005 { 1006 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) 1007 { 1008 size_t count = 0; 1009 bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); 1010 1011 #if DEBUG_MEMC_INIT_RSP 1012 if( m_debug_init_rsp_fsm ) 1013 { 1014 std::cout << " <MEMC " << name() << ".INIT_RSP_UPT_LOCK> Decrement the responses counter for UPT:" 1015 << " entry = " << r_init_rsp_upt_index.read() 1016 << " / rsp_count = " << std::dec << count << std::endl; 1017 } 1018 #endif 1019 if ( not valid ) 1020 { 1021 std::cout << "VCI_MEM_CACHE ERROR " << name() 1022 << " INIT_RSP_UPT_LOCK state" << std::endl 1023 << "unsuccessful access to decrement the UPT" << std::endl; 1024 exit(0); 1025 } 1026 1027 if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; 1028 else r_init_rsp_fsm = INIT_RSP_IDLE; 1029 } 1030 break; 1031 } 1032 //////////////////////// 1033 case INIT_RSP_UPT_CLEAR: // clear the UPT entry 1034 { 1035 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) 1036 { 1037 r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); 1038 
r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); 1039 r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); 1040 r_init_rsp_nline = m_update_tab.nline(r_init_rsp_upt_index.read()); 1041 bool need_rsp = m_update_tab.need_rsp(r_init_rsp_upt_index.read()); 1042 1043 if ( need_rsp ) r_init_rsp_fsm = INIT_RSP_END; 1044 else r_init_rsp_fsm = INIT_RSP_IDLE; 1045 1046 m_update_tab.clear(r_init_rsp_upt_index.read()); 1047 1048 #if DEBUG_MEMC_INIT_RSP 1049 if ( m_debug_init_rsp_fsm ) 1050 { 1051 std::cout << " <MEMC " << name() << ".INIT_RSP_UPT_CLEAR> Clear UPT entry " 1052 << r_init_rsp_upt_index.read() << std::endl; 1053 } 1054 #endif 1055 } 1056 break; 1057 } 1058 ////////////////// 1059 case INIT_RSP_END: // Post a request to TGT_RSP FSM 1060 { 1061 if ( !r_init_rsp_to_tgt_rsp_req ) 1062 { 1063 r_init_rsp_to_tgt_rsp_req = true; 1064 r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); 1065 r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); 1066 r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); 1067 r_init_rsp_fsm = INIT_RSP_IDLE; 1068 1069 #if DEBUG_MEMC_INIT_RSP 1070 if ( m_debug_init_rsp_fsm ) 1071 { 1072 std::cout << " <MEMC " << name() << ".INIT_RSP_END> Request TGT_RSP FSM to send a response to srcid " 1073 << r_init_rsp_srcid.read() << std::endl; 1074 } 1075 #endif 1076 } 1077 break; 1078 } 1079 } // end switch r_init_rsp_fsm 1080 1081 //////////////////////////////////////////////////////////////////////////////////// 1082 // READ FSM 1083 //////////////////////////////////////////////////////////////////////////////////// 1084 // The READ FSM controls the VCI read requests. 1085 // It takes the lock protecting the cache directory to check the cache line status: 1086 // - In case of HIT 1087 // The fsm copies the data (one line, or one single word) 1088 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 1089 // The requesting initiator is registered in the cache directory. 1090 // If the number of copy is larger than 1, the new copy is registered 1091 // in the HEAP. 1092 // If the number of copy is larger than the threshold, the HEAP is cleared, 1093 // and the corresponding line switches to the counter mode. 1094 // - In case of MISS 1095 // The READ fsm takes the lock protecting the transaction tab. 1096 // If a read transaction to the XRAM for this line already exists, 1097 // or if the transaction tab is full, the fsm is stalled. 1098 // If a TRT entry is free, the READ request is registered in TRT, 1099 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 1100 // The READ FSM returns in the IDLE state as the read transaction will be 1101 // completed when the missing line will be received. 
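// On a hit, the choice between directory and HEAP registration is the following
// (sketch; the actual code is in the READ_DIR_LOCK / READ_HEAP_LOCK states):
//
//    if ( entry.is_cnt || (entry.count == 0) || !cached_read )
//        r_read_fsm = READ_DIR_HIT;      // copy recorded in the directory itself
//    else
//        r_read_fsm = READ_HEAP_LOCK;    // new copy chained in the HEAP list
//
// Once the HEAP lock is held, the line switches to counter mode (is_cnt set,
// HEAP list released) when entry.count reaches r_copies_limit or the HEAP is full.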
1102 //////////////////////////////////////////////////////////////////////////////////// 1103 1104 switch ( r_read_fsm.read() ) 1164 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1165 { 1166 r_read_fsm = READ_DIR_LOCK; 1167 } 1168 1169 #if DEBUG_MEMC_READ 1170 if( m_debug_read_fsm ) 1171 { 1172 std::cout 1173 << " <MEMC " << name() << ".READ_DIR_REQ> Requesting DIR lock " 1174 << std::endl; 1175 } 1176 #endif 1177 break; 1178 } 1179 1180 /////////////////// 1181 case READ_DIR_LOCK: 1182 // check directory for hit / miss 1105 1183 { 1106 /////////////// 1107 case READ_IDLE: // waiting a read request 1108 { 1109 if (m_cmd_read_addr_fifo.rok()) 1110 { 1184 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1185 { 1186 size_t way = 0; 1187 DirectoryEntry entry = 1188 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 1189 1190 r_read_is_cnt = entry.is_cnt; 1191 r_read_dirty = entry.dirty; 1192 r_read_lock = entry.lock; 1193 r_read_tag = entry.tag; 1194 r_read_way = way; 1195 r_read_count = entry.count; 1196 r_read_copy = entry.owner.srcid; 1197 1198 #if L1_MULTI_CACHE 1199 r_read_copy_cache = entry.owner.cache_id; 1200 #endif 1201 r_read_copy_inst = entry.owner.inst; 1202 r_read_ptr = entry.ptr; // pointer to the heap 1203 1204 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1205 if( entry.valid ) // hit 1206 { 1207 // test if we need to register a new copy in the heap 1208 if ( entry.is_cnt || (entry.count == 0) || !cached_read ) 1209 { 1210 r_read_fsm = READ_DIR_HIT; 1211 } 1212 else 1213 { 1214 r_read_fsm = READ_HEAP_REQ; 1215 } 1216 } 1217 else // miss 1218 { 1219 r_read_fsm = READ_TRT_LOCK; 1220 } 1111 1221 1112 1222 #if DEBUG_MEMC_READ 1113 if( m_debug_read_fsm ) 1114 { 1115 std::cout << " <MEMC " << name() << ".READ_IDLE> Read request:" 1116 << " srcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1117 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 1118 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1119 } 1120 #endif 1121 r_read_fsm = READ_DIR_LOCK; 1122 } 1123 break; 1124 } 1125 /////////////////// 1126 case READ_DIR_LOCK: // check directory for hit / miss 1127 { 1128 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1129 { 1130 size_t way = 0; 1131 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 1132 1133 r_read_is_cnt = entry.is_cnt; 1134 r_read_dirty = entry.dirty; 1135 r_read_lock = entry.lock; 1136 r_read_tag = entry.tag; 1137 r_read_way = way; 1138 r_read_count = entry.count; 1139 r_read_copy = entry.owner.srcid; 1140 1223 if( m_debug_read_fsm ) 1224 { 1225 std::cout 1226 << " <MEMC " << name() << ".READ_DIR_LOCK> Accessing directory: " 1227 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 1228 << " / hit = " << std::dec << entry.valid 1229 << " / count = " <<std::dec << entry.count 1230 << " / is_cnt = " << entry.is_cnt << std::endl; 1231 } 1232 #endif 1233 } 1234 else 1235 { 1236 std::cout 1237 << "VCI_MEM_CACHE ERROR " << name() 1238 << " READ_DIR_LOCK state" << std::endl 1239 << "Bad DIR allocation" << std::endl; 1240 1241 exit(0); 1242 } 1243 break; 1244 } 1245 1246 ////////////////// 1247 case READ_DIR_HIT: 1248 { 1249 // read data in cache & update the directory 1250 // we enter this state in 3 cases: 1251 // - the read request is uncachable 1252 // - the cache line is in counter mode 1253 // - the cache line is valid but not replcated 1254 1255 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1256 { 1257 // signals generation 1258 bool inst_read = 
(m_cmd_read_trdid_fifo.read() & 0x2); 1259 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1260 bool is_cnt = r_read_is_cnt.read(); 1261 1262 // read data in the cache 1263 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1264 size_t way = r_read_way.read(); 1265 for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1266 1267 // update the cache directory 1268 DirectoryEntry entry; 1269 entry.valid = true; 1270 entry.is_cnt = is_cnt; 1271 entry.dirty = r_read_dirty.read(); 1272 entry.tag = r_read_tag.read(); 1273 entry.lock = r_read_lock.read(); 1274 entry.ptr = r_read_ptr.read(); 1275 if (cached_read) // Cached read => we must update the copies 1276 { 1277 if (!is_cnt) // Not counter mode 1278 { 1279 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1141 1280 #if L1_MULTI_CACHE 1142 r_read_copy_cache = entry.owner.cache_id; 1143 #endif 1144 r_read_copy_inst = entry.owner.inst; 1145 r_read_ptr = entry.ptr; // pointer to the heap 1146 1147 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1148 if( entry.valid ) // hit 1149 { 1150 // test if we need to register a new copy in the heap 1151 if ( entry.is_cnt || (entry.count == 0) || !cached_read ) 1152 r_read_fsm = READ_DIR_HIT; 1153 else 1154 r_read_fsm = READ_HEAP_LOCK; 1155 } 1156 else // miss 1157 { 1158 r_read_fsm = READ_TRT_LOCK; 1159 } 1281 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1282 #endif 1283 entry.owner.inst = inst_read; 1284 entry.count = r_read_count.read() + 1; 1285 } 1286 else // Counter mode 1287 { 1288 entry.owner.srcid = 0; 1289 #if L1_MULTI_CACHE 1290 entry.owner.cache_id = 0; 1291 #endif 1292 entry.owner.inst = false; 1293 entry.count = r_read_count.read() + 1; 1294 } 1295 } 1296 else // Uncached read 1297 { 1298 entry.owner.srcid = r_read_copy.read(); 1299 #if L1_MULTI_CACHE 1300 entry.owner.cache_id = r_read_copy_cache.read(); 1301 #endif 1302 entry.owner.inst = r_read_copy_inst.read(); 1303 entry.count = r_read_count.read(); 1304 } 1160 1305 1161 1306 #if DEBUG_MEMC_READ 1162 if( m_debug_read_fsm ) 1163 { 1164 std::cout << " <MEMC " << name() << ".READ_DIR_LOCK> Accessing directory: " 1165 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 1166 << " / hit = " << std::dec << entry.valid 1167 << " / count = " <<std::dec << entry.count 1168 << " / is_cnt = " << entry.is_cnt << std::endl; 1169 } 1170 #endif 1171 } 1172 break; 1173 } 1174 ////////////////// 1175 case READ_DIR_HIT: // read data in cache & update the directory 1176 // we enter this state in 3 cases: 1177 // - the read request is uncachable 1178 // - the cache line is in counter mode 1179 // - the cache line is valid but not replcated 1180 { 1181 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1182 { 1183 // signals generation 1184 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 1185 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1186 bool is_cnt = r_read_is_cnt.read(); 1187 1188 // read data in the cache 1189 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1190 size_t way = r_read_way.read(); 1191 for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1192 1193 // update the cache directory 1194 DirectoryEntry entry; 1195 entry.valid = true; 1196 entry.is_cnt = is_cnt; 1197 entry.dirty = r_read_dirty.read(); 1198 entry.tag = r_read_tag.read(); 1199 entry.lock = r_read_lock.read(); 1200 entry.ptr = r_read_ptr.read(); 1201 if (cached_read) // Cached read => we must update the copies 1202 { 1203 if (!is_cnt) // Not counter mode 1204 
{ 1205 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1307 if( m_debug_read_fsm ) 1308 { 1309 std::cout 1310 << " <MEMC " << name() << ".READ_DIR_HIT> Update directory entry:" 1311 << " set = " << std::dec << set 1312 << " / way = " << way 1313 << " / owner_id = " << entry.owner.srcid 1314 << " / owner_ins = " << entry.owner.inst 1315 << " / count = " << entry.count 1316 << " / is_cnt = " << entry.is_cnt << std::endl; 1317 } 1318 #endif 1319 1320 m_cache_directory.write(set, way, entry); 1321 r_read_fsm = READ_RSP; 1322 } 1323 break; 1324 } 1325 1326 //////////////////// 1327 case READ_HEAP_REQ: 1328 // Get the lock to the HEAP directory 1329 { 1330 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1331 { 1332 r_read_fsm = READ_HEAP_LOCK; 1333 } 1334 1335 #if DEBUG_MEMC_READ 1336 if( m_debug_read_fsm ) 1337 { 1338 std::cout 1339 << " <MEMC " << name() << ".READ_HEAP_REQ> Requesting HEAP lock " 1340 << std::endl; 1341 } 1342 #endif 1343 break; 1344 } 1345 1346 //////////////////// 1347 case READ_HEAP_LOCK: 1348 // read data in cache, update the directory 1349 // and prepare the HEAP update 1350 { 1351 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1352 { 1353 // enter counter mode when we reach the limit of copies or the heap is full 1354 bool go_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap.is_full(); 1355 1356 // read data in the cache 1357 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1358 size_t way = r_read_way.read(); 1359 for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1360 1361 // update the cache directory 1362 DirectoryEntry entry; 1363 entry.valid = true; 1364 entry.is_cnt = go_cnt; 1365 entry.dirty = r_read_dirty.read(); 1366 entry.tag = r_read_tag.read(); 1367 entry.lock = r_read_lock.read(); 1368 entry.count = r_read_count.read() + 1; 1369 1370 if (not go_cnt) // Not entering counter mode 1371 { 1372 entry.owner.srcid = r_read_copy.read(); 1206 1373 #if L1_MULTI_CACHE 1207 entry.owner.cache_id = m_cmd_read_pktid_fifo.read();1208 #endif 1209 entry.owner.inst = inst_read;1210 entry.count = r_read_count.read() + 1;1211 }1212 else //Counter mode1213 1214 1374 entry.owner.cache_id = r_read_copy_cache.read(); 1375 #endif 1376 entry.owner.inst = r_read_copy_inst.read(); 1377 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 1378 } 1379 else // Entering Counter mode 1380 { 1381 entry.owner.srcid = 0; 1215 1382 #if L1_MULTI_CACHE 1216 entry.owner.cache_id = 0; 1217 #endif 1218 entry.owner.inst = false; 1219 entry.count = r_read_count.read() + 1; 1220 } 1221 } 1222 else // Uncached read 1223 { 1224 entry.owner.srcid = r_read_copy.read(); 1383 entry.owner.cache_id = 0; 1384 #endif 1385 entry.owner.inst = false; 1386 entry.ptr = 0; 1387 } 1388 1389 m_cache_directory.write(set, way, entry); 1390 1391 // prepare the heap update (add an entry, or clear the linked list) 1392 if (not go_cnt) // not switching to counter mode 1393 { 1394 // We test if the next free entry in the heap is the last 1395 HeapEntry heap_entry = m_heap.next_free_entry(); 1396 r_read_next_ptr = heap_entry.next; 1397 r_read_last_free = ( heap_entry.next == m_heap.next_free_ptr() ); 1398 1399 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 1400 } 1401 else // switching to counter mode 1402 { 1403 if ( r_read_count.read()>1 ) // heap must be cleared 1404 { 1405 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1406 r_read_next_ptr = m_heap.next_free_ptr(); 1407 m_heap.write_free_ptr(r_read_ptr.read()); 1408 1409 if( 
next_entry.next == r_read_ptr.read() ) // last entry 1410 { 1411 r_read_fsm = READ_HEAP_LAST; // erase the entry 1412 } 1413 else // not the last entry 1414 { 1415 r_read_ptr = next_entry.next; 1416 r_read_fsm = READ_HEAP_ERASE; // erase the list 1417 } 1418 } 1419 else // the heap is not used / nothing to do 1420 { 1421 r_read_fsm = READ_RSP; 1422 } 1423 } 1424 1425 #if DEBUG_MEMC_READ 1426 if( m_debug_read_fsm ) 1427 { 1428 std::cout << " <MEMC " << name() << ".READ_HEAP_LOCK> Update directory:" 1429 << " tag = " << std::hex << entry.tag 1430 << " set = " << std::dec << set 1431 << " way = " << way 1432 << " count = " << entry.count 1433 << " is_cnt = " << entry.is_cnt << std::endl; 1434 } 1435 #endif 1436 } 1437 else 1438 { 1439 std::cout 1440 << "VCI_MEM_CACHE ERROR " << name() 1441 << " READ_HEAP_LOCK state" << std::endl 1442 << "Bad HEAP allocation" << std::endl; 1443 1444 exit(0); 1445 } 1446 1447 break; 1448 } 1449 1450 ///////////////////// 1451 case READ_HEAP_WRITE: // add a entry in the heap 1452 { 1453 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1454 { 1455 HeapEntry heap_entry; 1456 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1225 1457 #if L1_MULTI_CACHE 1226 entry.owner.cache_id = r_read_copy_cache.read(); 1227 #endif 1228 entry.owner.inst = r_read_copy_inst.read(); 1229 entry.count = r_read_count.read(); 1230 } 1458 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1459 #endif 1460 heap_entry.owner.inst = (m_cmd_read_trdid_fifo.read() & 0x2); 1461 1462 if(r_read_count.read() == 1) // creation of a new linked list 1463 { 1464 heap_entry.next = m_heap.next_free_ptr(); 1465 } 1466 else // head insertion in existing list 1467 { 1468 heap_entry.next = r_read_ptr.read(); 1469 } 1470 m_heap.write_free_entry(heap_entry); 1471 m_heap.write_free_ptr(r_read_next_ptr.read()); 1472 if(r_read_last_free.read()) m_heap.set_full(); 1473 1474 r_read_fsm = READ_RSP; 1231 1475 1232 1476 #if DEBUG_MEMC_READ 1233 if( m_debug_read_fsm ) 1234 { 1235 std::cout << " <MEMC " << name() << ".READ_DIR_HIT> Update directory entry:" 1236 << " set = " << std::dec << set 1237 << " / way = " << way 1238 << " / owner_id = " << entry.owner.srcid 1239 << " / owner_ins = " << entry.owner.inst 1240 << " / count = " << entry.count 1241 << " / is_cnt = " << entry.is_cnt << std::endl; 1242 } 1243 #endif 1244 1245 m_cache_directory.write(set, way, entry); 1246 r_read_fsm = READ_RSP; 1247 } 1248 break; 1249 } 1250 //////////////////// 1251 case READ_HEAP_LOCK: // read data in cache, update the directory 1252 // and prepare the HEAP update 1253 { 1254 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1255 { 1256 // enter counter mode when we reach the limit of copies or the heap is full 1257 bool go_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap.is_full(); 1258 1259 // read data in the cache 1260 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1261 size_t way = r_read_way.read(); 1262 for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1263 1264 // update the cache directory 1265 DirectoryEntry entry; 1266 entry.valid = true; 1267 entry.is_cnt = go_cnt; 1268 entry.dirty = r_read_dirty.read(); 1269 entry.tag = r_read_tag.read(); 1270 entry.lock = r_read_lock.read(); 1271 entry.count = r_read_count.read() + 1; 1272 1273 if (not go_cnt) // Not entering counter mode 1274 { 1275 entry.owner.srcid = r_read_copy.read(); 1477 if( m_debug_read_fsm ) 1478 { 1479 std::cout 1480 << " <MEMC " << name() << ".READ_HEAP_WRITE> Add an entry in the heap:" 
1481 << " owner_id = " << heap_entry.owner.srcid 1482 << " owner_ins = " << heap_entry.owner.inst << std::endl; 1483 } 1484 #endif 1485 } 1486 else 1487 { 1488 std::cout 1489 << "VCI_MEM_CACHE ERROR " << name() 1490 << " READ_HEAP_WRITE state" << std::endl 1491 << "Bad HEAP allocation" << std::endl; 1492 1493 exit(0); 1494 } 1495 break; 1496 } 1497 1498 ///////////////////// 1499 case READ_HEAP_ERASE: 1500 { 1501 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1502 { 1503 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1504 if( next_entry.next == r_read_ptr.read() ) 1505 { 1506 r_read_fsm = READ_HEAP_LAST; 1507 } 1508 else 1509 { 1510 r_read_ptr = next_entry.next; 1511 r_read_fsm = READ_HEAP_ERASE; 1512 } 1513 } 1514 else 1515 { 1516 std::cout 1517 << "VCI_MEM_CACHE ERROR " << name() 1518 << " READ_HEAP_ERASE state" << std::endl 1519 << "Bad HEAP allocation" << std::endl; 1520 1521 exit(0); 1522 } 1523 break; 1524 } 1525 1526 //////////////////// 1527 case READ_HEAP_LAST: 1528 { 1529 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1530 { 1531 HeapEntry last_entry; 1532 last_entry.owner.srcid = 0; 1276 1533 #if L1_MULTI_CACHE 1277 entry.owner.cache_id= r_read_copy_cache.read(); 1278 #endif 1279 entry.owner.inst = r_read_copy_inst.read(); 1280 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 1281 } 1282 else // Entering Counter mode 1283 { 1284 entry.owner.srcid = 0; 1534 last_entry.owner.cache_id = 0; 1535 #endif 1536 last_entry.owner.inst = false; 1537 1538 if(m_heap.is_full()) 1539 { 1540 last_entry.next = r_read_ptr.read(); 1541 m_heap.unset_full(); 1542 } 1543 else 1544 { 1545 last_entry.next = r_read_next_ptr.read(); 1546 } 1547 m_heap.write(r_read_ptr.read(),last_entry); 1548 r_read_fsm = READ_RSP; 1549 } 1550 else 1551 { 1552 std::cout << "VCI_MEM_CACHE ERROR " << name() 1553 << " READ_HEAP_LAST state" << std::endl; 1554 std::cout << "Bad HEAP allocation" << std::endl; 1555 exit(0); 1556 } 1557 break; 1558 } 1559 1560 ////////////// 1561 case READ_RSP: // request the TGT_RSP FSM to return data 1562 { 1563 if( !r_read_to_tgt_rsp_req ) 1564 { 1565 for ( size_t i=0 ; i<m_words ; i++ ) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 1566 r_read_to_tgt_rsp_word = m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; 1567 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 1568 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 1569 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1570 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1571 cmd_read_fifo_get = true; 1572 r_read_to_tgt_rsp_req = true; 1573 r_read_fsm = READ_IDLE; 1574 1575 #if DEBUG_MEMC_READ 1576 if( m_debug_read_fsm ) 1577 { 1578 std::cout << " <MEMC " << name() << ".READ_RSP> Request the TGT_RSP FSM to return data:" 1579 << " rsrcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1580 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 1581 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1582 } 1583 #endif 1584 } 1585 break; 1586 } 1587 1588 /////////////////// 1589 case READ_TRT_LOCK: // read miss : check the Transaction Table 1590 { 1591 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1592 { 1593 size_t index = 0; 1594 vci_addr_t addr = (vci_addr_t)m_cmd_read_addr_fifo.read(); 1595 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], index); 1596 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 1597 bool wok = !m_transaction_tab.full(index); 1598 1599 if( hit_read || !wok || hit_write ) // missing line already requested or no 
space 1600 { 1601 if(!wok) m_cpt_trt_full++; 1602 if(hit_read || hit_write) m_cpt_trt_rb++; 1603 r_read_fsm = READ_IDLE; 1604 } 1605 else // missing line is requested to the XRAM 1606 { 1607 m_cpt_read_miss++; 1608 r_read_trt_index = index; 1609 r_read_fsm = READ_TRT_SET; 1610 } 1611 1612 #if DEBUG_MEMC_READ 1613 if( m_debug_read_fsm ) 1614 { 1615 std::cout << " <MEMC " << name() << ".READ_TRT_LOCK> Check TRT:" 1616 << " hit_read = " << hit_read 1617 << " / hit_write = " << hit_write 1618 << " / full = " << !wok << std::endl; 1619 } 1620 #endif 1621 } 1622 break; 1623 } 1624 1625 ////////////////// 1626 case READ_TRT_SET: // register get transaction in TRT 1627 { 1628 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1629 { 1630 m_transaction_tab.set(r_read_trt_index.read(), 1631 true, 1632 m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1633 m_cmd_read_srcid_fifo.read(), 1634 m_cmd_read_trdid_fifo.read(), 1635 m_cmd_read_pktid_fifo.read(), 1636 true, 1637 m_cmd_read_length_fifo.read(), 1638 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1639 std::vector<be_t>(m_words,0), 1640 std::vector<data_t>(m_words,0)); 1641 #if DEBUG_MEMC_READ 1642 if( m_debug_read_fsm ) 1643 { 1644 std::cout << " <MEMC " << name() << ".READ_TRT_SET> Write in Transaction Table: " << std::hex 1645 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 1646 << " / srcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1647 << std::endl; 1648 } 1649 #endif 1650 r_read_fsm = READ_TRT_REQ; 1651 } 1652 break; 1653 } 1654 1655 ////////////////// 1656 case READ_TRT_REQ: 1657 { 1658 // consume the read request in the FIFO, 1659 // and send it to the ixr_cmd_fsm 1660 1661 if( not r_read_to_ixr_cmd_req ) 1662 { 1663 cmd_read_fifo_get = true; 1664 r_read_to_ixr_cmd_req = true; 1665 r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1666 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 1667 r_read_fsm = READ_IDLE; 1668 1669 #if DEBUG_MEMC_READ 1670 if( m_debug_read_fsm ) 1671 { 1672 std::cout 1673 << " <MEMC " << name() << ".READ_TRT_REQ> Request GET transaction for address " 1674 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 1675 } 1676 #endif 1677 } 1678 break; 1679 } 1680 } // end switch read_fsm 1681 1682 /////////////////////////////////////////////////////////////////////////////////// 1683 // WRITE FSM 1684 /////////////////////////////////////////////////////////////////////////////////// 1685 // The WRITE FSM handles the write bursts sent by the processors. 1686 // All addresses in a burst must be in the same cache line. 1687 // A complete write burst is consumed in the FIFO & copied to a local buffer. 1688 // Then the FSM takes the lock protecting the cache directory, to check 1689 // if the line is in the cache. 1690 // 1691 // - In case of HIT, the cache is updated. 1692 // If there is no other copy, an acknowledge response is immediately 1693 // returned to the writing processor. 1694 // If the data is cached by other processors, a coherence transaction must 1695 // be launched: 1696 // It is a multicast update if the line is not in counter mode, and the processor 1697 // takes the lock protecting the Update Table (UPT) to register this transaction. 1698 // It is a broadcast invalidate if the line is in counter mode. 1699 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 1700 // a multi-update request to all owners of the line (but the writer), 1701 // through the INIT_CMD FSM. 
In case of coherence transaction, the WRITE FSM 1702 // does not respond to the writing processor, as this response will be sent by 1703 // the INIT_RSP FSM when all update responses have been received. 1704 // 1705 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 1706 // table (TRT). If a read transaction to the XRAM for this line already exists, 1707 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 1708 // the WRITE FSM register a new transaction in TRT, and sends a read line request 1709 // to the XRAM. If the TRT is full, it releases the lock, and waits. 1710 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 1711 ///////////////////////////////////////////////////////////////////////////////////// 1712 1713 switch ( r_write_fsm.read() ) 1714 { 1715 //////////////// 1716 case WRITE_IDLE: // copy first word of a write burst in local buffer 1717 { 1718 if ( m_cmd_write_addr_fifo.rok() ) 1719 { 1720 m_cpt_write++; 1721 m_cpt_write_cells++; 1722 1723 // consume a word in the FIFO & write it in the local buffer 1724 cmd_write_fifo_get = true; 1725 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1726 1727 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1728 r_write_word_index = index; 1729 r_write_word_count = 1; 1730 r_write_data[index] = m_cmd_write_data_fifo.read(); 1731 r_write_srcid = m_cmd_write_srcid_fifo.read(); 1732 r_write_trdid = m_cmd_write_trdid_fifo.read(); 1733 r_write_pktid = m_cmd_write_pktid_fifo.read(); 1734 1735 // initialize the be field for all words 1736 for ( size_t i=0 ; i<m_words ; i++ ) 1737 { 1738 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 1739 else r_write_be[i] = 0x0; 1740 } 1741 1742 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1743 r_write_byte = true; 1744 else 1745 r_write_byte = false; 1746 1747 if( m_cmd_write_eop_fifo.read() ) 1748 { 1749 r_write_fsm = WRITE_DIR_REQ; 1750 } 1751 else 1752 { 1753 r_write_fsm = WRITE_NEXT; 1754 } 1755 1756 #if DEBUG_MEMC_WRITE 1757 if( m_debug_write_fsm ) 1758 { 1759 std::cout << " <MEMC " << name() << ".WRITE_IDLE> Write request " 1760 << " srcid = " << std::dec << m_cmd_write_srcid_fifo.read() 1761 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 1762 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 1763 } 1764 #endif 1765 } 1766 break; 1767 } 1768 1769 //////////////// 1770 case WRITE_NEXT: // copy next word of a write burst in local buffer 1771 { 1772 if ( m_cmd_write_addr_fifo.rok() ) 1773 { 1774 1775 #if DEBUG_MEMC_WRITE 1776 if( m_debug_write_fsm ) 1777 { 1778 std::cout << " <MEMC " << name() << ".WRITE_NEXT> Write another word in local buffer" 1779 << std::endl; 1780 } 1781 #endif 1782 m_cpt_write_cells++; 1783 1784 // check that the next word is in the same cache line 1785 if (( m_nline[(vci_addr_t)(r_write_address.read())] != 1786 m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())] )) 1787 { 1788 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl 1789 << "all words in a write burst must be in same cache line" << std::endl; 1790 1791 exit(0); 1792 } 1793 1794 // consume a word in the FIFO & write it in the local buffer 1795 cmd_write_fifo_get = true; 1796 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1797 1798 r_write_be[index] = m_cmd_write_be_fifo.read(); 1799 r_write_data[index] = m_cmd_write_data_fifo.read(); 1800 r_write_word_count = r_write_word_count.read() + 1; 
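// a word with a partial byte enable (BE != 0x0 and BE != 0xF) forces a later
// read-modify-write of the cache line (r_write_byte => WRITE_DIR_READ)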
1801 1802 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1803 r_write_byte = true; 1804 1805 if ( m_cmd_write_eop_fifo.read() ) 1806 { 1807 r_write_fsm = WRITE_DIR_REQ; 1808 } 1809 } 1810 break; 1811 } 1812 1813 //////////////////// 1814 case WRITE_DIR_REQ: 1815 // Get the lock to the directory 1816 { 1817 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 1818 { 1819 r_write_fsm = WRITE_DIR_LOCK; 1820 } 1821 1822 #if DEBUG_MEMC_WRITE 1823 if( m_debug_write_fsm ) 1824 { 1825 std::cout 1826 << " <MEMC " << name() << ".WRITE_DIR_REQ> Requesting DIR lock " 1827 << std::endl; 1828 } 1829 #endif 1830 1831 break; 1832 } 1833 1834 //////////////////// 1835 case WRITE_DIR_LOCK: 1836 // access directory to check hit/miss 1837 { 1838 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 1839 { 1840 size_t way = 0; 1841 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 1842 1843 if ( entry.valid ) // hit 1844 { 1845 // copy directory entry in local buffer in case of hit 1846 r_write_is_cnt = entry.is_cnt; 1847 r_write_lock = entry.lock; 1848 r_write_tag = entry.tag; 1849 r_write_copy = entry.owner.srcid; 1285 1850 #if L1_MULTI_CACHE 1286 entry.owner.cache_id= 0; 1287 #endif 1288 entry.owner.inst = false; 1289 entry.ptr = 0; 1290 } 1291 1292 m_cache_directory.write(set, way, entry); 1293 1294 // prepare the heap update (add an entry, or clear the linked list) 1295 if (not go_cnt) // not switching to counter mode 1296 { 1297 // We test if the next free entry in the heap is the last 1298 HeapEntry heap_entry = m_heap.next_free_entry(); 1299 r_read_next_ptr = heap_entry.next; 1300 r_read_last_free = ( heap_entry.next == m_heap.next_free_ptr() ); 1301 1302 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 1303 } 1304 else // switching to counter mode 1305 { 1306 if ( r_read_count.read()>1 ) // heap must be cleared 1307 { 1308 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1309 r_read_next_ptr = m_heap.next_free_ptr(); 1310 m_heap.write_free_ptr(r_read_ptr.read()); 1311 1312 if( next_entry.next == r_read_ptr.read() ) // last entry 1313 { 1314 r_read_fsm = READ_HEAP_LAST; // erase the entry 1315 } 1316 else // not the last entry 1317 { 1318 r_read_ptr = next_entry.next; 1319 r_read_fsm = READ_HEAP_ERASE; // erase the list 1320 } 1321 } 1322 else // the heap is not used / nothing to do 1323 { 1324 r_read_fsm = READ_RSP; 1325 } 1326 } 1327 1328 #if DEBUG_MEMC_READ 1329 if( m_debug_read_fsm ) 1330 { 1331 std::cout << " <MEMC " << name() << ".READ_HEAP_LOCK> Update directory:" 1332 << " tag = " << std::hex << entry.tag 1333 << " set = " << std::dec << set 1334 << " way = " << way 1335 << " count = " << entry.count 1336 << " is_cnt = " << entry.is_cnt << std::endl; 1337 } 1338 #endif 1339 } 1340 break; 1341 } 1342 ///////////////////// 1343 case READ_HEAP_WRITE: // add a entry in the heap 1344 { 1345 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1346 { 1347 HeapEntry heap_entry; 1348 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1851 r_write_copy_cache = entry.owner.cache_id; 1852 #endif 1853 r_write_copy_inst = entry.owner.inst; 1854 r_write_count = entry.count; 1855 r_write_ptr = entry.ptr; 1856 r_write_way = way; 1857 1858 if( entry.is_cnt && entry.count ) 1859 { 1860 r_write_fsm = WRITE_DIR_READ; 1861 } 1862 else 1863 { 1864 if (r_write_byte.read()) 1865 { 1866 r_write_fsm = WRITE_DIR_READ; 1867 } 1868 else 1869 { 1870 r_write_fsm = WRITE_DIR_HIT; 1871 } 1872 } 1873 } 1874 else // miss 1875 { 1876 r_write_fsm = 
WRITE_MISS_TRT_LOCK; 1877 } 1878 1879 #if DEBUG_MEMC_WRITE 1880 if( m_debug_write_fsm ) 1881 { 1882 std::cout << " <MEMC " << name() << ".WRITE_DIR_LOCK> Check the directory: " 1883 << " address = " << std::hex << r_write_address.read() 1884 << " hit = " << std::dec << entry.valid 1885 << " count = " << entry.count 1886 << " is_cnt = " << entry.is_cnt << std::endl; 1887 } 1888 #endif 1889 } 1890 else 1891 { 1892 std::cout << "VCI_MEM_CACHE ERROR " << name() 1893 << " WRITE_DIR_LOCK state" << std::endl 1894 << "bad DIR allocation" << std::endl; 1895 1896 exit(0); 1897 } 1898 1899 break; 1900 } 1901 1902 //////////////////// 1903 case WRITE_DIR_READ: // read the cache and complete the buffer when be!=0xF 1904 { 1905 // update local buffer 1906 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1907 size_t way = r_write_way.read(); 1908 for(size_t i=0 ; i<m_words ; i++) 1909 { 1910 data_t mask = 0; 1911 if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; 1912 if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; 1913 if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; 1914 if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; 1915 1916 // complete only if mask is not null (for energy consumption) 1917 if ( r_write_be[i].read() || r_write_is_cnt.read() ) 1918 { 1919 r_write_data[i] = (r_write_data[i].read() & mask) | 1920 (m_cache_data[way][set][i] & ~mask); 1921 } 1922 } // end for 1923 1924 // test if a coherence broadcast is required 1925 if( r_write_is_cnt.read() && r_write_count.read() ) 1926 { 1927 r_write_fsm = WRITE_BC_TRT_LOCK; 1928 } 1929 else 1930 { 1931 r_write_fsm = WRITE_DIR_HIT; 1932 } 1933 1934 #if DEBUG_MEMC_WRITE 1935 if( m_debug_write_fsm ) 1936 { 1937 std::cout << " <MEMC " << name() << ".WRITE_DIR_READ> Read the cache to complete local buffer" << std::endl; 1938 } 1939 #endif 1940 break; 1941 } 1942 1943 /////////////////// 1944 case WRITE_DIR_HIT: 1945 { 1946 // update the cache directory 1947 // update directory with Dirty bit 1948 DirectoryEntry entry; 1949 entry.valid = true; 1950 entry.dirty = true; 1951 entry.tag = r_write_tag.read(); 1952 entry.is_cnt = r_write_is_cnt.read(); 1953 entry.lock = r_write_lock.read(); 1954 entry.owner.srcid = r_write_copy.read(); 1349 1955 #if L1_MULTI_CACHE 1350 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1351 #endif 1352 heap_entry.owner.inst = (m_cmd_read_trdid_fifo.read() & 0x2); 1353 1354 if(r_read_count.read() == 1) // creation of a new linked list 1355 { 1356 heap_entry.next = m_heap.next_free_ptr(); 1357 } 1358 else // head insertion in existing list 1359 { 1360 heap_entry.next = r_read_ptr.read(); 1361 } 1362 m_heap.write_free_entry(heap_entry); 1363 m_heap.write_free_ptr(r_read_next_ptr.read()); 1364 if(r_read_last_free.read()) m_heap.set_full(); 1365 1366 r_read_fsm = READ_RSP; 1367 1368 #if DEBUG_MEMC_READ 1369 if( m_debug_read_fsm ) 1370 { 1371 std::cout << " <MEMC " << name() << ".READ_HEAP_WRITE> Add an entry in the heap:" 1372 << " owner_id = " << heap_entry.owner.srcid 1373 << " owner_ins = " << heap_entry.owner.inst << std::endl; 1374 } 1375 #endif 1376 } 1377 else 1378 { 1379 std::cout << "VCI_MEM_CACHE ERROR " << name() 1380 << " READ_HEAP_WRITE state" << std::endl; 1381 std::cout << "Bad HEAP allocation" << std::endl; 1382 exit(0); 1383 } 1384 break; 1385 } 1386 ///////////////////// 1387 case READ_HEAP_ERASE: 1388 { 1389 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1390 { 1391 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1392 if( next_entry.next == 
r_read_ptr.read() ) 1393 { 1394 r_read_fsm = READ_HEAP_LAST; 1395 } 1396 else 1397 { 1398 r_read_ptr = next_entry.next; 1399 r_read_fsm = READ_HEAP_ERASE; 1400 } 1401 } 1402 else 1403 { 1404 std::cout << "VCI_MEM_CACHE ERROR " << name() 1405 << " READ_HEAP_ERASE state" << std::endl; 1406 std::cout << "Bad HEAP allocation" << std::endl; 1407 exit(0); 1408 } 1409 break; 1410 } 1411 //////////////////// 1412 case READ_HEAP_LAST: 1413 { 1414 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1415 { 1416 HeapEntry last_entry; 1417 last_entry.owner.srcid = 0; 1956 entry.owner.cache_id = r_write_copy_cache.read(); 1957 #endif 1958 entry.owner.inst = r_write_copy_inst.read(); 1959 entry.count = r_write_count.read(); 1960 entry.ptr = r_write_ptr.read(); 1961 1962 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1963 size_t way = r_write_way.read(); 1964 1965 // update directory 1966 m_cache_directory.write(set, way, entry); 1967 1968 // owner is true when the the first registered copy is the writer itself 1969 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 1418 1970 #if L1_MULTI_CACHE 1419 last_entry.owner.cache_id = 0; 1420 #endif 1421 last_entry.owner.inst = false; 1422 1423 if(m_heap.is_full()) 1424 { 1425 last_entry.next = r_read_ptr.read(); 1426 m_heap.unset_full(); 1427 } 1428 else 1429 { 1430 last_entry.next = r_read_next_ptr.read(); 1431 } 1432 m_heap.write(r_read_ptr.read(),last_entry); 1433 r_read_fsm = READ_RSP; 1434 } 1435 else 1436 { 1437 std::cout << "VCI_MEM_CACHE ERROR " << name() 1438 << " READ_HEAP_LAST state" << std::endl; 1439 std::cout << "Bad HEAP allocation" << std::endl; 1440 exit(0); 1441 } 1442 break; 1443 } 1444 ////////////// 1445 case READ_RSP: // request the TGT_RSP FSM to return data 1446 { 1447 if( !r_read_to_tgt_rsp_req ) 1448 { 1449 for ( size_t i=0 ; i<m_words ; i++ ) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 1450 r_read_to_tgt_rsp_word = m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; 1451 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 1452 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 1453 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1454 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1455 cmd_read_fifo_get = true; 1456 r_read_to_tgt_rsp_req = true; 1457 r_read_fsm = READ_IDLE; 1458 1459 #if DEBUG_MEMC_READ 1460 if( m_debug_read_fsm ) 1461 { 1462 std::cout << " <MEMC " << name() << ".READ_RSP> Request the TGT_RSP FSM to return data:" 1463 << " rsrcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1464 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 1465 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1466 } 1467 #endif 1468 } 1469 break; 1470 } 1471 /////////////////// 1472 case READ_TRT_LOCK: // read miss : check the Transaction Table 1473 { 1474 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1475 { 1476 size_t index = 0; 1477 vci_addr_t addr = (vci_addr_t)m_cmd_read_addr_fifo.read(); 1478 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], index); 1479 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 1480 bool wok = !m_transaction_tab.full(index); 1481 1482 if( hit_read || !wok || hit_write ) // missing line already requested or no space 1483 { 1484 if(!wok) m_cpt_trt_full++; 1485 if(hit_read || hit_write) m_cpt_trt_rb++; 1486 r_read_fsm = READ_IDLE; 1487 } 1488 else // missing line is requested to the XRAM 1489 { 1490 m_cpt_read_miss++; 1491 r_read_trt_index = index; 1492 r_read_fsm = READ_TRT_SET; 1493 } 1494 1495 #if DEBUG_MEMC_READ 
1496 if( m_debug_read_fsm ) 1497 { 1498 std::cout << " <MEMC " << name() << ".READ_TRT_LOCK> Check TRT:" 1499 << " hit_read = " << hit_read 1500 << " / hit_write = " << hit_write 1501 << " / full = " << !wok << std::endl; 1502 } 1503 #endif 1504 } 1505 break; 1506 } 1507 ////////////////// 1508 case READ_TRT_SET: // register get transaction in TRT 1509 { 1510 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1511 { 1512 m_transaction_tab.set(r_read_trt_index.read(), 1513 true, 1514 m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1515 m_cmd_read_srcid_fifo.read(), 1516 m_cmd_read_trdid_fifo.read(), 1517 m_cmd_read_pktid_fifo.read(), 1518 true, 1519 m_cmd_read_length_fifo.read(), 1520 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1521 std::vector<be_t>(m_words,0), 1522 std::vector<data_t>(m_words,0)); 1523 #if DEBUG_MEMC_READ 1524 if( m_debug_read_fsm ) 1525 { 1526 std::cout << " <MEMC " << name() << ".READ_TRT_SET> Write in Transaction Table: " << std::hex 1527 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 1528 << " / srcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1529 << std::endl; 1530 } 1531 #endif 1532 r_read_fsm = READ_TRT_REQ; 1533 } 1534 break; 1535 } 1536 ////////////////// 1537 case READ_TRT_REQ: // consume the read request in the FIFO, 1538 // and send it to the ixr_cmd_fsm 1539 { 1540 if( not r_read_to_ixr_cmd_req ) 1541 { 1542 cmd_read_fifo_get = true; 1543 r_read_to_ixr_cmd_req = true; 1544 r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1545 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 1546 r_read_fsm = READ_IDLE; 1547 1548 #if DEBUG_MEMC_READ 1549 if( m_debug_read_fsm ) 1550 { 1551 std::cout << " <MEMC " << name() << ".READ_TRT_REQ> Request GET transaction for address " 1552 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 1553 } 1554 #endif 1555 } 1556 break; 1557 } 1558 } // end switch read_fsm 1559 1560 /////////////////////////////////////////////////////////////////////////////////// 1561 // WRITE FSM 1562 /////////////////////////////////////////////////////////////////////////////////// 1563 // The WRITE FSM handles the write bursts sent by the processors. 1564 // All addresses in a burst must be in the same cache line. 1565 // A complete write burst is consumed in the FIFO & copied to a local buffer. 1566 // Then the FSM takes the lock protecting the cache directory, to check 1567 // if the line is in the cache. 1568 // 1569 // - In case of HIT, the cache is updated. 1570 // If there is no other copy, an acknowledge response is immediately 1571 // returned to the writing processor. 1572 // If the data is cached by other processors, a coherence transaction must 1573 // be launched: 1574 // It is a multicast update if the line is not in counter mode, and the processor 1575 // takes the lock protecting the Update Table (UPT) to register this transaction. 1576 // It is a broadcast invalidate if the line is in counter mode. 1577 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 1578 // a multi-update request to all owners of the line (but the writer), 1579 // through the INIT_CMD FSM. In case of coherence transaction, the WRITE FSM 1580 // does not respond to the writing processor, as this response will be sent by 1581 // the INIT_RSP FSM when all update responses have been received. 1582 // 1583 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 1584 // table (TRT). 
If a read transaction to the XRAM for this line already exists, 1585 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 1586 // the WRITE FSM register a new transaction in TRT, and sends a read line request 1587 // to the XRAM. If the TRT is full, it releases the lock, and waits. 1588 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 1589 ///////////////////////////////////////////////////////////////////////////////////// 1590 1591 switch ( r_write_fsm.read() ) 1971 and (r_write_copy_cache.read()==r_write_pktid.read()) 1972 #endif 1973 ) and not r_write_copy_inst.read()); 1974 1975 // no_update is true when there is no need for coherence transaction 1976 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 1977 1978 // write data in the cache if no coherence transaction 1979 if( no_update ) 1980 { 1981 for(size_t i=0 ; i<m_words ; i++) 1982 { 1983 if ( r_write_be[i].read() ) 1984 { 1985 m_cache_data[way][set][i] = r_write_data[i].read(); 1986 1987 if ( m_monitor_ok ) 1988 { 1989 vci_addr_t address = (r_write_address.read() & ~(vci_addr_t)0x3F) | i<<2; 1990 char buf[80]; 1991 snprintf(buf, 80, "WRITE_DIR_HIT srcid %d", r_write_srcid.read()); 1992 check_monitor( buf, address, r_write_data[i].read() ); 1993 } 1994 } 1995 } 1996 } 1997 1998 if ( owner and not no_update ) 1999 { 2000 r_write_count = r_write_count.read() - 1; 2001 } 2002 2003 if ( no_update ) 2004 // Write transaction completed 2005 { 2006 r_write_fsm = WRITE_RSP; 2007 } 2008 else 2009 // coherence update required 2010 { 2011 if( !r_write_to_init_cmd_multi_req.read() && 2012 !r_write_to_init_cmd_brdcast_req.read() ) 2013 { 2014 r_write_fsm = WRITE_UPT_LOCK; 2015 } 2016 else 2017 { 2018 r_write_fsm = WRITE_WAIT; 2019 } 2020 } 2021 2022 #if DEBUG_MEMC_WRITE 2023 if( m_debug_write_fsm ) 2024 { 2025 if ( no_update ) 2026 { 2027 std::cout << " <MEMC " << name() << ".WRITE_DIR_HIT> Write into cache / No coherence transaction" 2028 << std::endl; 2029 } 2030 else 2031 { 2032 std::cout << " <MEMC " << name() << ".WRITE_DIR_HIT> Coherence update required:" 2033 << " is_cnt = " << r_write_is_cnt.read() 2034 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2035 if (owner) 2036 std::cout << " ... 
but the first copy is the writer" << std::endl; 2037 } 2038 } 2039 #endif 2040 break; 2041 } 2042 2043 //////////////////// 2044 case WRITE_UPT_LOCK: // Try to register the update request in UPT 1592 2045 { 1593 //////////////// 1594 case WRITE_IDLE: // copy first word of a write burst in local buffer 1595 { 1596 if ( m_cmd_write_addr_fifo.rok() ) 1597 { 1598 m_cpt_write++; 1599 m_cpt_write_cells++; 1600 1601 // consume a word in the FIFO & write it in the local buffer 1602 cmd_write_fifo_get = true; 1603 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1604 1605 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1606 r_write_word_index = index; 1607 r_write_word_count = 1; 1608 r_write_data[index] = m_cmd_write_data_fifo.read(); 1609 r_write_srcid = m_cmd_write_srcid_fifo.read(); 1610 r_write_trdid = m_cmd_write_trdid_fifo.read(); 1611 r_write_pktid = m_cmd_write_pktid_fifo.read(); 1612 1613 // initialize the be field for all words 1614 for ( size_t i=0 ; i<m_words ; i++ ) 1615 { 1616 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 1617 else r_write_be[i] = 0x0; 1618 } 1619 1620 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1621 r_write_byte = true; 1622 else 1623 r_write_byte = false; 1624 1625 if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1626 else r_write_fsm = WRITE_NEXT; 2046 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 2047 { 2048 bool wok = false; 2049 size_t index = 0; 2050 size_t srcid = r_write_srcid.read(); 2051 size_t trdid = r_write_trdid.read(); 2052 size_t pktid = r_write_pktid.read(); 2053 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2054 size_t nb_copies = r_write_count.read(); 2055 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 2056 size_t way = r_write_way.read(); 2057 2058 wok = m_update_tab.set(true, // it's an update transaction 2059 false, // it's not a broadcast 2060 true, // it needs a response 2061 srcid, 2062 trdid, 2063 pktid, 2064 nline, 2065 nb_copies, 2066 index); 2067 if ( wok ) // write data in cache 2068 { 2069 for(size_t i=0 ; i<m_words ; i++) 2070 { 2071 if ( r_write_be[i].read() ) 2072 { 2073 m_cache_data[way][set][i] = r_write_data[i].read(); 2074 2075 if ( m_monitor_ok ) 2076 { 2077 vci_addr_t address = (r_write_address.read() & ~(vci_addr_t)0x3F) | i<<2; 2078 char buf[80]; 2079 snprintf(buf, 80, "WRITE_UPT_LOCK srcid %d", srcid); 2080 check_monitor(buf, address, r_write_data[i].read() ); 2081 } 2082 } 2083 } 2084 } 1627 2085 1628 2086 #if DEBUG_MEMC_WRITE 1629 if( m_debug_write_fsm ) 1630 { 1631 std::cout << " <MEMC " << name() << ".WRITE_IDLE> Write request " 2087 if( m_debug_write_fsm ) 2088 { 2089 if ( wok ) 2090 { 2091 std::cout << " <MEMC " << name() << ".WRITE_UPT_LOCK> Register the multicast update in UPT / " 2092 << " nb_copies = " << r_write_count.read() << std::endl; 2093 } 2094 } 2095 #endif 2096 r_write_upt_index = index; 2097 // releases the lock protecting UPT and the DIR if no entry... 
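// A rough picture of what the m_update_tab.set() call above records (field names
// are illustrative, not the actual UpdateTab layout):
//
//     update  = true          : multicast update (WRITE_BC_UPT_LOCK registers the
//                               broadcast-invalidate variant with update = false)
//     rsp     = true          : the writer still expects a write response
//     srcid / trdid / pktid   : identify that response
//     nline                   : index of the modified cache line
//     count   = nb_copies     : acknowledgements still expected; the INIT_RSP FSM
//                               decrements it and answers the writer when it reaches zero
//
// 'index' returns the allocated UPT entry (saved in r_write_upt_index), and wok is
// false when the UPT is full, which forces the retry path through WRITE_WAIT.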
2098 if ( wok ) r_write_fsm = WRITE_UPT_HEAP_LOCK; 2099 else r_write_fsm = WRITE_WAIT; 2100 } 2101 break; 2102 } 2103 2104 ///////////////////////// 2105 case WRITE_UPT_HEAP_LOCK: // get access to heap 2106 { 2107 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ) 2108 { 2109 2110 #if DEBUG_MEMC_WRITE 2111 if( m_debug_write_fsm ) 2112 { 2113 std::cout << " <MEMC " << name() << ".WRITE_UPT_HEAP_LOCK> Get access to the HEAP" << std::endl; 2114 } 2115 #endif 2116 r_write_fsm = WRITE_UPT_REQ; 2117 } 2118 break; 2119 } 2120 2121 ////////////////// 2122 case WRITE_UPT_REQ: 2123 { 2124 // prepare the coherence transaction for the INIT_CMD FSM 2125 // and write the first copy in the FIFO 2126 // send the request if only one copy 2127 2128 if( !r_write_to_init_cmd_multi_req.read() && 2129 !r_write_to_init_cmd_brdcast_req.read() ) // no pending coherence request 2130 { 2131 r_write_to_init_cmd_brdcast_req = false; 2132 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 2133 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2134 r_write_to_init_cmd_index = r_write_word_index.read(); 2135 r_write_to_init_cmd_count = r_write_word_count.read(); 2136 2137 for(size_t i=0; i<m_words ; i++) r_write_to_init_cmd_be[i]=r_write_be[i].read(); 2138 2139 size_t min = r_write_word_index.read(); 2140 size_t max = r_write_word_index.read() + r_write_word_count.read(); 2141 for (size_t i=min ; i<max ; i++) r_write_to_init_cmd_data[i] = r_write_data[i]; 2142 2143 if( (r_write_copy.read() != r_write_srcid.read()) or 2144 #if L1_MULTI_CACHE 2145 (r_write_copy_cache.read() != r_write_pktid.read()) or 2146 #endif 2147 r_write_copy_inst.read() ) 2148 { 2149 // put the first srcid in the fifo 2150 write_to_init_cmd_fifo_put = true; 2151 write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); 2152 write_to_init_cmd_fifo_srcid = r_write_copy.read(); 2153 #if L1_MULTI_CACHE 2154 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 2155 #endif 2156 if(r_write_count.read() == 1) 2157 { 2158 r_write_fsm = WRITE_IDLE; 2159 r_write_to_init_cmd_multi_req = true; 2160 } 2161 else 2162 { 2163 r_write_fsm = WRITE_UPT_NEXT; 2164 r_write_to_dec = false; 2165 2166 } 2167 } 2168 else 2169 { 2170 r_write_fsm = WRITE_UPT_NEXT; 2171 r_write_to_dec = false; 2172 } 2173 2174 #if DEBUG_MEMC_WRITE 2175 if( m_debug_write_fsm ) 2176 { 2177 std::cout << " <MEMC " << name() << ".WRITE_UPT_REQ> Post first request to INIT_CMD FSM" 2178 << " / srcid = " << std::dec << r_write_copy.read() 2179 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2180 if ( r_write_count.read() == 1) 2181 std::cout << " ... and this is the last" << std::endl; 2182 } 2183 #endif 2184 } 2185 break; 2186 } 2187 2188 /////////////////// 2189 case WRITE_UPT_NEXT: 2190 { 2191 // continue the multi-update request to INIT_CMD fsm 2192 // when there are copies in the heap. 2193 // if one copy in the heap is the writer itself 2194 // the corresponding SRCID should not be written in the fifo, 2195 // but the UPT counter must be decremented. 2196 // As this decrement is done in the WRITE_UPT_DEC state, 2197 // after the last copy has been found, the decrement request 2198 // must be registered in the r_write_to_dec flip-flop.
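// A compact sketch of the traversal performed by this state, one heap entry per
// cycle ('head_of_copy_list', 'is_writer' and 'post_update' are placeholder names;
// the real code uses r_write_ptr and the write_to_init_cmd fifo):
//
//     size_t ptr = head_of_copy_list;                  // r_write_ptr, loaded from entry.ptr
//     while ( true )
//     {
//         HeapEntry e = m_heap.read(ptr);
//         if ( not is_writer(e.owner) ) post_update(e.owner);     // fifo put
//         else                          dec_upt_counter = true;   // remembered in r_write_to_dec
//         if ( e.next == ptr ) break;                  // the last copy points to itself
//         ptr = e.next;
//     }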
2199 2200 HeapEntry entry = m_heap.read(r_write_ptr.read()); 2201 2202 bool dec_upt_counter; 2203 2204 if( (entry.owner.srcid != r_write_srcid.read()) or 2205 #if L1_MULTI_CACHE 2206 (entry.owner.cache_id != r_write_pktid.read()) or 2207 #endif 2208 entry.owner.inst) // put te next srcid in the fifo 2209 { 2210 dec_upt_counter = false; 2211 write_to_init_cmd_fifo_put = true; 2212 write_to_init_cmd_fifo_inst = entry.owner.inst; 2213 write_to_init_cmd_fifo_srcid = entry.owner.srcid; 2214 #if L1_MULTI_CACHE 2215 write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 2216 #endif 2217 2218 #if DEBUG_MEMC_WRITE 2219 if( m_debug_write_fsm ) 2220 { 2221 std::cout << " <MEMC " << name() << ".WRITE_UPT_NEXT> Post another request to INIT_CMD FSM" 2222 << " / heap_index = " << std::dec << r_write_ptr.read() 2223 << " / srcid = " << std::dec << r_write_copy.read() 2224 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2225 if( entry.next == r_write_ptr.read() ) 2226 std::cout << " ... and this is the last" << std::endl; 2227 } 2228 #endif 2229 } 2230 else // the UPT counter must be decremented 2231 { 2232 dec_upt_counter = true; 2233 2234 #if DEBUG_MEMC_WRITE 2235 if( m_debug_write_fsm ) 2236 { 2237 std::cout << " <MEMC " << name() << ".WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 2238 << " / heap_index = " << std::dec << r_write_ptr.read() 2239 << " / srcid = " << std::dec << r_write_copy.read() 2240 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2241 if( entry.next == r_write_ptr.read() ) 2242 std::cout << " ... and this is the last" << std::endl; 2243 } 2244 #endif 2245 } 2246 2247 // register the possible UPT decrement request 2248 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 2249 2250 if( not m_write_to_init_cmd_inst_fifo.wok() ) 2251 { 2252 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 2253 << "The write_to_init_cmd_fifo should not be full" << std::endl 2254 << "as the depth should be larger than the max number of copies" << std::endl; 2255 exit(0); 2256 } 2257 2258 r_write_ptr = entry.next; 2259 2260 if( entry.next == r_write_ptr.read() ) // last copy 2261 { 2262 r_write_to_init_cmd_multi_req = true; 2263 if( r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 2264 else r_write_fsm = WRITE_IDLE; 2265 } 2266 break; 2267 } 2268 2269 ////////////////// 2270 case WRITE_UPT_DEC: 2271 { 2272 // If the initial writer has a copy, it should not 2273 // receive an update request, but the counter in the 2274 // update table must be decremented by the INIT_RSP FSM. 
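// Worked example (illustrative numbers): assume the UPT entry was registered with
// count = 3 and one of the heap copies belongs to the writer's own data cache.
// WRITE_UPT_NEXT posts only 2 update requests and sets r_write_to_dec, so this
// state asks the INIT_RSP FSM for one extra decrement: the UPT entry completes
// after 2 acknowledgements + 1 decrement = 3, and the write response is then
// sent back to the writer.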
2275 2276 if ( !r_write_to_init_rsp_req.read() ) 2277 { 2278 r_write_to_init_rsp_req = true; 2279 r_write_to_init_rsp_upt_index = r_write_upt_index.read(); 2280 r_write_fsm = WRITE_IDLE; 2281 } 2282 break; 2283 } 2284 2285 /////////////// 2286 case WRITE_RSP: 2287 { 2288 // Post a request to TGT_RSP FSM to acknowledge the write 2289 // In order to increase the Write requests throughput, 2290 // we don't wait to return in the IDLE state to consume 2291 // a new request in the write FIFO 2292 2293 if ( !r_write_to_tgt_rsp_req.read() ) 2294 { 2295 // post the request to TGT_RSP_FSM 2296 r_write_to_tgt_rsp_req = true; 2297 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 2298 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 2299 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 2300 2301 // try to get a new write request from the FIFO 2302 if ( m_cmd_write_addr_fifo.rok() ) 2303 { 2304 m_cpt_write++; 2305 m_cpt_write_cells++; 2306 2307 // consume a word in the FIFO & write it in the local buffer 2308 cmd_write_fifo_get = true; 2309 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 2310 2311 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2312 r_write_word_index = index; 2313 r_write_word_count = 1; 2314 r_write_data[index] = m_cmd_write_data_fifo.read(); 2315 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2316 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2317 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2318 2319 // initialize the be field for all words 2320 for ( size_t i=0 ; i<m_words ; i++ ) 2321 { 2322 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 2323 else r_write_be[i] = 0x0; 2324 } 2325 2326 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 2327 r_write_byte = true; 2328 else 2329 r_write_byte = false; 2330 2331 if( m_cmd_write_eop_fifo.read() ) 2332 { 2333 r_write_fsm = WRITE_DIR_REQ; 2334 } 2335 else 2336 { 2337 r_write_fsm = WRITE_NEXT; 2338 } 2339 } 2340 else 2341 { 2342 r_write_fsm = WRITE_IDLE; 2343 } 2344 2345 #if DEBUG_MEMC_WRITE 2346 if( m_debug_write_fsm ) 2347 { 2348 std::cout << " <MEMC " << name() << ".WRITE_RSP> Post a request to TGT_RSP FSM: rsrcid = " 2349 << std::dec << r_write_srcid.read() << std::endl; 2350 if ( m_cmd_write_addr_fifo.rok() ) 2351 { 2352 std::cout << " New Write request: " 1632 2353 << " srcid = " << std::dec << m_cmd_write_srcid_fifo.read() 1633 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 2354 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 1634 2355 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 1635 } 1636 #endif 1637 } 1638 break; 1639 } 1640 //////////////// 1641 case WRITE_NEXT: // copy next word of a write burst in local buffer 1642 { 1643 if ( m_cmd_write_addr_fifo.rok() ) 1644 { 2356 } 2357 } 2358 #endif 2359 } 2360 break; 2361 } 2362 2363 ///////////////////////// 2364 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 2365 { 2366 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2367 { 1645 2368 1646 2369 #if DEBUG_MEMC_WRITE 1647 if( m_debug_write_fsm ) 1648 { 1649 std::cout << " <MEMC " << name() << ".WRITE_NEXT> Write another word in local buffer" << std::endl; 1650 } 1651 #endif 1652 m_cpt_write_cells++; 1653 1654 // check that the next word is in the same cache line 1655 if ( (m_nline[(vci_addr_t)(r_write_address.read())] != 1656 m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) ) 1657 { 1658 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl; 1659 std::cout << "all words 
in a write burst must be in same cache line" << std::endl; 1660 exit(0); 1661 } 1662 1663 // consume a word in the FIFO & write it in the local buffer 1664 cmd_write_fifo_get=true; 1665 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1666 1667 r_write_be[index] = m_cmd_write_be_fifo.read(); 1668 r_write_data[index] = m_cmd_write_data_fifo.read(); 1669 r_write_word_count = r_write_word_count.read() + 1; 1670 1671 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1672 r_write_byte = true; 1673 1674 if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1675 } 1676 break; 1677 } 1678 //////////////////// 1679 case WRITE_DIR_LOCK: // access directory to check hit/miss 1680 { 1681 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 1682 { 1683 size_t way = 0; 1684 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 1685 1686 if ( entry.valid ) // hit 1687 { 1688 // copy directory entry in local buffer in case of hit 1689 r_write_is_cnt = entry.is_cnt; 1690 r_write_lock = entry.lock; 1691 r_write_tag = entry.tag; 1692 r_write_copy = entry.owner.srcid; 2370 if( m_debug_write_fsm ) 2371 { 2372 std::cout << " <MEMC " << name() << ".WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 2373 } 2374 #endif 2375 size_t hit_index = 0; 2376 size_t wok_index = 0; 2377 vci_addr_t addr = (vci_addr_t)r_write_address.read(); 2378 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], hit_index); 2379 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 2380 bool wok = !m_transaction_tab.full(wok_index); 2381 2382 if ( hit_read ) // register the modified data in TRT 2383 { 2384 r_write_trt_index = hit_index; 2385 r_write_fsm = WRITE_MISS_TRT_DATA; 2386 m_cpt_write_miss++; 2387 } 2388 else if ( wok && !hit_write ) // set a new entry in TRT 2389 { 2390 r_write_trt_index = wok_index; 2391 r_write_fsm = WRITE_MISS_TRT_SET; 2392 m_cpt_write_miss++; 2393 } 2394 else // wait an empty entry in TRT 2395 { 2396 r_write_fsm = WRITE_WAIT; 2397 m_cpt_trt_full++; 2398 } 2399 } 2400 break; 2401 } 2402 2403 //////////////// 2404 case WRITE_WAIT: // release the locks protecting the shared ressources 2405 { 2406 #if DEBUG_MEMC_WRITE 2407 if( m_debug_write_fsm ) 2408 { 2409 std::cout << " <MEMC " << name() << ".WRITE_WAIT> Releases the locks before retry" << std::endl; 2410 } 2411 #endif 2412 r_write_fsm = WRITE_DIR_REQ; 2413 break; 2414 } 2415 2416 //////////////////////// 2417 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 2418 { 2419 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2420 { 2421 std::vector<be_t> be_vector; 2422 std::vector<data_t> data_vector; 2423 be_vector.clear(); 2424 data_vector.clear(); 2425 for ( size_t i=0; i<m_words; i++ ) 2426 { 2427 be_vector.push_back(r_write_be[i]); 2428 data_vector.push_back(r_write_data[i]); 2429 } 2430 m_transaction_tab.set(r_write_trt_index.read(), 2431 true, // read request to XRAM 2432 m_nline[(vci_addr_t)(r_write_address.read())], 2433 r_write_srcid.read(), 2434 r_write_trdid.read(), 2435 r_write_pktid.read(), 2436 false, // not a processor read 2437 0, // not a single word 2438 0, // word index 2439 be_vector, 2440 data_vector); 2441 r_write_fsm = WRITE_MISS_XRAM_REQ; 2442 2443 #if DEBUG_MEMC_WRITE 2444 if( m_debug_write_fsm ) 2445 { 2446 std::cout << " <MEMC " << name() << ".WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 2447 } 2448 #endif 2449 } 2450 break; 2451 } 2452 2453 ///////////////////////// 2454 case WRITE_MISS_TRT_DATA: // update 
an entry in TRT (used as a Write Buffer) 2455 { 2456 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2457 { 2458 std::vector<be_t> be_vector; 2459 std::vector<data_t> data_vector; 2460 be_vector.clear(); 2461 data_vector.clear(); 2462 for ( size_t i=0; i<m_words; i++ ) 2463 { 2464 be_vector.push_back(r_write_be[i]); 2465 data_vector.push_back(r_write_data[i]); 2466 } 2467 m_transaction_tab.write_data_mask(r_write_trt_index.read(), 2468 be_vector, 2469 data_vector); 2470 r_write_fsm = WRITE_RSP; 2471 2472 #if DEBUG_MEMC_WRITE 2473 if( m_debug_write_fsm ) 2474 { 2475 std::cout << " <MEMC " << name() << ".WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 2476 m_transaction_tab.print( r_write_trt_index.read() ); 2477 } 2478 #endif 2479 } 2480 break; 2481 } 2482 2483 ///////////////////////// 2484 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 2485 { 2486 if ( !r_write_to_ixr_cmd_req ) 2487 { 2488 r_write_to_ixr_cmd_req = true; 2489 r_write_to_ixr_cmd_write = false; 2490 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2491 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2492 r_write_fsm = WRITE_RSP; 2493 2494 #if DEBUG_MEMC_WRITE 2495 if( m_debug_write_fsm ) 2496 { 2497 std::cout << " <MEMC " << name() << ".WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 2498 } 2499 #endif 2500 } 2501 break; 2502 } 2503 2504 /////////////////////// 2505 case WRITE_BC_TRT_LOCK: // Check TRT not full 2506 { 2507 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2508 { 2509 size_t wok_index = 0; 2510 bool wok = !m_transaction_tab.full( wok_index ); 2511 if ( wok ) // set a new entry in TRT 2512 { 2513 r_write_trt_index = wok_index; 2514 r_write_fsm = WRITE_BC_UPT_LOCK; 2515 } 2516 else // wait an empty entry in TRT 2517 { 2518 r_write_fsm = WRITE_WAIT; 2519 } 2520 2521 #if DEBUG_MEMC_WRITE 2522 if( m_debug_write_fsm ) 2523 { 2524 std::cout << " <MEMC " << name() << ".WRITE_BC_TRT_LOCK> Check TRT : wok = " 2525 << wok << " / index = " << wok_index << std::endl; 2526 } 2527 #endif 2528 } 2529 break; 2530 } 2531 2532 ////////////////////// 2533 case WRITE_BC_UPT_LOCK: // register BC transaction in UPT 2534 { 2535 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 2536 { 2537 bool wok = false; 2538 size_t index = 0; 2539 size_t srcid = r_write_srcid.read(); 2540 size_t trdid = r_write_trdid.read(); 2541 size_t pktid = r_write_pktid.read(); 2542 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2543 size_t nb_copies = r_write_count.read(); 2544 2545 wok =m_update_tab.set(false, // it's an inval transaction 2546 true, // it's a broadcast 2547 true, // it needs a response 2548 srcid, 2549 trdid, 2550 pktid, 2551 nline, 2552 nb_copies, 2553 index); 2554 2555 #if DEBUG_MEMC_WRITE 2556 if( m_debug_write_fsm ) 2557 { 2558 if ( wok ) 2559 { 2560 std::cout << " <MEMC " << name() << ".WRITE_BC_UPT_LOCK> Register the broadcast inval in UPT / " 2561 << " nb_copies = " << r_write_count.read() << std::endl; 2562 } 2563 } 2564 #endif 2565 r_write_upt_index = index; 2566 2567 if ( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 2568 else r_write_fsm = WRITE_WAIT; 2569 } 2570 break; 2571 } 2572 2573 //////////////////////// 2574 case WRITE_BC_DIR_INVAL: 2575 { 2576 // Register a put transaction to XRAM in TRT 2577 // and invalidate the line in directory 2578 if ( (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE ) || 2579 (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE ) || 2580 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) ) 2581 { 2582 std::cout << 
"VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; 2583 std::cout << "bad TRT, DIR, or UPT allocation" << std::endl; 2584 exit(0); 2585 } 2586 2587 // register a write request to XRAM in TRT 2588 m_transaction_tab.set(r_write_trt_index.read(), 2589 false, // write request to XRAM 2590 m_nline[(vci_addr_t)(r_write_address.read())], 2591 0, 2592 0, 2593 0, 2594 false, // not a processor read 2595 0, // not a single word 2596 0, // word index 2597 std::vector<be_t>(m_words,0), 2598 std::vector<data_t>(m_words,0)); 2599 2600 // invalidate directory entry 2601 DirectoryEntry entry; 2602 entry.valid = false; 2603 entry.dirty = false; 2604 entry.tag = 0; 2605 entry.is_cnt = false; 2606 entry.lock = false; 2607 entry.owner.srcid = 0; 1693 2608 #if L1_MULTI_CACHE 1694 r_write_copy_cache = entry.owner.cache_id; 1695 #endif 1696 r_write_copy_inst = entry.owner.inst; 1697 r_write_count = entry.count; 1698 r_write_ptr = entry.ptr; 1699 r_write_way = way; 1700 1701 if( entry.is_cnt && entry.count ) 1702 { 1703 r_write_fsm = WRITE_DIR_READ; 1704 } 1705 else 1706 { 1707 if (r_write_byte.read()) r_write_fsm = WRITE_DIR_READ; 1708 else r_write_fsm = WRITE_DIR_HIT; 1709 } 1710 } 1711 else // miss 1712 { 1713 r_write_fsm = WRITE_MISS_TRT_LOCK; 1714 } 2609 entry.owner.cache_id= 0; 2610 #endif 2611 entry.owner.inst = false; 2612 entry.ptr = 0; 2613 entry.count = 0; 2614 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 2615 size_t way = r_write_way.read(); 2616 2617 m_cache_directory.write(set, way, entry); 1715 2618 1716 2619 #if DEBUG_MEMC_WRITE 1717 if( m_debug_write_fsm ) 1718 { 1719 std::cout << " <MEMC " << name() << ".WRITE_DIR_LOCK> Check the directory: " 1720 << " address = " << std::hex << r_write_address.read() 1721 << " hit = " << std::dec << entry.valid 1722 << " count = " << entry.count 1723 << " is_cnt = " << entry.is_cnt << std::endl; 1724 } 1725 #endif 1726 } 1727 break; 1728 } 1729 //////////////////// 1730 case WRITE_DIR_READ: // read the cache and complete the buffer when be!=0xF 1731 { 1732 // update local buffer 1733 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1734 size_t way = r_write_way.read(); 1735 for(size_t i=0 ; i<m_words ; i++) 1736 { 1737 data_t mask = 0; 1738 if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; 1739 if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; 1740 if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; 1741 if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; 1742 1743 // complete only if mask is not null (for energy consumption) 1744 if ( r_write_be[i].read() || r_write_is_cnt.read() ) 1745 { 1746 r_write_data[i] = (r_write_data[i].read() & mask) | 1747 (m_cache_data[way][set][i] & ~mask); 1748 } 1749 } // end for 1750 1751 // test if a coherence broadcast is required 1752 if( r_write_is_cnt.read() && r_write_count.read() ) r_write_fsm = WRITE_BC_TRT_LOCK; 1753 else r_write_fsm = WRITE_DIR_HIT; 2620 if( m_debug_write_fsm ) 2621 { 2622 std::cout << " <MEMC " << name() << ".WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ = " 2623 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 2624 } 2625 #endif 2626 r_write_fsm = WRITE_BC_CC_SEND; 2627 break; 2628 } 2629 2630 ////////////////////// 2631 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to INIT_CMD FSM 2632 { 2633 if ( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) 2634 { 2635 r_write_to_init_cmd_multi_req = false; 2636 r_write_to_init_cmd_brdcast_req = 
true; 2637 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 2638 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2639 r_write_to_init_cmd_index = 0; 2640 r_write_to_init_cmd_count = 0; 2641 2642 for(size_t i=0; i<m_words ; i++) 2643 { 2644 r_write_to_init_cmd_be[i]=0; 2645 r_write_to_init_cmd_data[i] = 0; 2646 } 2647 r_write_fsm = WRITE_BC_XRAM_REQ; 1754 2648 1755 2649 #if DEBUG_MEMC_WRITE 1756 if( m_debug_write_fsm ) 1757 { 1758 std::cout << " <MEMC " << name() << ".WRITE_DIR_READ> Read the cache to complete local buffer" << std::endl; 1759 } 1760 #endif 1761 break; 1762 } 1763 /////////////////// 1764 case WRITE_DIR_HIT: // update the cache directory 1765 { 1766 // update directory with Dirty bit 1767 DirectoryEntry entry; 1768 entry.valid = true; 1769 entry.dirty = true; 1770 entry.tag = r_write_tag.read(); 1771 entry.is_cnt = r_write_is_cnt.read(); 1772 entry.lock = r_write_lock.read(); 1773 entry.owner.srcid = r_write_copy.read(); 1774 #if L1_MULTI_CACHE 1775 entry.owner.cache_id = r_write_copy_cache.read(); 1776 #endif 1777 entry.owner.inst = r_write_copy_inst.read(); 1778 entry.count = r_write_count.read(); 1779 entry.ptr = r_write_ptr.read(); 1780 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1781 size_t way = r_write_way.read(); 1782 1783 // update directory 1784 m_cache_directory.write(set, way, entry); 1785 1786 // owner is true when the the first registered copy is the writer itself 1787 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 1788 #if L1_MULTI_CACHE 1789 and (r_write_copy_cache.read()==r_write_pktid.read()) 1790 #endif 1791 ) and not r_write_copy_inst.read()); 1792 1793 // no_update is true when there is no need for coherence transaction 1794 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 1795 1796 // write data in the cache if no coherence transaction 1797 if( no_update ) 1798 { 1799 for(size_t i=0 ; i<m_words ; i++) 1800 { 1801 if ( r_write_be[i].read() ) 1802 { 1803 m_cache_data[way][set][i] = r_write_data[i].read(); 1804 1805 if ( m_monitor_ok ) 1806 { 1807 vci_addr_t address = (r_write_address.read() & ~(vci_addr_t)0x3F) | i<<2; 1808 char buf[80]; 1809 snprintf(buf, 80, "WRITE_DIR_HIT srcid %d", r_write_srcid.read()); 1810 check_monitor( buf, address, r_write_data[i].read() ); 1811 } 1812 } 1813 } 1814 } 1815 1816 if ( owner and not no_update ) r_write_count = r_write_count.read() - 1; 1817 1818 if ( no_update ) // Write transaction completed 1819 { 1820 r_write_fsm = WRITE_RSP; 1821 } 1822 else // coherence update required 1823 { 1824 if( !r_write_to_init_cmd_multi_req.read() && 1825 !r_write_to_init_cmd_brdcast_req.read() ) r_write_fsm = WRITE_UPT_LOCK; 1826 else r_write_fsm = WRITE_WAIT; 1827 } 2650 if( m_debug_write_fsm ) 2651 { 2652 std::cout << " <MEMC " << name() << ".WRITE_BC_CC_SEND> Post a broadcast request to INIT_CMD FSM" << std::endl; 2653 } 2654 #endif 2655 } 2656 break; 2657 } 2658 2659 /////////////////////// 2660 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 2661 { 2662 if ( !r_write_to_ixr_cmd_req ) 2663 { 2664 r_write_to_ixr_cmd_req = true; 2665 r_write_to_ixr_cmd_write = true; 2666 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2667 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2668 2669 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 2670 2671 r_write_fsm = WRITE_IDLE; 1828 2672 1829 2673 #if DEBUG_MEMC_WRITE 1830 if( m_debug_write_fsm ) 1831 { 1832 if ( no_update ) 1833 { 
1834 std::cout << " <MEMC " << name() << ".WRITE_DIR_HIT> Write into cache / No coherence transaction" 1835 << std::endl; 2674 if( m_debug_write_fsm ) 2675 { 2676 std::cout << " <MEMC " << name() << ".WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 2677 } 2678 #endif 2679 } 2680 break; 1836 2681 } 1837 else 1838 { 1839 std::cout << " <MEMC " << name() << ".WRITE_DIR_HIT> Coherence update required:" 1840 << " is_cnt = " << r_write_is_cnt.read() 1841 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 1842 if (owner) 1843 std::cout << " ... but the first copy is the writer" << std::endl; 1844 } 1845 } 1846 #endif 1847 break; 1848 } 1849 //////////////////// 1850 case WRITE_UPT_LOCK: // Try to register the update request in UPT 1851 { 1852 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 1853 { 1854 bool wok = false; 1855 size_t index = 0; 1856 size_t srcid = r_write_srcid.read(); 1857 size_t trdid = r_write_trdid.read(); 1858 size_t pktid = r_write_pktid.read(); 1859 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1860 size_t nb_copies = r_write_count.read(); 1861 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1862 size_t way = r_write_way.read(); 1863 1864 wok = m_update_tab.set(true, // it's an update transaction 1865 false, // it's not a broadcast 1866 true, // it needs a response 1867 srcid, 1868 trdid, 1869 pktid, 1870 nline, 1871 nb_copies, 1872 index); 1873 if ( wok ) // write data in cache 1874 { 1875 for(size_t i=0 ; i<m_words ; i++) 1876 { 1877 if ( r_write_be[i].read() ) 1878 { 1879 m_cache_data[way][set][i] = r_write_data[i].read(); 1880 1881 if ( m_monitor_ok ) 1882 { 1883 vci_addr_t address = (r_write_address.read() & ~(vci_addr_t)0x3F) | i<<2; 1884 char buf[80]; 1885 snprintf(buf, 80, "WRITE_UPT_LOCK srcid %d", srcid); 1886 check_monitor(buf, address, r_write_data[i].read() ); 1887 } 1888 } 1889 } 1890 } 1891 1892 #if DEBUG_MEMC_WRITE 1893 if( m_debug_write_fsm ) 1894 { 1895 if ( wok ) 1896 { 1897 std::cout << " <MEMC " << name() << ".WRITE_UPT_LOCK> Register the multicast update in UPT / " 1898 << " nb_copies = " << r_write_count.read() << std::endl; 1899 } 1900 } 1901 #endif 1902 r_write_upt_index = index; 1903 // releases the lock protecting UPT and the DIR if no entry... 
1904 if ( wok ) r_write_fsm = WRITE_UPT_HEAP_LOCK; 1905 else r_write_fsm = WRITE_WAIT; 1906 } 1907 break; 1908 } 1909 ///////////////////////// 1910 case WRITE_UPT_HEAP_LOCK: // get access to heap 1911 { 1912 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ) 1913 { 1914 1915 #if DEBUG_MEMC_WRITE 1916 if( m_debug_write_fsm ) 1917 { 1918 std::cout << " <MEMC " << name() << ".WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 1919 } 1920 #endif 1921 r_write_fsm = WRITE_UPT_REQ; 1922 } 1923 break; 1924 } 1925 ////////////////// 1926 case WRITE_UPT_REQ: // prepare the coherence ransaction for the INIT_CMD FSM 1927 // and write the first copy in the FIFO 1928 // send the request if only one copy 1929 { 1930 if( !r_write_to_init_cmd_multi_req.read() && 1931 !r_write_to_init_cmd_brdcast_req.read() ) // no pending coherence request 1932 { 1933 r_write_to_init_cmd_brdcast_req = false; 1934 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1935 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1936 r_write_to_init_cmd_index = r_write_word_index.read(); 1937 r_write_to_init_cmd_count = r_write_word_count.read(); 1938 1939 for(size_t i=0; i<m_words ; i++) r_write_to_init_cmd_be[i]=r_write_be[i].read(); 1940 1941 size_t min = r_write_word_index.read(); 1942 size_t max = r_write_word_index.read() + r_write_word_count.read(); 1943 for (size_t i=min ; i<max ; i++) r_write_to_init_cmd_data[i] = r_write_data[i]; 1944 1945 if( (r_write_copy.read() != r_write_srcid.read()) or 1946 #if L1_MULTI_CACHE 1947 (r_write_copy_cache.read() != r_write_pktid.read()) or 1948 #endif 1949 r_write_copy_inst.read() ) 1950 { 1951 // put the first srcid in the fifo 1952 write_to_init_cmd_fifo_put = true; 1953 write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); 1954 write_to_init_cmd_fifo_srcid = r_write_copy.read(); 1955 #if L1_MULTI_CACHE 1956 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 1957 #endif 1958 if(r_write_count.read() == 1) 1959 { 1960 r_write_fsm = WRITE_IDLE; 1961 r_write_to_init_cmd_multi_req = true; 1962 } 1963 else 1964 { 1965 r_write_fsm = WRITE_UPT_NEXT; 1966 r_write_to_dec = false; 1967 1968 } 1969 } 1970 else 1971 { 1972 r_write_fsm = WRITE_UPT_NEXT; 1973 r_write_to_dec = false; 1974 } 1975 1976 #if DEBUG_MEMC_WRITE 1977 if( m_debug_write_fsm ) 1978 { 1979 std::cout << " <MEMC " << name() << ".WRITE_UPT_REQ> Post first request to INIT_CMD FSM" 1980 << " / srcid = " << std::dec << r_write_copy.read() 1981 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 1982 if ( r_write_count.read() == 1) 1983 std::cout << " ... and this is the last" << std::endl; 1984 } 1985 #endif 1986 } 1987 break; 1988 } 1989 /////////////////// 1990 case WRITE_UPT_NEXT: // continue the multi-update request to INIT_CMD fsm 1991 // when there is copies in the heap. 1992 // if one copy in the heap is the writer itself 1993 // the corresponding SRCID should not be written in the fifo, 1994 // but the UPT counter must be decremented. 1995 // As this decrement is done in the WRITE_UPT_DEC state, 1996 // after the last copy has been found, the decrement request 1997 // must be registered in the r_write_to_dec flip-flop. 
1998 { 1999 HeapEntry entry = m_heap.read(r_write_ptr.read()); 2000 2001 bool dec_upt_counter; 2002 2003 if( (entry.owner.srcid != r_write_srcid.read()) or 2004 #if L1_MULTI_CACHE 2005 (entry.owner.cache_id != r_write_pktid.read()) or 2006 #endif 2007 entry.owner.inst) // put te next srcid in the fifo 2008 { 2009 dec_upt_counter = false; 2010 write_to_init_cmd_fifo_put = true; 2011 write_to_init_cmd_fifo_inst = entry.owner.inst; 2012 write_to_init_cmd_fifo_srcid = entry.owner.srcid; 2013 #if L1_MULTI_CACHE 2014 write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 2015 #endif 2016 2017 #if DEBUG_MEMC_WRITE 2018 if( m_debug_write_fsm ) 2019 { 2020 std::cout << " <MEMC " << name() << ".WRITE_UPT_NEXT> Post another request to INIT_CMD FSM" 2021 << " / heap_index = " << std::dec << r_write_ptr.read() 2022 << " / srcid = " << std::dec << r_write_copy.read() 2023 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2024 if( entry.next == r_write_ptr.read() ) 2025 std::cout << " ... and this is the last" << std::endl; 2026 } 2027 #endif 2028 } 2029 else // the UPT counter must be decremented 2030 { 2031 dec_upt_counter = true; 2032 2033 #if DEBUG_MEMC_WRITE 2034 if( m_debug_write_fsm ) 2035 { 2036 std::cout << " <MEMC " << name() << ".WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 2037 << " / heap_index = " << std::dec << r_write_ptr.read() 2038 << " / srcid = " << std::dec << r_write_copy.read() 2039 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 2040 if( entry.next == r_write_ptr.read() ) 2041 std::cout << " ... and this is the last" << std::endl; 2042 } 2043 #endif 2044 } 2045 2046 // register the possible UPT decrement request 2047 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 2048 2049 if( not m_write_to_init_cmd_inst_fifo.wok() ) 2050 { 2051 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 2052 << "The write_to_init_cmd_fifo should not be full" << std::endl 2053 << "as the depth should be larger than the max number of copies" << std::endl; 2054 exit(0); 2055 } 2056 2057 r_write_ptr = entry.next; 2058 2059 if( entry.next == r_write_ptr.read() ) // last copy 2060 { 2061 r_write_to_init_cmd_multi_req = true; 2062 if( r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 2063 else r_write_fsm = WRITE_IDLE; 2064 } 2065 break; 2066 } 2067 ////////////////// 2068 case WRITE_UPT_DEC: // If the initial writer has a copy, it should not 2069 // receive an update request, but the counter in the 2070 // update table must be decremented by the INIT_RSP FSM. 
2071 { 2072 if ( !r_write_to_init_rsp_req.read() ) 2073 { 2074 r_write_to_init_rsp_req = true; 2075 r_write_to_init_rsp_upt_index = r_write_upt_index.read(); 2076 r_write_fsm = WRITE_IDLE; 2077 } 2078 break; 2079 } 2080 /////////////// 2081 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 2082 // In order to increase the Write requests throughput, 2083 // we don't wait to return in the IDLE state to consume 2084 // a new request in the write FIFO 2085 { 2086 if ( !r_write_to_tgt_rsp_req.read() ) 2087 { 2088 // post the request to TGT_RSP_FSM 2089 r_write_to_tgt_rsp_req = true; 2090 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 2091 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 2092 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 2093 2094 // try to get a new write request from the FIFO 2095 if ( m_cmd_write_addr_fifo.rok() ) 2096 { 2097 m_cpt_write++; 2098 m_cpt_write_cells++; 2099 2100 // consume a word in the FIFO & write it in the local buffer 2101 cmd_write_fifo_get = true; 2102 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 2103 2104 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2105 r_write_word_index = index; 2106 r_write_word_count = 1; 2107 r_write_data[index] = m_cmd_write_data_fifo.read(); 2108 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2109 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2110 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2111 2112 // initialize the be field for all words 2113 for ( size_t i=0 ; i<m_words ; i++ ) 2114 { 2115 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 2116 else r_write_be[i] = 0x0; 2117 } 2118 2119 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 2120 r_write_byte = true; 2121 else 2122 r_write_byte = false; 2123 2124 if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 2125 else r_write_fsm = WRITE_NEXT; 2126 } 2127 else 2128 { 2129 r_write_fsm = WRITE_IDLE; 2130 } 2131 2132 #if DEBUG_MEMC_WRITE 2133 if( m_debug_write_fsm ) 2134 { 2135 std::cout << " <MEMC " << name() << ".WRITE_RSP> Post a request to TGT_RSP FSM: rsrcid = " 2136 << std::dec << r_write_srcid.read() << std::endl; 2137 if ( m_cmd_write_addr_fifo.rok() ) 2138 { 2139 std::cout << " New Write request: " 2140 << " srcid = " << std::dec << m_cmd_write_srcid_fifo.read() 2141 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 2142 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 2143 } 2144 } 2145 #endif 2146 } 2147 break; 2148 } 2149 ///////////////////////// 2150 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 2151 { 2152 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2153 { 2154 2155 #if DEBUG_MEMC_WRITE 2156 if( m_debug_write_fsm ) 2157 { 2158 std::cout << " <MEMC " << name() << ".WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 2159 } 2160 #endif 2161 size_t hit_index = 0; 2162 size_t wok_index = 0; 2163 vci_addr_t addr = (vci_addr_t)r_write_address.read(); 2164 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], hit_index); 2165 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 2166 bool wok = !m_transaction_tab.full(wok_index); 2167 2168 if ( hit_read ) // register the modified data in TRT 2169 { 2170 r_write_trt_index = hit_index; 2171 r_write_fsm = WRITE_MISS_TRT_DATA; 2172 m_cpt_write_miss++; 2173 } 2174 else if ( wok && !hit_write ) // set a new entry in TRT 2175 { 2176 r_write_trt_index = wok_index; 2177 r_write_fsm = WRITE_MISS_TRT_SET; 2178 m_cpt_write_miss++; 2179 } 2180 else // wait an 
empty entry in TRT 2181 { 2182 r_write_fsm = WRITE_WAIT; 2183 m_cpt_trt_full++; 2184 } 2185 } 2186 break; 2187 } 2188 //////////////// 2189 case WRITE_WAIT: // release the locks protecting the shared ressources 2190 { 2191 2192 #if DEBUG_MEMC_WRITE 2193 if( m_debug_write_fsm ) 2194 { 2195 std::cout << " <MEMC " << name() << ".WRITE_WAIT> Releases the locks before retry" << std::endl; 2196 } 2197 #endif 2198 r_write_fsm = WRITE_DIR_LOCK; 2199 break; 2200 } 2201 //////////////////////// 2202 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 2203 { 2204 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2205 { 2206 std::vector<be_t> be_vector; 2207 std::vector<data_t> data_vector; 2208 be_vector.clear(); 2209 data_vector.clear(); 2210 for ( size_t i=0; i<m_words; i++ ) 2211 { 2212 be_vector.push_back(r_write_be[i]); 2213 data_vector.push_back(r_write_data[i]); 2214 } 2215 m_transaction_tab.set(r_write_trt_index.read(), 2216 true, // read request to XRAM 2217 m_nline[(vci_addr_t)(r_write_address.read())], 2218 r_write_srcid.read(), 2219 r_write_trdid.read(), 2220 r_write_pktid.read(), 2221 false, // not a processor read 2222 0, // not a single word 2223 0, // word index 2224 be_vector, 2225 data_vector); 2226 r_write_fsm = WRITE_MISS_XRAM_REQ; 2227 2228 #if DEBUG_MEMC_WRITE 2229 if( m_debug_write_fsm ) 2230 { 2231 std::cout << " <MEMC " << name() << ".WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 2232 } 2233 #endif 2234 } 2235 break; 2236 } 2237 ///////////////////////// 2238 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 2239 { 2240 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2241 { 2242 std::vector<be_t> be_vector; 2243 std::vector<data_t> data_vector; 2244 be_vector.clear(); 2245 data_vector.clear(); 2246 for ( size_t i=0; i<m_words; i++ ) 2247 { 2248 be_vector.push_back(r_write_be[i]); 2249 data_vector.push_back(r_write_data[i]); 2250 } 2251 m_transaction_tab.write_data_mask(r_write_trt_index.read(), 2252 be_vector, 2253 data_vector); 2254 r_write_fsm = WRITE_RSP; 2255 2256 #if DEBUG_MEMC_WRITE 2257 if( m_debug_write_fsm ) 2258 { 2259 std::cout << " <MEMC " << name() << ".WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 2260 m_transaction_tab.print( r_write_trt_index.read() ); 2261 } 2262 #endif 2263 } 2264 break; 2265 } 2266 ///////////////////////// 2267 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 2268 { 2269 if ( !r_write_to_ixr_cmd_req ) 2270 { 2271 r_write_to_ixr_cmd_req = true; 2272 r_write_to_ixr_cmd_write = false; 2273 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2274 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2275 r_write_fsm = WRITE_RSP; 2276 2277 #if DEBUG_MEMC_WRITE 2278 if( m_debug_write_fsm ) 2279 { 2280 std::cout << " <MEMC " << name() << ".WRITE_MISS_XRAM_REQ> Post a GET request to the IXR_CMD FSM" << std::endl; 2281 } 2282 #endif 2283 } 2284 break; 2285 } 2286 /////////////////////// 2287 case WRITE_BC_TRT_LOCK: // Check TRT not full 2288 { 2289 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2290 { 2291 size_t wok_index = 0; 2292 bool wok = !m_transaction_tab.full( wok_index ); 2293 if ( wok ) // set a new entry in TRT 2294 { 2295 r_write_trt_index = wok_index; 2296 r_write_fsm = WRITE_BC_UPT_LOCK; 2297 } 2298 else // wait an empty entry in TRT 2299 { 2300 r_write_fsm = WRITE_WAIT; 2301 } 2302 2303 #if DEBUG_MEMC_WRITE 2304 if( m_debug_write_fsm ) 2305 { 2306 std::cout << " <MEMC " << name() << ".WRITE_BC_TRT_LOCK> Check TRT 
: wok = " 2307 << wok << " / index = " << wok_index << std::endl; 2308 } 2309 #endif 2310 } 2311 break; 2312 } 2313 ////////////////////// 2314 case WRITE_BC_UPT_LOCK: // register BC transaction in UPT 2315 { 2316 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 2317 { 2318 bool wok = false; 2319 size_t index = 0; 2320 size_t srcid = r_write_srcid.read(); 2321 size_t trdid = r_write_trdid.read(); 2322 size_t pktid = r_write_pktid.read(); 2323 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2324 size_t nb_copies = r_write_count.read(); 2325 2326 wok =m_update_tab.set(false, // it's an inval transaction 2327 true, // it's a broadcast 2328 true, // it needs a response 2329 srcid, 2330 trdid, 2331 pktid, 2332 nline, 2333 nb_copies, 2334 index); 2335 2336 #if DEBUG_MEMC_WRITE 2337 if( m_debug_write_fsm ) 2338 { 2339 if ( wok ) 2340 { 2341 std::cout << " <MEMC " << name() << ".WRITE_BC_UPT_LOCK> Register the broadcast inval in UPT / " 2342 << " nb_copies = " << r_write_count.read() << std::endl; 2343 } 2344 } 2345 #endif 2346 r_write_upt_index = index; 2347 2348 if ( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 2349 else r_write_fsm = WRITE_WAIT; 2350 } 2351 break; 2352 } 2353 //////////////////////// 2354 case WRITE_BC_DIR_INVAL: // Register a put transaction to XRAM in TRT 2355 // and invalidate the line in directory 2356 { 2357 if ( (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE ) || 2358 (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE ) || 2359 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) ) 2360 { 2361 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; 2362 std::cout << "bad TRT, DIR, or UPT allocation" << std::endl; 2363 exit(0); 2364 } 2365 2366 // register a write request to XRAM in TRT 2367 m_transaction_tab.set(r_write_trt_index.read(), 2368 false, // write request to XRAM 2369 m_nline[(vci_addr_t)(r_write_address.read())], 2370 0, 2371 0, 2372 0, 2373 false, // not a processor read 2374 0, // not a single word 2375 0, // word index 2376 std::vector<be_t>(m_words,0), 2377 std::vector<data_t>(m_words,0)); 2378 // invalidate directory entry 2379 DirectoryEntry entry; 2380 entry.valid = false; 2381 entry.dirty = false; 2382 entry.tag = 0; 2383 entry.is_cnt = false; 2384 entry.lock = false; 2385 entry.owner.srcid = 0; 2386 #if L1_MULTI_CACHE 2387 entry.owner.cache_id= 0; 2388 #endif 2389 entry.owner.inst = false; 2390 entry.ptr = 0; 2391 entry.count = 0; 2392 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 2393 size_t way = r_write_way.read(); 2394 2395 m_cache_directory.write(set, way, entry); 2396 2397 #if DEBUG_MEMC_WRITE 2398 if( m_debug_write_fsm ) 2399 { 2400 std::cout << " <MEMC " << name() << ".WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ = " 2401 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 2402 } 2403 #endif 2404 r_write_fsm = WRITE_BC_CC_SEND; 2405 break; 2406 } 2407 ////////////////////// 2408 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to INIT_CMD FSM 2409 { 2410 if ( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) 2411 { 2412 r_write_to_init_cmd_multi_req = false; 2413 r_write_to_init_cmd_brdcast_req = true; 2414 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 2415 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2416 r_write_to_init_cmd_index = 0; 2417 r_write_to_init_cmd_count = 0; 2418 2419 for(size_t i=0; i<m_words ; i++) 2420 { 2421 r_write_to_init_cmd_be[i]=0; 2422 r_write_to_init_cmd_data[i] 
= 0; 2423 } 2424 r_write_fsm = WRITE_BC_XRAM_REQ; 2425 2426 #if DEBUG_MEMC_WRITE 2427 if( m_debug_write_fsm ) 2428 { 2429 std::cout << " <MEMC " << name() << ".WRITE_BC_CC_SEND> Post a broadcast request to INIT_CMD FSM" << std::endl; 2430 } 2431 #endif 2432 } 2433 break; 2434 } 2435 /////////////////////// 2436 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 2437 { 2438 if ( !r_write_to_ixr_cmd_req ) 2439 { 2440 r_write_to_ixr_cmd_req = true; 2441 r_write_to_ixr_cmd_write = true; 2442 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2443 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2444 2445 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 2446 2447 r_write_fsm = WRITE_IDLE; 2448 2449 #if DEBUG_MEMC_WRITE 2450 if( m_debug_write_fsm ) 2451 { 2452 std::cout << " <MEMC " << name() << ".WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 2453 } 2454 #endif 2455 } 2456 break; 2457 } 2458 } // end switch r_write_fsm 2682 } // end switch r_write_fsm 2459 2683 2460 2684 /////////////////////////////////////////////////////////////////////// 2461 // 2685 // IXR_CMD FSM 2462 2686 /////////////////////////////////////////////////////////////////////// 2463 2687 // The IXR_CMD fsm controls the command packets to the XRAM : 2464 2688 // - It sends a single cell VCI read request to the XRAM in case of MISS 2465 // posted by the READ, WRITE or SC FSMs : the TRDID field contains 2689 // posted by the READ, WRITE or SC FSMs : the TRDID field contains 2466 2690 // the Transaction Tab index. 2467 2691 // The VCI response is a multi-cell packet : the N cells contain 2468 2692 // the N data words. 2469 2693 // - It sends a multi-cell VCI write when the XRAM_RSP FSM, WRITE FSM 2470 // or SC FSM request to save a dirty line to the XRAM. 2694 // or SC FSM request to save a dirty line to the XRAM. 2471 2695 // The VCI response is a single cell packet. 2472 // This FSM handles requests from the READ, WRITE, SC & XRAM_RSP FSMs 2696 // This FSM handles requests from the READ, WRITE, SC & XRAM_RSP FSMs 2473 2697 // with a round-robin priority. 
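Note on the arbitration described above: each client (READ, WRITE, SC, XRAM_RSP) has its own IDLE state, and from that state the three other request flags are polled before the flag of the client that was just served. The stand-alone sketch below shows the equivalent round-robin selection; the names (IxrClient, ixr_select, req[]) are illustrative only and are not the module's registers or states.

#include <cstddef>

// Round-robin selection equivalent to the four IXR_CMD_*_IDLE states:
// the client served last is polled with the lowest priority.
enum IxrClient { IXR_READ = 0, IXR_WRITE = 1, IXR_SC = 2, IXR_XRAM = 3, IXR_NONE = 4 };

IxrClient ixr_select(IxrClient last_served, const bool req[4])
{
    for (size_t i = 1; i <= 4; ++i)                   // start just after the last served client
    {
        IxrClient c = IxrClient((last_served + i) % 4);
        if (req[c]) return c;                         // first pending request wins
    }
    return IXR_NONE;                                  // no request pending
}

Encoding the arbiter as four IDLE states (rather than a separate priority register) keeps the FSM purely combinational on the request flags, which is the design choice visible in the diff.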
2474 2698 //////////////////////////////////////////////////////////////////////// 2475 2699 2476 switch ( r_ixr_cmd_fsm.read() ) 2700 switch ( r_ixr_cmd_fsm.read() ) 2477 2701 { 2478 //////////////////////// 2702 //////////////////////// 2479 2703 case IXR_CMD_READ_IDLE: 2480 2704 if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; … … 2483 2707 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 2484 2708 break; 2485 //////////////////////// 2709 //////////////////////// 2486 2710 case IXR_CMD_WRITE_IDLE: 2487 2711 if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; … … 2490 2714 else if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 2491 2715 break; 2492 //////////////////////// 2716 //////////////////////// 2493 2717 case IXR_CMD_SC_IDLE: 2494 2718 if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; … … 2497 2721 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 2498 2722 break; 2499 //////////////////////// 2723 //////////////////////// 2500 2724 case IXR_CMD_XRAM_IDLE: 2501 2725 if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; … … 2506 2730 ///////////////////////// // send a get request to XRAM 2507 2731 case IXR_CMD_READ_NLINE: 2508 if ( p_vci_ixr.cmdack ) 2509 { 2510 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 2732 if ( p_vci_ixr.cmdack ) 2733 { 2734 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 2511 2735 r_read_to_ixr_cmd_req = false; 2512 2736 … … 2520 2744 break; 2521 2745 ////////////////////////// 2522 case IXR_CMD_WRITE_NLINE: 2523 if ( p_vci_ixr.cmdack ) 2746 case IXR_CMD_WRITE_NLINE: // send a put or get command to XRAM 2747 if ( p_vci_ixr.cmdack ) 2524 2748 { 2525 2749 if( r_write_to_ixr_cmd_write.read()) 2526 2750 { 2527 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2751 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2528 2752 { 2529 2753 r_ixr_cmd_cpt = 0; 2530 2754 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2531 2755 r_write_to_ixr_cmd_req = false; 2532 } 2533 else 2756 } 2757 else 2534 2758 { 2535 2759 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; … … 2542 2766 } 2543 2767 #endif 2544 } 2545 else 2546 { 2547 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2768 } 2769 else 2770 { 2771 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2548 2772 r_write_to_ixr_cmd_req = false; 2549 2773 … … 2559 2783 ////////////////////// 2560 2784 case IXR_CMD_SC_NLINE: // send a put or get command to XRAM 2561 if ( p_vci_ixr.cmdack ) 2785 if ( p_vci_ixr.cmdack ) 2562 2786 { 2563 2787 if( r_sc_to_ixr_cmd_write.read()) 2564 2788 { 2565 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2789 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2566 2790 { 2567 2791 r_ixr_cmd_cpt = 0; 2568 2792 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2569 2793 r_sc_to_ixr_cmd_req = false; 2570 } 2571 else 2794 } 2795 else 2572 2796 { 2573 2797 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; … … 2580 2804 } 2581 2805 #endif 2582 } 2583 else 2584 { 2585 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2806 } 2807 else 2808 { 2809 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2586 2810 r_sc_to_ixr_cmd_req = false; 2587 2811 … … 2597 2821 //////////////////////// 2598 2822 case IXR_CMD_XRAM_DATA: // send a put command to XRAM 2599 if ( p_vci_ixr.cmdack ) 2600 { 2601 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2823 if ( p_vci_ixr.cmdack ) 2824 { 2825 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2602 2826 { 2603 2827 r_ixr_cmd_cpt = 0; 2604 2828 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 2605 2829 r_xram_rsp_to_ixr_cmd_req = false; 2606 } 2607 else 2830 } 2831 else 2608 2832 { 2609 2833 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; … … 2631 2855 // 
The FSM takes the lock protecting the TRT, and the corresponding 2632 2856 // entry is erased. 2633 // 2857 // 2634 2858 // - A response to a get request is a multi-cell VCI packet. 2635 2859 // The Transaction Tab index is contained in the RTRDID field. … … 2640 2864 /////////////////////////////////////////////////////////////////////////////// 2641 2865 2642 switch ( r_ixr_rsp_fsm.read() ) 2866 switch ( r_ixr_rsp_fsm.read() ) 2643 2867 { 2644 2868 ////////////////// 2645 case IXR_RSP_IDLE: 2646 { 2647 if ( p_vci_ixr.rspval.read() ) 2869 case IXR_RSP_IDLE: // test if it's a get or a put transaction 2870 { 2871 if ( p_vci_ixr.rspval.read() ) 2648 2872 { 2649 2873 r_ixr_rsp_cpt = 0; … … 2654 2878 2655 2879 #if DEBUG_MEMC_IXR_RSP 2656 if( m_debug_ixr_rsp_fsm ) 2880 if( m_debug_ixr_rsp_fsm ) 2657 2881 { 2658 std::cout << " <MEMC " << name() << ".IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 2882 std::cout << " <MEMC " << name() << ".IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 2659 2883 } 2660 2884 #endif 2661 2885 } 2662 else 2886 else // get transaction 2663 2887 { 2664 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 2665 2666 #if DEBUG_MEMC_IXR_RSP 2667 if( m_debug_ixr_rsp_fsm ) 2668 { 2669 std::cout << " <MEMC " << name() << ".IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 2670 } 2671 #endif 2672 } 2673 } 2674 break; 2675 } 2676 //////////////////////// 2677 case IXR_RSP_ACK: // Aknowledge the VCI response 2678 { 2679 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 2680 2681 #if DEBUG_MEMC_IXR_RSP 2682 if( m_debug_ixr_rsp_fsm ) 2683 { 2684 std::cout << " <MEMC " << name() << ".IXR_RSP_ACK>" << std::endl; 2685 } 2686 #endif 2687 break; 2688 } 2689 //////////////////////// 2690 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 2691 { 2692 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) 2693 { 2694 m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); 2695 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2888 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 2696 2889 2697 2890 #if DEBUG_MEMC_IXR_RSP 2698 2891 if( m_debug_ixr_rsp_fsm ) 2699 2892 { 2700 std::cout << " <MEMC " << name() << ".IXR_RSP_TRT_ERASE> Erase TRT entry " 2701 << r_ixr_rsp_trt_index.read() << std::endl; 2893 std::cout << " <MEMC " << name() << ".IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 2702 2894 } 2703 2895 #endif 2896 } 2704 2897 } 2705 2898 break; 2706 2899 } 2900 //////////////////////// 2901 case IXR_RSP_ACK: // Aknowledge the VCI response 2902 { 2903 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 2904 2905 #if DEBUG_MEMC_IXR_RSP 2906 if( m_debug_ixr_rsp_fsm ) 2907 { 2908 std::cout << " <MEMC " << name() << ".IXR_RSP_ACK>" << std::endl; 2909 } 2910 #endif 2911 break; 2912 } 2913 //////////////////////// 2914 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 2915 { 2916 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) 2917 { 2918 m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); 2919 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2920 2921 #if DEBUG_MEMC_IXR_RSP 2922 if( m_debug_ixr_rsp_fsm ) 2923 { 2924 std::cout << " <MEMC " << name() << ".IXR_RSP_TRT_ERASE> Erase TRT entry " 2925 << r_ixr_rsp_trt_index.read() << std::endl; 2926 } 2927 #endif 2928 } 2929 break; 2930 } 2707 2931 /////////////////////// 2708 case IXR_RSP_TRT_READ: 2709 { 2710 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) 2932 case IXR_RSP_TRT_READ: // write data in the TRT 2933 { 2934 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && 
p_vci_ixr.rspval ) 2711 2935 { 2712 2936 size_t index = r_ixr_rsp_trt_index.read(); 2713 bool eop 2714 data_t data 2937 bool eop = p_vci_ixr.reop.read(); 2938 data_t data = p_vci_ixr.rdata.read(); 2715 2939 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 2716 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || p_vci_ixr.rerror.read()) 2940 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || p_vci_ixr.rerror.read()) 2717 2941 and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); 2718 m_transaction_tab.write_rsp(index, 2719 r_ixr_rsp_cpt.read(), 2720 data, 2942 m_transaction_tab.write_rsp(index, 2943 r_ixr_rsp_cpt.read(), 2944 data, 2721 2945 error); 2722 2946 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; 2723 if ( eop ) 2947 if ( eop ) 2724 2948 { 2725 2949 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; … … 2751 2975 // a round-robin priority... 2752 2976 // 2753 // When a response is available, the corresponding TRT entry 2754 // must be copied in a local buffer to be written in the cache. 2977 // When a response is available, the corresponding TRT entry 2978 // must be copied in a local buffer to be written in the cache. 2755 2979 // The FSM takes the lock protecting the TRT, and the lock protecting the DIR. 2756 2980 // It selects a cache slot and writes the line in the cache. … … 2759 2983 // If there is no empty slot, a victim line is evicted, and 2760 2984 // invalidate requests are sent to the L1 caches containing copies. 2761 // If this line is dirty, the XRAM_RSP FSM send a request to the IXR_CMD 2985 // If this line is dirty, the XRAM_RSP FSM send a request to the IXR_CMD 2762 2986 // FSM to save the victim line to the XRAM, and register the write transaction 2763 2987 // in the TRT (using the entry previously used by the read transaction). 
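For readers following the index arithmetic in the states below: the TRT holds the cache-line number (nline), from which the byte address, the set index, and the directory tag are derived. The snippet is only a sketch of that arithmetic, assuming a power-of-two number of sets; the module itself slices the byte address with the m_y functor and uses the m_sets instance parameter.

#include <cstdint>
#include <cstddef>

// Hedged sketch of the line/set/tag arithmetic used by the XRAM_RSP FSM.
uint64_t line_byte_address(uint64_t nline, size_t nwords) { return nline * nwords * 4; }    // as in nline * m_words * 4
size_t   line_set_index   (uint64_t nline, size_t nsets)  { return size_t(nline % nsets); } // assumed equivalent to m_y[address]
uint64_t line_dir_tag     (uint64_t nline, size_t nsets)  { return nline / nsets; }         // as in entry.tag = nline / m_sets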
2764 2988 /////////////////////////////////////////////////////////////////////////////// 2765 2989 2766 switch ( r_xram_rsp_fsm.read() ) 2990 switch ( r_xram_rsp_fsm.read() ) 2767 2991 { 2768 2992 /////////////////// 2769 case XRAM_RSP_IDLE: 2993 case XRAM_RSP_IDLE: // scan the XRAM responses to get the TRT index (round robin) 2770 2994 { 2771 2995 size_t ptr = r_xram_rsp_trt_index.read(); … … 2776 3000 if ( r_ixr_rsp_to_xram_rsp_rok[index] ) 2777 3001 { 2778 r_xram_rsp_trt_index 2779 r_ixr_rsp_to_xram_rsp_rok[index] 2780 r_xram_rsp_fsm 3002 r_xram_rsp_trt_index = index; 3003 r_ixr_rsp_to_xram_rsp_rok[index] = false; 3004 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2781 3005 2782 3006 #if DEBUG_MEMC_XRAM_RSP 2783 3007 if( m_debug_xram_rsp_fsm ) 2784 { 3008 { 2785 3009 std::cout << " <MEMC " << name() << ".XRAM_RSP_IDLE> Available cache line in TRT:" 2786 3010 << " index = " << std::dec << index << std::endl; … … 2790 3014 } 2791 3015 } 2792 break; 3016 break; 2793 3017 } 2794 3018 /////////////////////// 2795 case XRAM_RSP_DIR_LOCK: // Takes the lock on the directory 2796 { 2797 if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) 2798 { 3019 case XRAM_RSP_DIR_LOCK: 3020 // Takes the lock on the directory 3021 // Takes the lock on TRT 3022 // Copy the TRT entry in a local buffer 3023 { 3024 if (( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) && 3025 ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP )) 3026 { 3027 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 3028 size_t index = r_xram_rsp_trt_index.read(); 3029 3030 TransactionTabEntry trt_entry(m_transaction_tab.read(index)); 3031 r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer 3032 2799 3033 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 2800 3034 2801 3035 #if DEBUG_MEMC_XRAM_RSP 2802 3036 if( m_debug_xram_rsp_fsm ) 2803 { 3037 { 2804 3038 std::cout << " <MEMC " << name() << ".XRAM_RSP_DIR_LOCK> Get access to directory" << std::endl; 2805 3039 } … … 2809 3043 } 2810 3044 /////////////////////// 2811 case XRAM_RSP_TRT_COPY: // Takes the lock on TRT 2812 // Copy the TRT entry in a local buffer 2813 // and select a victim cache line 2814 { 2815 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 2816 { 2817 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 2818 size_t index = r_xram_rsp_trt_index.read(); 2819 TransactionTabEntry trt_entry(m_transaction_tab.read(index)); 2820 r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer 2821 3045 case XRAM_RSP_TRT_COPY: 3046 // Select a victim cache line 3047 { 3048 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 3049 { 2822 3050 // selects & extracts a victim line from cache 2823 3051 size_t way = 0; 2824 size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)]; 3052 size_t set = m_y[(vci_addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 3053 2825 3054 DirectoryEntry victim(m_cache_directory.select(set, way)); 2826 3055 … … 2828 3057 2829 3058 // copy the victim line in a local buffer 2830 for (size_t i=0 ; i<m_words ; i++) 2831 r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; 3059 for (size_t i=0 ; i<m_words ; i++) 3060 r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; 3061 2832 3062 r_xram_rsp_victim_copy = victim.owner.srcid; 2833 3063 #if L1_MULTI_CACHE … … 2844 3074 r_xram_rsp_victim_dirty = victim.dirty; 2845 3075 2846 if(!trt_entry.rerror) r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 2847 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 3076 if(!r_xram_rsp_trt_buf.rerror) 3077 { 3078 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 3079 } 3080 else 3081 { 3082 
r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 3083 } 2848 3084 2849 3085 #if DEBUG_MEMC_XRAM_RSP … … 2851 3087 { 2852 3088 std::cout << " <MEMC " << name() << ".XRAM_RSP_TRT_COPY> Select a slot: " 2853 << " way = " << std::dec << way 2854 << " / set = " << set 3089 << " way = " << std::dec << way 3090 << " / set = " << set 2855 3091 << " / inval_required = " << inval << std::endl; 2856 3092 } 2857 3093 #endif 2858 3094 } 3095 else 3096 { 3097 std::cout << "VCI_MEM_CACHE ERROR " << name() 3098 << " XRAM_RSP_TRT_COPY state" << std::endl 3099 << "bad TRT allocation" << std::endl; 3100 3101 exit(0); 3102 } 2859 3103 break; 2860 3104 } 2861 3105 ///////////////////////// 2862 case XRAM_RSP_INVAL_LOCK: 2863 { 2864 if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) 3106 case XRAM_RSP_INVAL_LOCK: // check a possible pending inval 3107 { 3108 if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) 2865 3109 { 2866 3110 size_t index; … … 2879 3123 2880 3124 } 2881 3125 else if (m_update_tab.is_full() && r_xram_rsp_victim_inval.read()) 2882 3126 { 2883 3127 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2884 3128 2885 3129 #if DEBUG_MEMC_XRAM_RSP … … 2891 3135 } 2892 3136 #endif 2893 2894 else 3137 } 3138 else 2895 3139 { 2896 3140 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; … … 2907 3151 } 2908 3152 ///////////////////////// 2909 case XRAM_RSP_INVAL_WAIT: 3153 case XRAM_RSP_INVAL_WAIT: // returns to DIR_LOCK to retry 2910 3154 { 2911 3155 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; … … 2913 3157 } 2914 3158 /////////////////////// 2915 case XRAM_RSP_DIR_UPDT: 2916 // and possibly set an inval request in UPT 3159 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 3160 // and possibly set an inval request in UPT 2917 3161 { 2918 3162 // signals generation 2919 bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; 3163 bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; 2920 3164 bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read; 2921 3165 // update data 2922 3166 size_t set = r_xram_rsp_victim_set.read(); 2923 3167 size_t way = r_xram_rsp_victim_way.read(); 2924 for(size_t i=0; i<m_words ; i++) 3168 for(size_t i=0; i<m_words ; i++) 2925 3169 { 2926 3170 m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; 2927 3171 2928 if ( m_monitor_ok ) 3172 if ( m_monitor_ok ) 2929 3173 { 2930 vci_addr_t address = r_xram_rsp_trt_buf.nline<<6 | i<<2; 3174 vci_addr_t address = r_xram_rsp_trt_buf.nline<<6 | i<<2; 2931 3175 check_monitor("XRAM_RSP_DIR_UPDT", address, r_xram_rsp_trt_buf.wdata[i]); 2932 3176 } 2933 3177 } 2934 // compute dirty 3178 // compute dirty 2935 3179 bool dirty = false; 2936 3180 for(size_t i=0; i<m_words;i++) dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); 2937 3181 // update directory 2938 3182 DirectoryEntry entry; 2939 entry.valid 3183 entry.valid = true; 2940 3184 entry.is_cnt = false; 2941 entry.lock 2942 entry.dirty 2943 entry.tag 3185 entry.lock = false; 3186 entry.dirty = dirty; 3187 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 2944 3188 entry.ptr = 0; 2945 if(cached_read) 3189 if(cached_read) 2946 3190 { 2947 3191 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; … … 2951 3195 entry.owner.inst = inst_read; 2952 3196 entry.count = 1; 2953 } 2954 else 3197 } 3198 else 2955 3199 { 2956 3200 entry.owner.srcid = 0; … … 2965 3209 if (r_xram_rsp_victim_inval.read()) 2966 3210 { 2967 bool brdcast 2968 size_t index 2969 size_t count_copies 2970 2971 bool wok = m_update_tab.set( false,// it's an inval transaction2972 
brdcast,// set brdcast bit2973 false,// it does not need a response2974 0,// srcid2975 0,// trdid2976 0,// pktid2977 2978 2979 3211 bool brdcast = r_xram_rsp_victim_is_cnt.read(); 3212 size_t index = 0; 3213 size_t count_copies = r_xram_rsp_victim_count.read(); 3214 3215 bool wok = m_update_tab.set( false, // it's an inval transaction 3216 brdcast, // set brdcast bit 3217 false, // it does not need a response 3218 0, // srcid 3219 0, // trdid 3220 0, // pktid 3221 r_xram_rsp_victim_nline.read(), 3222 count_copies, 3223 index); 2980 3224 r_xram_rsp_upt_index = index; 2981 3225 2982 if (!wok) 3226 if (!wok) 2983 3227 { 2984 3228 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST state" << std::endl; … … 2992 3236 { 2993 3237 std::cout << " <MEMC " << name() << ".XRAM_RSP_DIR_UPDT> Directory update: " 2994 << " way = " << std::dec << way 2995 << " / set = " << set 3238 << " way = " << std::dec << way 3239 << " / set = " << set 2996 3240 << " / count = " << entry.count 2997 3241 << " / is_cnt = " << entry.is_cnt << std::endl; … … 3004 3248 3005 3249 // If the victim is not dirty, we don't need another XRAM put transaction, 3006 // and we canwe erase the TRT entry 3250 // and we canwe erase the TRT entry 3007 3251 if (!r_xram_rsp_victim_dirty.read()) m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 3008 3252 … … 3017 3261 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty 3018 3262 { 3019 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) 3263 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) 3020 3264 { 3021 3265 m_transaction_tab.set( r_xram_rsp_trt_index.read(), 3022 false, 3266 false, // write to XRAM 3023 3267 r_xram_rsp_victim_nline.read(), // line index 3024 3268 0, … … 3035 3279 { 3036 3280 std::cout << " <MEMC " << name() << ".XRAM_RSP_TRT_DIRTY> Set TRT entry for the put transaction:" 3037 3281 << " dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 3038 3282 } 3039 3283 #endif … … 3047 3291 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 3048 3292 { 3049 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 3293 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 3050 3294 { 3051 3295 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 3052 3296 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 3053 3297 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 3054 for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 3298 for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 3055 3299 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 3056 3300 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; … … 3059 3303 3060 3304 if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; 3061 else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 3305 else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 3062 3306 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3063 3307 … … 3066 3310 if( m_debug_xram_rsp_fsm ) 3067 3311 { 3068 std::cout << " <MEMC " << name() << ".XRAM_RSP_DIR_RSP> Request the TGT_RSP FSM to return data:" 3312 std::cout << " <MEMC " << name() << ".XRAM_RSP_DIR_RSP> Request the TGT_RSP FSM to return data:" 3069 3313 << " rsrcid = " << std::dec << r_xram_rsp_trt_buf.srcid 3070 3314 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 … … 3076 3320 } 3077 3321 //////////////////// 3078 case XRAM_RSP_INVAL: 3079 { 3080 if( 
!r_xram_rsp_to_init_cmd_multi_req.read() && 3081 !r_xram_rsp_to_init_cmd_brdcast_req.read() ) 3082 { 3322 case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM 3323 { 3324 if( !r_xram_rsp_to_init_cmd_multi_req.read() && 3325 !r_xram_rsp_to_init_cmd_brdcast_req.read() ) 3326 { 3083 3327 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 3084 3328 bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); 3085 3329 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 3086 3330 3087 r_xram_rsp_to_init_cmd_multi_req = last_multi_req; 3331 r_xram_rsp_to_init_cmd_multi_req = last_multi_req; 3088 3332 r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 3089 3333 r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); … … 3098 3342 3099 3343 if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 3100 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ ERASE;3344 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 3101 3345 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3102 3346 … … 3104 3348 if( m_debug_xram_rsp_fsm ) 3105 3349 { 3106 std::cout << " <MEMC " << name() << ".XRAM_RSP_INVAL> Send an inval request to INIT_CMD FSM:" 3350 std::cout << " <MEMC " << name() << ".XRAM_RSP_INVAL> Send an inval request to INIT_CMD FSM:" 3107 3351 << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 3108 3352 } … … 3112 3356 } 3113 3357 ////////////////////////// 3114 case XRAM_RSP_WRITE_DIRTY: 3115 { 3116 if ( !r_xram_rsp_to_ixr_cmd_req.read() ) 3358 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 3359 { 3360 if ( !r_xram_rsp_to_ixr_cmd_req.read() ) 3117 3361 { 3118 3362 r_xram_rsp_to_ixr_cmd_req = true; … … 3124 3368 bool multi_req = !r_xram_rsp_victim_is_cnt.read() && r_xram_rsp_victim_inval.read(); 3125 3369 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 3126 if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_ ERASE;3370 if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 3127 3371 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3128 3372 … … 3130 3374 if( m_debug_xram_rsp_fsm ) 3131 3375 { 3132 std::cout << " <MEMC " << name() << ".XRAM_RSP_WRITE_DIRTY> Send the put request to IXR_CMD FSM:" 3376 std::cout << " <MEMC " << name() << ".XRAM_RSP_WRITE_DIRTY> Send the put request to IXR_CMD FSM:" 3133 3377 << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 3134 3378 } … … 3137 3381 break; 3138 3382 } 3383 3139 3384 ///////////////////////// 3140 case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations 3141 { 3142 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) 3385 case XRAM_RSP_HEAP_REQ: 3386 // Get the lock to the HEAP directory 3387 { 3388 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) 3389 { 3390 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 3391 } 3392 3393 #if DEBUG_MEMC_XRAM_RSP 3394 if( m_debug_xram_rsp_fsm ) 3395 { 3396 std::cout 3397 << " <MEMC " << name() << ".XRAM_RSP_HEAP_REQ> Requesting HEAP lock " 3398 << std::endl; 3399 } 3400 #endif 3401 break; 3402 } 3403 3404 ///////////////////////// 3405 case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations 3406 { 3407 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) 3143 3408 { 3144 3409 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); … … 3157 3422 r_xram_rsp_to_init_cmd_multi_req = true; 3158 3423 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 3159 } 3160 else 3424 } 3425 else 3161 3426 { 3162 3427 r_xram_rsp_fsm = 
XRAM_RSP_HEAP_ERASE; 3163 3428 } 3164 } 3165 else 3429 } 3430 else 3166 3431 { 3167 3432 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; … … 3171 3436 if( m_debug_xram_rsp_fsm ) 3172 3437 { 3173 std::cout << " <MEMC " << name() << ".XRAM_RSP_HEAP_ERASE> Erase the list of copies:" 3438 std::cout << " <MEMC " << name() << ".XRAM_RSP_HEAP_ERASE> Erase the list of copies:" 3174 3439 << " srcid = " << std::dec << entry.owner.srcid 3175 3440 << " / inst = " << std::dec << entry.owner.inst << std::endl; … … 3180 3445 } 3181 3446 ///////////////////////// 3182 case XRAM_RSP_HEAP_LAST: 3447 case XRAM_RSP_HEAP_LAST: // last member of the list 3183 3448 { 3184 3449 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP ) … … 3200 3465 last_entry.next = r_xram_rsp_next_ptr.read(); 3201 3466 m_heap.unset_full(); 3202 } 3203 else 3467 } 3468 else 3204 3469 { 3205 3470 last_entry.next = free_pointer; … … 3220 3485 } 3221 3486 // /////////////////////// 3222 case XRAM_RSP_ERROR_ERASE: 3487 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 3223 3488 { 3224 3489 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); … … 3237 3502 } 3238 3503 //////////////////////// 3239 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 3240 { 3241 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 3504 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 3505 { 3506 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 3242 3507 { 3243 3508 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; … … 3265 3530 3266 3531 //////////////////////////////////////////////////////////////////////////////////// 3267 // 3532 // CLEANUP FSM 3268 3533 //////////////////////////////////////////////////////////////////////////////////// 3269 3534 // The CLEANUP FSM handles the cleanup request from L1 caches. 
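A cleanup request arrives as a VCI write on the p_vci_tgt_cleanup port whose payload encodes the victim line index: the low word travels in WDATA and the upper bits in the BE field, as decoded in CLEANUP_IDLE below. The sketch mirrors that decoding; it is illustrative only, and vci_param::B (the number of bytes per VCI data word) is assumed to be 4.

#include <cstdint>
#include <cstddef>

// Mirrors the decoding done in CLEANUP_IDLE (illustrative, not library code):
// line = (be << (B*8)) | wdata
uint64_t cleanup_line_index(uint32_t be, uint32_t wdata, size_t B = 4 /* bytes per data word, assumed */)
{
    return (uint64_t(be) << (B * 8)) | uint64_t(wdata);
}

Splitting the line index across BE and WDATA lets a single-flit write carry a line number wider than the 32-bit data field, which is why the FSM only accepts word-aligned, non-broadcast writes to a mapped cleanup segment.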
… … 3271 3536 //////////////////////////////////////////////////////////////////////////////////// 3272 3537 3273 3274 switch ( r_cleanup_fsm.read() ) 3538 switch ( r_cleanup_fsm.read() ) 3275 3539 { 3276 ////////////////// 3277 case CLEANUP_IDLE: 3278 { 3279 if ( p_vci_tgt_cleanup.cmdval.read() ) 3280 { 3281 if (p_vci_tgt_cleanup.srcid.read() >= m_initiators ) 3282 { 3283 std::cout << "VCI_MEM_CACHE ERROR " << name() 3284 << " CLEANUP_IDLE state" << std::endl; 3285 std::cout << "illegal srcid for cleanup request" << std::endl; 3286 exit(0); 3287 } 3288 3289 bool reached = false; 3290 for ( size_t index = 0 ; index < m_ncseg && !reached ; index++ ) 3291 { 3292 if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ) 3293 reached = true; 3294 } 3295 // only write request to a mapped address that are not broadcast are handled 3296 if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) && 3297 ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && reached) 3298 { 3299 addr_t line =(((addr_t) p_vci_tgt_cleanup.be.read() << (vci_param::B*8))) | 3300 (((addr_t) p_vci_tgt_cleanup.wdata.read())); 3301 3302 r_cleanup_nline = line; 3303 r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read(); 3304 r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read(); 3305 r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read(); 3306 r_cleanup_fsm = CLEANUP_DIR_LOCK; 3540 ////////////////// 3541 case CLEANUP_IDLE: 3542 { 3543 if ( p_vci_tgt_cleanup.cmdval.read() ) 3544 { 3545 if (p_vci_tgt_cleanup.srcid.read() >= m_initiators ) 3546 { 3547 std::cout << "VCI_MEM_CACHE ERROR " << name() 3548 << " CLEANUP_IDLE state" << std::endl; 3549 std::cout << "illegal srcid for cleanup request" << std::endl; 3550 exit(0); 3551 } 3552 3553 bool reached = false; 3554 for ( size_t index = 0 ; index < m_ncseg && !reached ; index++ ) 3555 { 3556 if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ) 3557 reached = true; 3558 } 3559 // only write request to a mapped address that are not broadcast are handled 3560 if (( p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE ) && 3561 (( p_vci_tgt_cleanup.address.read() & 0x3 ) == 0 ) && reached ) 3562 { 3563 addr_t line =(((addr_t) p_vci_tgt_cleanup.be.read() << (vci_param::B*8))) | 3564 (((addr_t) p_vci_tgt_cleanup.wdata.read())); 3565 3566 r_cleanup_nline = line; 3567 r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read(); 3568 r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read(); 3569 r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read(); 3570 r_cleanup_fsm = CLEANUP_DIR_REQ; 3571 3572 #if DEBUG_MEMC_CLEANUP 3573 if( m_debug_cleanup_fsm ) 3574 { 3575 std::cout << " <MEMC " << name() << ".CLEANUP_IDLE> Cleanup request:" << std::hex 3576 << " line addr = " << line * m_words * 4 3577 << " / owner_id = " << p_vci_tgt_cleanup.srcid.read() 3578 << " / owner_ins = " << (p_vci_tgt_cleanup.trdid.read()&0x1) 3579 << std::endl; 3580 } 3581 #endif 3582 m_cpt_cleanup++; 3583 } 3584 } 3585 break; 3586 } 3587 3588 ////////////////////// 3589 case CLEANUP_DIR_REQ: 3590 // Get the lock to the directory 3591 { 3592 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) 3593 { 3594 r_cleanup_fsm = CLEANUP_DIR_LOCK; 3595 } 3307 3596 3308 3597 #if DEBUG_MEMC_CLEANUP 3309 if( m_debug_cleanup_fsm ) 3310 { 3311 std::cout << " <MEMC " << name() << ".CLEANUP_IDLE> Cleanup request:" << std::hex 3312 << " line addr = " << line * m_words * 4 3313 << " / owner_id = " << p_vci_tgt_cleanup.srcid.read() 3314 << " / owner_ins = " << (p_vci_tgt_cleanup.trdid.read()&0x1) 3315 << std::endl; 3316 } 3317 #endif 3318 
m_cpt_cleanup++; 3319 } 3320 } 3321 break; 3322 } 3323 ////////////////////// 3324 case CLEANUP_DIR_LOCK: // test directory status 3325 { 3326 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) 3327 { 3328 // Read the directory 3329 size_t way = 0; 3330 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 3331 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 3332 r_cleanup_is_cnt = entry.is_cnt; 3333 r_cleanup_dirty = entry.dirty; 3334 r_cleanup_tag = entry.tag; 3335 r_cleanup_lock = entry.lock; 3336 r_cleanup_way = way; 3337 r_cleanup_copy = entry.owner.srcid; 3598 if( m_debug_cleanup_fsm ) 3599 { 3600 std::cout 3601 << " <MEMC " << name() << ".CLEANUP_DIR_REQ> Requesting DIR lock " 3602 << std::endl; 3603 } 3604 #endif 3605 break; 3606 } 3607 3608 ////////////////////// 3609 case CLEANUP_DIR_LOCK: // test directory status 3610 { 3611 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) 3612 { 3613 // Read the directory 3614 size_t way = 0; 3615 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 3616 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 3617 r_cleanup_is_cnt = entry.is_cnt; 3618 r_cleanup_dirty = entry.dirty; 3619 r_cleanup_tag = entry.tag; 3620 r_cleanup_lock = entry.lock; 3621 r_cleanup_way = way; 3622 r_cleanup_copy = entry.owner.srcid; 3338 3623 #if L1_MULTI_CACHE 3339 r_cleanup_copy_cache= entry.owner.cache_id;3340 #endif 3341 3342 3343 3344 3345 if( entry.valid) // hit : the copy must be cleared3346 3347 3348 3349 3350 }3351 else// access to the heap3352 3353 r_cleanup_fsm = CLEANUP_HEAP_LOCK;3354 3355 }3356 else// miss : we must check the update table3357 3358 3359 3624 r_cleanup_copy_cache = entry.owner.cache_id; 3625 #endif 3626 r_cleanup_copy_inst = entry.owner.inst; 3627 r_cleanup_count = entry.count; 3628 r_cleanup_ptr = entry.ptr; 3629 3630 if( entry.valid) // hit : the copy must be cleared 3631 { 3632 if ( (entry.count==1) || (entry.is_cnt) ) // no access to the heap 3633 { 3634 r_cleanup_fsm = CLEANUP_DIR_WRITE; 3635 } 3636 else // access to the heap 3637 { 3638 r_cleanup_fsm = CLEANUP_HEAP_REQ; 3639 } 3640 } 3641 else // miss : we must check the update table 3642 { 3643 r_cleanup_fsm = CLEANUP_UPT_LOCK; 3644 } 3360 3645 3361 3646 #if DEBUG_MEMC_CLEANUP 3362 if( m_debug_cleanup_fsm ) 3363 { 3364 std::cout << " <MEMC " << name() << ".CLEANUP_DIR_LOCK> Test directory status: " << std::hex 3647 if( m_debug_cleanup_fsm ) 3648 { 3649 std::cout 3650 << " <MEMC " << name() 3651 << ".CLEANUP_DIR_LOCK> Test directory status: " << std::hex 3365 3652 << " line = " << r_cleanup_nline.read() * m_words * 4 3366 3653 << " / hit = " << entry.valid 3367 3654 << " / dir_id = " << entry.owner.srcid 3368 3655 << " / dir_ins = " << entry.owner.inst 3369 << " / search_id = " << r_cleanup_srcid.read() 3656 << " / search_id = " << r_cleanup_srcid.read() 3370 3657 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) 3371 3658 << " / count = " << entry.count 3372 3659 << " / is_cnt = " << entry.is_cnt << std::endl; 3373 } 3374 #endif 3375 } 3376 break; 3377 } 3378 /////////////////////// 3379 case CLEANUP_DIR_WRITE: // update the directory entry without heap access 3380 { 3381 if ( r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP ) 3382 { 3383 std::cout << "VCI_MEM_CACHE ERROR " << name() 3384 << " CLEANUP_DIR_WRITE state" 3385 << " bad DIR allocation" << std::endl; 3386 exit(0); 3387 } 3388 3389 size_t way = r_cleanup_way.read(); 3390 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; 3391 bool cleanup_inst = 
r_cleanup_trdid.read() & 0x1; 3392 bool match_srcid = ((r_cleanup_copy.read() == r_cleanup_srcid.read()) 3660 } 3661 #endif 3662 } 3663 else 3664 { 3665 std::cout << "VCI_MEM_CACHE ERROR " << name() 3666 << " CLEANUP_DIR_LOCK state" 3667 << " bad DIR allocation" << std::endl; 3668 3669 exit(0); 3670 } 3671 break; 3672 } 3673 3674 /////////////////////// 3675 case CLEANUP_DIR_WRITE: 3676 // Update the directory entry without heap access 3677 { 3678 if ( r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP ) 3679 { 3680 std::cout << "VCI_MEM_CACHE ERROR " << name() 3681 << " CLEANUP_DIR_WRITE state" 3682 << " bad DIR allocation" << std::endl; 3683 exit(0); 3684 } 3685 3686 size_t way = r_cleanup_way.read(); 3687 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; 3688 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3689 bool match_srcid = ((r_cleanup_copy.read() == r_cleanup_srcid.read()) 3393 3690 #if L1_MULTI_CACHE 3394 3395 #endif 3396 3397 3398 3399 3400 3401 3402 entry.valid= true;3403 3404 entry.dirty= r_cleanup_dirty.read();3405 entry.tag= r_cleanup_tag.read();3406 entry.lock= r_cleanup_lock.read();3407 3408 3409 3410 {3411 3412 3691 and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read()) 3692 #endif 3693 ); 3694 bool match_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 3695 bool match = match_srcid && match_inst; 3696 3697 // update the cache directory (for the copies) 3698 DirectoryEntry entry; 3699 entry.valid = true; 3700 entry.is_cnt = r_cleanup_is_cnt.read(); 3701 entry.dirty = r_cleanup_dirty.read(); 3702 entry.tag = r_cleanup_tag.read(); 3703 entry.lock = r_cleanup_lock.read(); 3704 entry.ptr = r_cleanup_ptr.read(); 3705 3706 if ( r_cleanup_is_cnt.read() ) // counter mode 3707 { 3708 entry.count = r_cleanup_count.read() -1; 3709 entry.owner.srcid = 0; 3413 3710 #if L1_MULTI_CACHE 3414 3415 #endif 3416 3417 // response to the cache3418 3419 3420 else// linked_list mode3421 {3422 3423 3424 3425 3711 entry.owner.cache_id= 0; 3712 #endif 3713 entry.owner.inst = 0; 3714 // response to the cache 3715 r_cleanup_fsm = CLEANUP_RSP; 3716 } 3717 else // linked_list mode 3718 { 3719 if ( match ) // hit 3720 { 3721 entry.count = 0; // no more copy 3722 entry.owner.srcid = 0; 3426 3723 #if L1_MULTI_CACHE 3427 3428 #endif 3429 3430 r_cleanup_fsm = CLEANUP_RSP;3431 }3432 3433 {3434 3435 3724 entry.owner.cache_id=0; 3725 #endif 3726 entry.owner.inst = 0; 3727 r_cleanup_fsm = CLEANUP_RSP; 3728 } 3729 else // miss 3730 { 3731 entry.count = r_cleanup_count.read(); 3732 entry.owner.srcid = r_cleanup_copy.read(); 3436 3733 #if L1_MULTI_CACHE 3437 3438 #endif 3439 3440 3441 3442 3443 m_cache_directory.write(set, way, entry);3734 entry.owner.cache_id = r_cleanup_copy_cache.read(); 3735 #endif 3736 entry.owner.inst = r_cleanup_copy_inst.read(); 3737 r_cleanup_fsm = CLEANUP_UPT_LOCK; 3738 } 3739 } 3740 m_cache_directory.write(set, way, entry); 3444 3741 3445 3742 #if DEBUG_MEMC_CLEANUP 3446 if( m_debug_cleanup_fsm ) 3447 { 3448 std::cout << " <MEMC " << name() << ".CLEANUP_DIR_WRITE> Update directory:" << std::hex 3449 << " line = " << r_cleanup_nline.read() * m_words * 4 3450 << " / dir_id = " << entry.owner.srcid 3451 << " / dir_ins = " << entry.owner.inst 3452 << " / count = " << entry.count 3453 << " / is_cnt = " << entry.is_cnt << std::endl; 3454 } 3455 #endif 3456 3457 break; 3458 } 3459 /////////////////////// 3460 case CLEANUP_HEAP_LOCK: // two cases are handled in this state: 3461 // - the matching copy is directly in the directory 3462 // - the matching copy is the first copy 
in the heap 3463 { 3464 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP ) 3465 { 3466 size_t way = r_cleanup_way.read(); 3467 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; 3468 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 3469 bool last = (heap_entry.next == r_cleanup_ptr.read()); 3470 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3471 3472 // match_dir computation 3473 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 3474 bool match_dir_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 3475 bool match_dir = match_dir_srcid and match_dir_inst; 3743 if( m_debug_cleanup_fsm ) 3744 { 3745 std::cout 3746 << " <MEMC " << name() 3747 << ".CLEANUP_DIR_WRITE> Update directory:" << std::hex 3748 << " line = " << r_cleanup_nline.read() * m_words * 4 3749 << " / dir_id = " << entry.owner.srcid 3750 << " / dir_ins = " << entry.owner.inst 3751 << " / count = " << entry.count 3752 << " / is_cnt = " << entry.is_cnt << std::endl; 3753 } 3754 #endif 3755 3756 break; 3757 } 3758 3759 /////////////////////// 3760 case CLEANUP_HEAP_REQ: 3761 // Get the lock to the HEAP directory 3762 { 3763 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP ) 3764 { 3765 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 3766 } 3767 3768 #if DEBUG_MEMC_CLEANUP 3769 if( m_debug_cleanup_fsm ) 3770 { 3771 std::cout 3772 << " <MEMC " << name() << ".CLEANUP_HEAP_REQ> Requesting HEAP lock " 3773 << std::endl; 3774 } 3775 #endif 3776 break; 3777 } 3778 3779 /////////////////////// 3780 case CLEANUP_HEAP_LOCK: 3781 // two cases are handled in this state: 3782 // - the matching copy is directly in the directory 3783 // - the matching copy is the first copy in the heap 3784 { 3785 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP ) 3786 { 3787 size_t way = r_cleanup_way.read(); 3788 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; 3789 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 3790 bool last = (heap_entry.next == r_cleanup_ptr.read()); 3791 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3792 3793 // match_dir computation 3794 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 3795 bool match_dir_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 3796 bool match_dir = match_dir_srcid and match_dir_inst; 3476 3797 #if L1_MULTI_CACHE 3477 3478 #endif 3479 3480 3481 3482 3483 3798 match_dir = match_dir and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read()); 3799 #endif 3800 3801 // match_heap computation 3802 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 3803 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); 3804 bool match_heap = match_heap_srcid and match_heap_inst; 3484 3805 #if L1_MULTI_CACHE 3485 3486 #endif 3487 3488 3489 3806 match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read()); 3807 #endif 3808 3809 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 3810 r_cleanup_prev_srcid = heap_entry.owner.srcid; 3490 3811 #if L1_MULTI_CACHE 3491 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 3492 #endif 3493 r_cleanup_prev_inst = heap_entry.owner.inst; 3494 3495 if (match_dir) // the matching copy is registered in the directory 3496 { 3497 // the copy registered in the directory must be replaced 3498 // by the first copy registered in the heap 3499 // and the corresponding entry must be freed 3500 DirectoryEntry dir_entry; 3501 dir_entry.valid = true; 3502 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3503 dir_entry.dirty = r_cleanup_dirty.read(); 3504 
dir_entry.tag = r_cleanup_tag.read(); 3505 dir_entry.lock = r_cleanup_lock.read(); 3506 dir_entry.ptr = heap_entry.next; 3507 dir_entry.count = r_cleanup_count.read()-1; 3508 dir_entry.owner.srcid = heap_entry.owner.srcid; 3812 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 3813 #endif 3814 r_cleanup_prev_inst = heap_entry.owner.inst; 3815 3816 if (match_dir) 3817 // the matching copy is registered in the directory 3818 { 3819 // the copy registered in the directory must be replaced 3820 // by the first copy registered in the heap 3821 // and the corresponding entry must be freed 3822 DirectoryEntry dir_entry; 3823 dir_entry.valid = true; 3824 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3825 dir_entry.dirty = r_cleanup_dirty.read(); 3826 dir_entry.tag = r_cleanup_tag.read(); 3827 dir_entry.lock = r_cleanup_lock.read(); 3828 dir_entry.ptr = heap_entry.next; 3829 dir_entry.count = r_cleanup_count.read()-1; 3830 dir_entry.owner.srcid = heap_entry.owner.srcid; 3509 3831 #if L1_MULTI_CACHE 3510 dir_entry.owner.cache_id = heap_entry.owner.cache_id; 3511 #endif 3512 dir_entry.owner.inst = heap_entry.owner.inst; 3513 m_cache_directory.write(set,way,dir_entry); 3514 r_cleanup_next_ptr = r_cleanup_ptr.read(); 3515 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3516 } 3517 else if (match_heap) // the matching copy is the first copy in the heap 3518 { 3519 // The first copy in heap must be freed 3520 // and the copy registered in directory must point to the next copy in heap 3521 DirectoryEntry dir_entry; 3522 dir_entry.valid = true; 3523 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3524 dir_entry.dirty = r_cleanup_dirty.read(); 3525 dir_entry.tag = r_cleanup_tag.read(); 3526 dir_entry.lock = r_cleanup_lock.read(); 3527 dir_entry.ptr = heap_entry.next; 3528 dir_entry.count = r_cleanup_count.read()-1; 3529 dir_entry.owner.srcid = r_cleanup_copy.read(); 3832 dir_entry.owner.cache_id = heap_entry.owner.cache_id; 3833 #endif 3834 dir_entry.owner.inst = heap_entry.owner.inst; 3835 3836 m_cache_directory.write(set,way,dir_entry); 3837 3838 r_cleanup_next_ptr = r_cleanup_ptr.read(); 3839 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3840 } 3841 else if (match_heap) 3842 // the matching copy is the first copy in the heap 3843 { 3844 // The first copy in heap must be freed 3845 // and the copy registered in directory must point to the next copy in heap 3846 DirectoryEntry dir_entry; 3847 dir_entry.valid = true; 3848 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3849 dir_entry.dirty = r_cleanup_dirty.read(); 3850 dir_entry.tag = r_cleanup_tag.read(); 3851 dir_entry.lock = r_cleanup_lock.read(); 3852 dir_entry.ptr = heap_entry.next; 3853 dir_entry.count = r_cleanup_count.read()-1; 3854 dir_entry.owner.srcid = r_cleanup_copy.read(); 3530 3855 #if L1_MULTI_CACHE 3531 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 3532 #endif 3533 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 3534 m_cache_directory.write(set,way,dir_entry); 3535 r_cleanup_next_ptr = r_cleanup_ptr.read(); 3536 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3537 } 3538 else if(!last) // The matching copy is in the heap, but is not the first copy 3539 { 3540 // The directory entry must be modified to decrement count 3541 DirectoryEntry dir_entry; 3542 dir_entry.valid = true; 3543 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3544 dir_entry.dirty = r_cleanup_dirty.read(); 3545 dir_entry.tag = r_cleanup_tag.read(); 3546 dir_entry.lock = r_cleanup_lock.read(); 3547 dir_entry.ptr = r_cleanup_ptr.read(); 3548 dir_entry.count = r_cleanup_count.read()-1; 3549 
dir_entry.owner.srcid = r_cleanup_copy.read(); 3856 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 3857 #endif 3858 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 3859 3860 m_cache_directory.write(set,way,dir_entry); 3861 3862 r_cleanup_next_ptr = r_cleanup_ptr.read(); 3863 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3864 } 3865 else if(!last) 3866 // The matching copy is in the heap, but is not the first copy 3867 { 3868 // The directory entry must be modified to decrement count 3869 DirectoryEntry dir_entry; 3870 dir_entry.valid = true; 3871 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3872 dir_entry.dirty = r_cleanup_dirty.read(); 3873 dir_entry.tag = r_cleanup_tag.read(); 3874 dir_entry.lock = r_cleanup_lock.read(); 3875 dir_entry.ptr = r_cleanup_ptr.read(); 3876 dir_entry.count = r_cleanup_count.read()-1; 3877 dir_entry.owner.srcid = r_cleanup_copy.read(); 3550 3878 #if L1_MULTI_CACHE 3551 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 3552 #endif 3553 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 3554 m_cache_directory.write(set,way,dir_entry); 3555 r_cleanup_next_ptr = heap_entry.next; 3556 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 3557 } 3558 else 3559 { 3560 std::cout << "VCI_MEM_CACHE ERROR " << name() 3561 << " CLEANUP_HEAP_LOCK state" 3562 << " hit but copy not found" << std::endl; 3563 exit(0); 3564 } 3879 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 3880 #endif 3881 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 3882 3883 m_cache_directory.write(set,way,dir_entry); 3884 3885 r_cleanup_next_ptr = heap_entry.next; 3886 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 3887 } 3888 else 3889 { 3890 std::cout << "VCI_MEM_CACHE ERROR " << name() 3891 << " CLEANUP_HEAP_LOCK state" 3892 << " hit but copy not found" << std::endl; 3893 exit(0); 3894 } 3565 3895 3566 3896 #if DEBUG_MEMC_CLEANUP 3567 if( m_debug_cleanup_fsm ) 3568 { 3569 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_LOCK> Checks matching:" 3897 if( m_debug_cleanup_fsm ) 3898 { 3899 std::cout 3900 << " <MEMC " << name() << ".CLEANUP_HEAP_LOCK> Checks matching:" 3570 3901 << " line = " << r_cleanup_nline.read() * m_words * 4 3571 3902 << " / dir_id = " << r_cleanup_copy.read() 3572 3903 << " / dir_ins = " << r_cleanup_copy_inst.read() 3573 3904 << " / heap_id = " << heap_entry.owner.srcid 3574 << " / heap_ins = " << heap_entry.owner.inst3575 << " / search_id = " << r_cleanup_srcid.read()3576 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl;3577 }3578 #endif3579 }3580 break;3581 }3582 /////////////////////////3583 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy3584 // is in the heap, but is not the first in the linked list3585 {3586 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP )3587 {3588 std::cout << "VCI_MEM_CACHE ERROR " << name()3589 << " CLEANUP_HEAP_SEARCH state"3590 << " bad HEAP allocation" << std::endl;3591 exit(0);3592 }3593 3594 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read());3595 bool last = (heap_entry.next == r_cleanup_next_ptr.read());3596 bool cleanup_inst = r_cleanup_trdid.read() & 0x1;3597 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read());3598 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst);3599 bool match_heap = match_heap_srcid && match_heap_inst;3600 #if L1_MULTI_CACHE3601 match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read());3602 #endif3603 3604 #if DEBUG_MEMC_CLEANUP3605 if( m_debug_cleanup_fsm )3606 {3607 std::cout << " <MEMC " << name() << 
".CLEANUP_HEAP_SEARCH> Cheks matching:"3608 << " line = " << r_cleanup_nline.read() * m_words * 43609 << " / heap_id = " << heap_entry.owner.srcid3610 << " / heap_ins = " << heap_entry.owner.inst3611 << " / search_id = " << r_cleanup_srcid.read()3612 << " / search_ins = " << (r_cleanup_trdid.read()&0x1)3613 << " / last = " << last << std::endl;3614 }3615 #endif3616 if(match_heap) // the matching copy must be removed3617 {3618 r_cleanup_ptr = heap_entry.next; // reuse ressources3619 r_cleanup_fsm = CLEANUP_HEAP_CLEAN;3620 }3621 else3622 {3623 if ( last )3624 {3625 std::cout << "VCI_MEM_CACHE_ERROR " << name()3626 << " CLEANUP_HEAP_SEARCH state"3627 << " cleanup hit but copy not found" << std::endl;3628 exit(0);3629 }3630 else // test the next in the linked list3631 {3632 r_cleanup_prev_ptr = r_cleanup_next_ptr.read();3633 r_cleanup_prev_srcid = heap_entry.owner.srcid;3634 #if L1_MULTI_CACHE3635 r_cleanup_prev_cache_id = heap_entry.owner.cache_id;3636 #endif3637 r_cleanup_prev_inst = heap_entry.owner.inst;3638 r_cleanup_next_ptr = heap_entry.next;3639 r_cleanup_fsm = CLEANUP_HEAP_SEARCH;3640 3641 #if DEBUG_MEMC_CLEANUP3642 if( m_debug_cleanup_fsm )3643 {3644 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Matching copy not found, search next:"3645 << " line = " << r_cleanup_nline.read() * m_words * 43646 << " / heap_id = " << heap_entry.owner.srcid3647 3905 << " / heap_ins = " << heap_entry.owner.inst 3648 3906 << " / search_id = " << r_cleanup_srcid.read() 3649 3907 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl; 3650 } 3651 #endif 3652 } 3653 } 3654 break; 3655 } 3656 //////////////////////// 3657 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 3658 { 3659 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 3660 { 3661 std::cout << "VCI_MEM_CACHE ERROR " << name() 3662 << " CLEANUP_HEAP_CLEAN state" 3663 << "Bad HEAP allocation" << std::endl; 3664 exit(0); 3665 } 3666 3667 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 3668 HeapEntry heap_entry; 3669 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 3908 } 3909 #endif 3910 } 3911 else 3912 { 3913 std::cout << "VCI_MEM_CACHE ERROR " << name() 3914 << " CLEANUP_HEAP_LOCK state" 3915 << " bad HEAP allocation" << std::endl; 3916 3917 exit(0); 3918 } 3919 break; 3920 } 3921 3922 ///////////////////////// 3923 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 3924 // is in the heap, but is not the first in the linked list 3925 { 3926 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 3927 { 3928 std::cout << "VCI_MEM_CACHE ERROR " << name() 3929 << " CLEANUP_HEAP_SEARCH state" 3930 << " bad HEAP allocation" << std::endl; 3931 exit(0); 3932 } 3933 3934 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 3935 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 3936 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3937 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 3938 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); 3939 bool match_heap = match_heap_srcid && match_heap_inst; 3670 3940 #if L1_MULTI_CACHE 3671 heap_entry.owner.cache_id = r_cleanup_prev_cache_id.read(); 3672 #endif 3673 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 3674 if(last) // this is the last entry of the list of copies 3675 { 3676 heap_entry.next = r_cleanup_prev_ptr.read(); 3677 } 3678 else // this is not the last entry 3679 { 3680 heap_entry.next = r_cleanup_ptr.read(); 3681 } 3682 
m_heap.write(r_cleanup_prev_ptr.read(),heap_entry); 3683 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3941 match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read()); 3942 #endif 3684 3943 3685 3944 #if DEBUG_MEMC_CLEANUP 3686 if( m_debug_cleanup_fsm ) 3687 { 3688 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Remove the copy in the linked list" << std::endl; 3689 } 3690 #endif 3691 break; 3692 } 3693 /////////////////////// 3694 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 3695 // and becomes the head of the list of free entries 3696 { 3697 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 3698 { 3699 std::cout << "VCI_MEM_CACHE ERROR " << name() << " CLEANUP_HEAP_CLEAN state" << std::endl; 3700 std::cout << "Bad HEAP allocation" << std::endl; 3701 exit(0); 3702 } 3703 3704 HeapEntry heap_entry; 3705 heap_entry.owner.srcid = 0; 3945 if( m_debug_cleanup_fsm ) 3946 { 3947 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Cheks matching:" 3948 << " line = " << r_cleanup_nline.read() * m_words * 4 3949 << " / heap_id = " << heap_entry.owner.srcid 3950 << " / heap_ins = " << heap_entry.owner.inst 3951 << " / search_id = " << r_cleanup_srcid.read() 3952 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) 3953 << " / last = " << last << std::endl; 3954 } 3955 #endif 3956 if(match_heap) // the matching copy must be removed 3957 { 3958 r_cleanup_ptr = heap_entry.next; // reuse ressources 3959 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 3960 } 3961 else 3962 { 3963 if ( last ) 3964 { 3965 std::cout << "VCI_MEM_CACHE_ERROR " << name() 3966 << " CLEANUP_HEAP_SEARCH state" 3967 << " cleanup hit but copy not found" << std::endl; 3968 exit(0); 3969 } 3970 else // test the next in the linked list 3971 { 3972 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 3973 r_cleanup_prev_srcid = heap_entry.owner.srcid; 3706 3974 #if L1_MULTI_CACHE 3707 heap_entry.owner.cache_id = 0; 3708 #endif 3709 heap_entry.owner.inst = false; 3710 3711 if(m_heap.is_full()) heap_entry.next = r_cleanup_next_ptr.read(); 3712 else heap_entry.next = m_heap.next_free_ptr(); 3713 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 3714 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 3715 m_heap.unset_full(); 3975 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 3976 #endif 3977 r_cleanup_prev_inst = heap_entry.owner.inst; 3978 r_cleanup_next_ptr = heap_entry.next; 3979 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 3980 3981 #if DEBUG_MEMC_CLEANUP 3982 if( m_debug_cleanup_fsm ) 3983 { 3984 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 3985 << " line = " << r_cleanup_nline.read() * m_words * 4 3986 << " / heap_id = " << heap_entry.owner.srcid 3987 << " / heap_ins = " << heap_entry.owner.inst 3988 << " / search_id = " << r_cleanup_srcid.read() 3989 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl; 3990 } 3991 #endif 3992 } 3993 } 3994 break; 3995 } 3996 3997 //////////////////////// 3998 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 3999 { 4000 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 4001 { 4002 std::cout << "VCI_MEM_CACHE ERROR " << name() 4003 << " CLEANUP_HEAP_CLEAN state" 4004 << "Bad HEAP allocation" << std::endl; 4005 exit(0); 4006 } 4007 4008 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 4009 HeapEntry heap_entry; 4010 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 4011 #if L1_MULTI_CACHE 4012 heap_entry.owner.cache_id = 
r_cleanup_prev_cache_id.read(); 4013 #endif 4014 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 4015 4016 if(last) // this is the last entry of the list of copies 4017 { 4018 heap_entry.next = r_cleanup_prev_ptr.read(); 4019 } 4020 else // this is not the last entry 4021 { 4022 heap_entry.next = r_cleanup_ptr.read(); 4023 } 4024 4025 m_heap.write(r_cleanup_prev_ptr.read(),heap_entry); 4026 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4027 4028 #if DEBUG_MEMC_CLEANUP 4029 if( m_debug_cleanup_fsm ) 4030 { 4031 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Remove the copy in the linked list" << std::endl; 4032 } 4033 #endif 4034 break; 4035 } 4036 4037 /////////////////////// 4038 case CLEANUP_HEAP_FREE: 4039 // The heap entry pointed by r_cleanup_next_ptr is freed 4040 // and becomes the head of the list of free entries 4041 { 4042 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 4043 { 4044 std::cout 4045 << "VCI_MEM_CACHE ERROR " << name() 4046 << " CLEANUP_HEAP_CLEAN state" << std::endl 4047 << "Bad HEAP allocation" << std::endl; 4048 4049 exit(0); 4050 } 4051 4052 HeapEntry heap_entry; 4053 heap_entry.owner.srcid = 0; 4054 #if L1_MULTI_CACHE 4055 heap_entry.owner.cache_id = 0; 4056 #endif 4057 heap_entry.owner.inst = false; 4058 4059 if(m_heap.is_full()) 4060 { 4061 heap_entry.next = r_cleanup_next_ptr.read(); 4062 } 4063 else 4064 { 4065 heap_entry.next = m_heap.next_free_ptr(); 4066 } 4067 4068 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 4069 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 4070 m_heap.unset_full(); 4071 4072 r_cleanup_fsm = CLEANUP_RSP; 4073 4074 #if DEBUG_MEMC_CLEANUP 4075 if( m_debug_cleanup_fsm ) 4076 { 4077 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Update the list of free entries" << std::endl; 4078 } 4079 #endif 4080 break; 4081 } 4082 4083 ////////////////////// 4084 case CLEANUP_UPT_LOCK: 4085 { 4086 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_CLEANUP ) 4087 { 4088 size_t index = 0; 4089 bool hit_inval; 4090 hit_inval = m_update_tab.search_inval(r_cleanup_nline.read(),index); 4091 4092 if ( !hit_inval ) // no pending inval 4093 { 4094 4095 #if DEBUG_MEMC_CLEANUP 4096 if( m_debug_cleanup_fsm ) 4097 { 4098 std::cout << " <MEMC " << name() << ".CLEANUP_UPT_LOCK> Unexpected cleanup with no corresponding UPT entry:" 4099 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 4100 } 4101 #endif 3716 4102 r_cleanup_fsm = CLEANUP_RSP; 4103 } 4104 else // pending inval 4105 { 4106 r_cleanup_write_srcid = m_update_tab.srcid(index); 4107 r_cleanup_write_trdid = m_update_tab.trdid(index); 4108 r_cleanup_write_pktid = m_update_tab.pktid(index); 4109 r_cleanup_need_rsp = m_update_tab.need_rsp(index); 4110 r_cleanup_fsm = CLEANUP_UPT_WRITE; 4111 } 4112 r_cleanup_index.write(index) ; 4113 } 4114 break; 4115 } 4116 4117 /////////////////////// 4118 case CLEANUP_UPT_WRITE: // decrement response counter 4119 { 4120 size_t count = 0; 4121 m_update_tab.decrement(r_cleanup_index.read(), count); 4122 if ( count == 0 ) 4123 { 4124 m_update_tab.clear(r_cleanup_index.read()); 3717 4125 3718 4126 #if DEBUG_MEMC_CLEANUP 3719 if( m_debug_cleanup_fsm ) 3720 { 3721 std::cout << " <MEMC " << name() << ".CLEANUP_HEAP_SEARCH> Update the list of free entries" << std::endl; 3722 } 3723 #endif 3724 break; 3725 } 3726 ////////////////////// 3727 case CLEANUP_UPT_LOCK: 3728 { 3729 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_CLEANUP ) 3730 { 3731 size_t index = 0; 3732 bool hit_inval; 3733 hit_inval = 
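The CLEANUP_HEAP_SEARCH / CLEANUP_HEAP_CLEAN / CLEANUP_HEAP_FREE sequence above is a singly linked list removal spread over several cycles: SEARCH walks the list of copies while remembering the predecessor, CLEAN re-links the predecessor around the matching entry, and FREE pushes the freed slot onto the heap free list. A minimal software sketch of the same unlink step, under the assumption that the copy is not the first of the list (that case is handled earlier, in CLEANUP_HEAP_LOCK); Entry and the flat heap vector are hypothetical stand-ins for the hardware HeapEntry storage:

  #include <cstddef>
  #include <vector>

  // Hypothetical flat heap of copies; 'next' indexes the same vector and a
  // self-pointing 'next' marks the end of a list (the FSM's 'last' test).
  struct Entry { std::size_t srcid; bool inst; std::size_t next; };

  // Unlink the copy (srcid, inst) from the list starting at 'head' and return
  // the index of the removed slot; returns heap.size() if it is not found
  // (the FSM treats that case as a fatal error).
  std::size_t remove_copy(std::vector<Entry>& heap, std::size_t head,
                          std::size_t srcid, bool inst)
  {
      std::size_t prev = head;                  // head already checked in CLEANUP_HEAP_LOCK
      std::size_t cur  = heap[head].next;
      while (true) {
          Entry& e   = heap[cur];
          bool   last = (e.next == cur);                // tail entry points to itself
          if (e.srcid == srcid && e.inst == inst) {
              heap[prev].next = last ? prev : e.next;   // CLEANUP_HEAP_CLEAN
              return cur;                               // recycled in CLEANUP_HEAP_FREE
          }
          if (last) return heap.size();                 // copy not found
          prev = cur;                                   // keep the predecessor
          cur  = e.next;                                // CLEANUP_HEAP_SEARCH again
      }
  }

As in the FSM, the freed slot would then be written with a null owner and pushed at the head of the free list, which is what CLEANUP_HEAP_FREE does with write_free_ptr().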
m_update_tab.search_inval(r_cleanup_nline.read(),index); 3734 3735 if ( !hit_inval ) // no pending inval 3736 { 3737 3738 #if DEBUG_MEMC_CLEANUP 3739 if( m_debug_cleanup_fsm ) 3740 { 3741 std::cout << " <MEMC " << name() << ".CLEANUP_UPT_LOCK> Unexpected cleanup with no corresponding UPT entry:" 3742 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 3743 } 3744 #endif 3745 r_cleanup_fsm = CLEANUP_RSP; 3746 } 3747 else // pending inval 3748 { 3749 r_cleanup_write_srcid = m_update_tab.srcid(index); 3750 r_cleanup_write_trdid = m_update_tab.trdid(index); 3751 r_cleanup_write_pktid = m_update_tab.pktid(index); 3752 r_cleanup_need_rsp = m_update_tab.need_rsp(index); 3753 r_cleanup_fsm = CLEANUP_UPT_WRITE; 3754 } 3755 r_cleanup_index.write(index) ; 3756 } 3757 break; 3758 } 3759 /////////////////////// 3760 case CLEANUP_UPT_WRITE: // decrement response counter 3761 { 3762 size_t count = 0; 3763 m_update_tab.decrement(r_cleanup_index.read(), count); 3764 if ( count == 0 ) 3765 { 3766 m_update_tab.clear(r_cleanup_index.read()); 3767 3768 #if DEBUG_MEMC_CLEANUP 3769 if( m_debug_cleanup_fsm ) 3770 { 3771 std::cout << " <MEMC " << name() << ".CLEANUP_UPT_WRITE> Decrement response counter in UPT:" 4127 if( m_debug_cleanup_fsm ) 4128 { 4129 std::cout << " <MEMC " << name() << ".CLEANUP_UPT_WRITE> Decrement response counter in UPT:" 3772 4130 << " UPT_index = " << r_cleanup_index.read() 3773 4131 << " rsp_count = " << count << std::endl; 3774 } 3775 #endif 3776 if( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP ; 3777 else r_cleanup_fsm = CLEANUP_RSP; 3778 } 3779 else 3780 { 3781 r_cleanup_fsm = CLEANUP_RSP ; 3782 } 3783 break; 3784 } 3785 /////////////////////// 3786 case CLEANUP_WRITE_RSP: // Response to a previous write on the direct network 3787 { 3788 if( !r_cleanup_to_tgt_rsp_req.read() ) 3789 { 3790 r_cleanup_to_tgt_rsp_req = true; 3791 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 3792 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 3793 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 3794 r_cleanup_fsm = CLEANUP_RSP; 4132 } 4133 #endif 4134 if( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP ; 4135 else r_cleanup_fsm = CLEANUP_RSP; 4136 } 4137 else 4138 { 4139 r_cleanup_fsm = CLEANUP_RSP ; 4140 } 4141 break; 4142 } 4143 4144 /////////////////////// 4145 case CLEANUP_WRITE_RSP: // Response to a previous write on the direct network 4146 { 4147 if( !r_cleanup_to_tgt_rsp_req.read() ) 4148 { 4149 r_cleanup_to_tgt_rsp_req = true; 4150 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 4151 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 4152 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 4153 r_cleanup_fsm = CLEANUP_RSP; 3795 4154 3796 4155 #if DEBUG_MEMC_CLEANUP 3797 if( m_debug_cleanup_fsm )3798 {3799 std::cout << " <MEMC " << name() << ".CLEANUP_WRITE_RSP> Send a response to a cleanup request:"4156 if( m_debug_cleanup_fsm ) 4157 { 4158 std::cout << " <MEMC " << name() << ".CLEANUP_WRITE_RSP> Send a response to a cleanup request:" 3800 4159 << " rsrcid = " << std::dec << r_cleanup_write_srcid.read() 3801 4160 << " / rtrdid = " << std::dec << r_cleanup_write_trdid.read() << std::endl; 3802 } 3803 #endif 3804 } 3805 break; 3806 } 3807 ///////////////// 3808 case CLEANUP_RSP: // Response to a cleanup on the coherence network 3809 { 3810 if ( p_vci_tgt_cleanup.rspack.read() ) 3811 { 3812 r_cleanup_fsm = CLEANUP_IDLE; 4161 } 4162 #endif 4163 } 4164 break; 4165 } 4166 4167 
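CLEANUP_UPT_LOCK and CLEANUP_UPT_WRITE above implement the bookkeeping for a pending invalidate: look the line up in the Update/Inval Table, decrement the expected response counter, and when it reaches zero clear the entry and, if the original writer is still waiting, send it a write response (CLEANUP_WRITE_RSP). A hedged, software-only sketch of that path; UptEntry and the linear search are simplifications of the real UpdateTab class:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Hypothetical UPT entry: one pending update/invalidate transaction.
  struct UptEntry {
      bool          valid    = false;
      std::uint64_t nline    = 0;     // cache line index
      std::size_t   count    = 0;     // responses still expected
      bool          need_rsp = false; // a write response is owed to the requester
  };

  // Returns true when the last expected cleanup arrived and a response is owed,
  // which is the CLEANUP_UPT_WRITE -> CLEANUP_WRITE_RSP transition above.
  bool cleanup_ack(std::vector<UptEntry>& upt, std::uint64_t nline)
  {
      for (UptEntry& e : upt) {
          if (e.valid && e.nline == nline) {   // CLEANUP_UPT_LOCK: search_inval()
              if (--e.count == 0) {            // CLEANUP_UPT_WRITE: decrement
                  e.valid = false;             // last response: clear the entry
                  return e.need_rsp;           // maybe answer the initial write
              }
              return false;                    // still waiting for other cleanups
          }
      }
      return false;                            // no pending inval: unexpected cleanup
  }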
///////////////// 4168 case CLEANUP_RSP: // Response to a cleanup on the coherence network 4169 { 4170 if ( p_vci_tgt_cleanup.rspack.read() ) 4171 { 4172 r_cleanup_fsm = CLEANUP_IDLE; 3813 4173 3814 4174 #if DEBUG_MEMC_CLEANUP 3815 if( m_debug_cleanup_fsm )3816 {3817 std::cout << " <MEMC " << name() << ".CLEANUP_RSP> Send the response to a cleanup request:"4175 if( m_debug_cleanup_fsm ) 4176 { 4177 std::cout << " <MEMC " << name() << ".CLEANUP_RSP> Send the response to a cleanup request:" 3818 4178 << " rsrcid = " << std::dec << r_cleanup_write_srcid.read() 3819 4179 << " / rtrdid = " << r_cleanup_write_trdid.read() << std::endl; 3820 }3821 #endif 3822 3823 3824 4180 } 4181 #endif 4182 } 4183 break; 4184 } 3825 4185 } // end switch cleanup fsm 3826 4186 3827 4187 //////////////////////////////////////////////////////////////////////////////////// 3828 // 4188 // SC FSM 3829 4189 //////////////////////////////////////////////////////////////////////////////////// 3830 // The SC FSM handles the SC (Store Conditionnal) atomic commands, 4190 // The SC FSM handles the SC (Store Conditionnal) atomic commands, 3831 4191 // that are handled as "compare-and-swap instructions. 3832 // 3833 // This command contains two or four flits: 4192 // 4193 // This command contains two or four flits: 3834 4194 // - In case of 32 bits atomic access, the first flit contains the value read 3835 4195 // by a previous LL instruction, the second flit contains the value to be writen. 3836 4196 // - In case of 64 bits atomic access, the 2 first flits contains the value read 3837 4197 // by a previous LL instruction, the 2 next flits contains the value to be writen. 3838 // 4198 // 3839 4199 // The target address is cachable. If it is replicated in other L1 caches 3840 4200 // than the writer, a coherence operation is done. 3841 // 4201 // 3842 4202 // It access the directory to check hit / miss. 3843 4203 // - In case of miss, the SC FSM must register a GET transaction in TRT. 3844 // If a read transaction to the XRAM for this line already exists, 4204 // If a read transaction to the XRAM for this line already exists, 3845 4205 // or if the transaction table is full, it goes to the WAIT state 3846 4206 // to release the locks and try again. When the GET transaction has been … … 3850 4210 /////////////////////////////////////////////////////////////////////////////////// 3851 4211 3852 switch ( r_sc_fsm.read() ) 4212 switch ( r_sc_fsm.read() ) 3853 4213 { 3854 4214 ///////////// 3855 case SC_IDLE: 3856 { 3857 if( m_cmd_sc_addr_fifo.rok() ) 4215 case SC_IDLE: // fill the local rdata buffers 4216 { 4217 if( m_cmd_sc_addr_fifo.rok() ) 3858 4218 { 3859 4219 … … 3862 4222 { 3863 4223 std::cout << " <MEMC " << name() << ".SC_IDLE> SC command: " << std::hex 3864 << " srcid = " << std::dec << m_cmd_sc_srcid_fifo.read() 3865 << " addr = " << std::hex << m_cmd_sc_addr_fifo.read() 4224 << " srcid = " << std::dec << m_cmd_sc_srcid_fifo.read() 4225 << " addr = " << std::hex << m_cmd_sc_addr_fifo.read() 3866 4226 << " wdata = " << m_cmd_sc_wdata_fifo.read() 3867 4227 << " eop = " << std::dec << m_cmd_sc_eop_fifo.read() … … 3872 4232 { 3873 4233 m_cpt_sc++; 3874 r_sc_fsm = SC_DIR_ LOCK;3875 } 4234 r_sc_fsm = SC_DIR_REQ; 4235 } 3876 4236 else // we keep the last word in the FIFO 3877 4237 { … … 3887 4247 if( r_sc_cpt.read()>3 ) // more than 4 flits... 
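As the comment block above says, the SC command is handled as a compare and swap: the value read by the earlier LL is compared against the current content of the line, and the write is performed only if they still match (and the pseudo-random forced failure is not taken). A compact model of the 32-bit case, covering the SC_DIR_HIT_READ, SC_DIR_HIT_WRITE and SC_RSP_SUCCESS / SC_RSP_FAIL outcomes; this is an illustration, not the FSM itself:

  #include <cstdint>

  // 'old' is the value returned by the previous LL, 'nu' the value to store.
  // The function returns the VCI response data: 0 = success, 1 = failure.
  // 'forced_fail' models the anti-livelock test done in SC_DIR_HIT_READ.
  std::uint32_t sc_word(std::uint32_t& word, std::uint32_t old,
                        std::uint32_t nu, bool forced_fail)
  {
      if (word == old && !forced_fail) {   // SC_DIR_HIT_READ: data unchanged
          word = nu;                       // SC_DIR_HIT_WRITE: update the line
          return 0;                        // SC_RSP_SUCCESS
      }
      return 1;                            // SC_RSP_FAIL
  }

The 64-bit variant compares and writes two consecutive words, which is why the command may carry four flits instead of two.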
3888 4248 { 3889 std::cout << "VCI_MEM_CACHE ERROR in SC_IDLE state : illegal SC command" 4249 std::cout << "VCI_MEM_CACHE ERROR in SC_IDLE state : illegal SC command" 3890 4250 << std::endl; 3891 4251 exit(0); … … 3896 4256 3897 4257 r_sc_cpt = r_sc_cpt.read()+1; 3898 } 4258 } 3899 4259 break; 3900 4260 } 4261 4262 ///////////////// 4263 case SC_DIR_REQ: 4264 { 4265 if( r_alloc_dir_fsm.read() == ALLOC_DIR_SC ) 4266 { 4267 r_sc_fsm = SC_DIR_LOCK; 4268 } 4269 4270 #if DEBUG_MEMC_SC 4271 if( m_debug_sc_fsm ) 4272 { 4273 std::cout 4274 << " <MEMC " << name() << ".SC_DIR_REQ> Requesting DIR lock " 4275 << std::endl; 4276 } 4277 #endif 4278 break; 4279 } 4280 3901 4281 ///////////////// 3902 4282 case SC_DIR_LOCK: // Read the directory 3903 4283 { 3904 if( r_alloc_dir_fsm.read() == ALLOC_DIR_SC ) 4284 if( r_alloc_dir_fsm.read() == ALLOC_DIR_SC ) 3905 4285 { 3906 4286 size_t way = 0; … … 3919 4299 r_sc_count = entry.count; 3920 4300 3921 if ( entry.valid ) r_sc_fsm = SC_DIR_HIT_READ;3922 else 4301 if ( entry.valid ) r_sc_fsm = SC_DIR_HIT_READ; 4302 else r_sc_fsm = SC_MISS_TRT_LOCK; 3923 4303 3924 4304 #if DEBUG_MEMC_SC … … 3927 4307 std::cout << " <MEMC " << name() << ".SC_DIR_LOCK> Directory acces" 3928 4308 << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() 3929 << " / hit = " << std::dec << entry.valid 4309 << " / hit = " << std::dec << entry.valid 3930 4310 << " / count = " << entry.count 3931 4311 << " / is_cnt = " << entry.is_cnt << std::endl; … … 3933 4313 #endif 3934 4314 } 4315 else 4316 { 4317 std::cout 4318 << "VCI_MEM_CACHE ERROR " << name() 4319 << " SC_DIR_LOCK state" << std::endl 4320 << "Bad DIR allocation" << std::endl; 4321 4322 exit(0); 4323 } 4324 3935 4325 break; 3936 4326 } … … 3939 4329 // and check data change in cache 3940 4330 { 3941 size_t way 3942 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];3943 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4331 size_t way = r_sc_way.read(); 4332 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4333 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3944 4334 3945 4335 // update directory (lock & dirty bits) 3946 4336 DirectoryEntry entry; 3947 entry.valid 4337 entry.valid = true; 3948 4338 entry.is_cnt = r_sc_is_cnt.read(); 3949 entry.dirty 3950 entry.lock 3951 entry.tag 4339 entry.dirty = true; 4340 entry.lock = true; 4341 entry.tag = r_sc_tag.read(); 3952 4342 entry.owner.srcid = r_sc_copy.read(); 3953 4343 #if L1_MULTI_CACHE … … 3965 4355 ok &= ( r_sc_rdata[1] == m_cache_data[way][set][word+1] ); 3966 4356 3967 // to avoid livelock, force the atomic access to fail pseudo-randomly 4357 // to avoid livelock, force the atomic access to fail pseudo-randomly 3968 4358 bool forced_fail = ( (r_sc_lfsr % (64) == 0) && RANDOMIZE_SC ); 3969 4359 r_sc_lfsr = (r_sc_lfsr >> 1) ^ ((-(r_sc_lfsr & 1)) & 0xd0000001); 3970 4360 3971 if( ok and not forced_fail ) 4361 if( ok and not forced_fail ) // no data change 3972 4362 { 3973 4363 r_sc_fsm = SC_DIR_HIT_WRITE; … … 3976 4366 { 3977 4367 r_sc_fsm = SC_RSP_FAIL; 3978 } 4368 } 3979 4369 3980 4370 #if DEBUG_MEMC_SC 3981 4371 if( m_debug_sc_fsm ) 3982 4372 { 3983 std::cout << " <MEMC " << name() << ".SC_DIR_HIT_READ> Test if SC success:" 4373 std::cout << " <MEMC " << name() << ".SC_DIR_HIT_READ> Test if SC success:" 3984 4374 << " / expected value = " << r_sc_rdata[0].read() 3985 4375 << " / actual value = " << m_cache_data[way][set][word] … … 3990 4380 } 3991 4381 ////////////////////// 3992 case SC_DIR_HIT_WRITE: 4382 case SC_DIR_HIT_WRITE: // 
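The forced failure used above to avoid livelocks comes from a 32-bit Galois LFSR (r_sc_lfsr): when RANDOMIZE_SC is enabled, roughly one SC in 64 is failed on purpose even though the data still matches. The same recurrence in isolation, with the conditional XOR spelled out (equivalent to the (-(lfsr & 1)) & 0xd0000001 expression in the code):

  #include <cstdint>

  // One step of the LFSR used by the SC FSM; a forced failure is taken when
  // the current state is a multiple of 64, i.e. about once every 64 SCs.
  bool lfsr_forced_fail(std::uint32_t& lfsr)
  {
      bool fail = (lfsr % 64 == 0);
      // Shift right and XOR the feedback mask when the bit shifted out was 1.
      lfsr = (lfsr >> 1) ^ ((lfsr & 1u) ? 0xd0000001u : 0u);
      return fail;
  }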
test if a CC transaction is required 3993 4383 // write data in cache if no CC request 3994 4384 { 3995 4385 // test coherence request 3996 if(r_sc_count.read()) // replicated line 4386 if(r_sc_count.read()) // replicated line 3997 4387 { 3998 4388 if ( r_sc_is_cnt.read() ) 3999 4389 { 4000 r_sc_fsm = SC_BC_TRT_LOCK; 4390 r_sc_fsm = SC_BC_TRT_LOCK; // broadcast invalidate required 4001 4391 } 4002 else if( !r_sc_to_init_cmd_multi_req.read() && 4392 else if( !r_sc_to_init_cmd_multi_req.read() && 4003 4393 !r_sc_to_init_cmd_brdcast_req.read() ) 4004 4394 { 4005 r_sc_fsm = SC_UPT_LOCK; 4395 r_sc_fsm = SC_UPT_LOCK; // multi update required 4006 4396 } 4007 4397 else … … 4009 4399 r_sc_fsm = SC_WAIT; 4010 4400 } 4011 } 4012 else // no copies 4013 { 4014 size_t way 4015 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4016 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4401 } 4402 else // no copies 4403 { 4404 size_t way = r_sc_way.read(); 4405 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4406 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4017 4407 4018 4408 // cache update … … 4022 4412 4023 4413 // monitor 4024 if ( m_monitor_ok ) 4414 if ( m_monitor_ok ) 4025 4415 { 4026 4416 vci_addr_t address = m_cmd_sc_addr_fifo.read(); 4027 4028 4417 char buf[80]; 4418 snprintf(buf, 80, "SC_DIR_HIT_WRITE srcid %d", m_cmd_sc_srcid_fifo.read()); 4029 4419 check_monitor( buf, address, r_sc_wdata.read() ); 4030 4420 if ( r_sc_cpt.read()==4 ) … … 4036 4426 if( m_debug_sc_fsm ) 4037 4427 { 4038 std::cout << " <MEMC " << name() << ".SC_DIR_HIT_WRITE> Update cache:" 4428 std::cout << " <MEMC " << name() << ".SC_DIR_HIT_WRITE> Update cache:" 4039 4429 << " way = " << std::dec << way 4040 4430 << " / set = " << set … … 4052 4442 // releases locks to retry later if UPT full 4053 4443 { 4054 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) 4444 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) 4055 4445 { 4056 4446 bool wok = false; … … 4059 4449 size_t trdid = m_cmd_sc_trdid_fifo.read(); 4060 4450 size_t pktid = m_cmd_sc_pktid_fifo.read(); 4061 addr_t 4451 addr_t nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4062 4452 size_t nb_copies = r_sc_count.read(); 4063 4453 4064 wok = m_update_tab.set(true, 4454 wok = m_update_tab.set(true, // it's an update transaction 4065 4455 false, // it's not a broadcast 4066 4456 true, // it needs a response … … 4074 4464 { 4075 4465 // cache update 4076 size_t way 4077 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4078 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4466 size_t way = r_sc_way.read(); 4467 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4468 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4079 4469 4080 4470 m_cache_data[way][set][word] = r_sc_wdata.read(); … … 4083 4473 4084 4474 // monitor 4085 if ( m_monitor_ok ) 4086 { 4475 if ( m_monitor_ok ) 4476 { 4087 4477 vci_addr_t address = m_cmd_sc_addr_fifo.read(); 4088 4089 4478 char buf[80]; 4479 snprintf(buf, 80, "SC_DIR_HIT_WRITE srcid %d", m_cmd_sc_srcid_fifo.read()); 4090 4480 check_monitor( buf, address, r_sc_wdata.read() ); 4091 4481 if ( r_sc_cpt.read()==4 ) … … 4104 4494 if( m_debug_sc_fsm ) 4105 4495 { 4106 std::cout << " <MEMC " << name() << ".SC_UPT_LOCK> Register multi-update transaction in UPT" 4496 std::cout << " <MEMC " << name() << ".SC_UPT_LOCK> Register multi-update transaction in UPT" 4107 4497 << " / wok = " << wok 4108 << " / nline = " << std::hex << nline 4498 << " / nline = " << std::hex << nline 
4109 4499 << " / count = " << nb_copies << std::endl; 4110 4500 } … … 4114 4504 } 4115 4505 ///////////// 4116 case SC_WAIT: 4506 case SC_WAIT: // release all locks and retry from beginning 4117 4507 { 4118 4508 … … 4123 4513 } 4124 4514 #endif 4125 r_sc_fsm = SC_DIR_ LOCK;4515 r_sc_fsm = SC_DIR_REQ; 4126 4516 break; 4127 4517 } 4128 4518 ////////////////// 4129 case SC_UPT_HEAP_LOCK: 4130 { 4131 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_SC ) 4519 case SC_UPT_HEAP_LOCK: // lock the heap 4520 { 4521 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_SC ) 4132 4522 { 4133 4523 … … 4143 4533 } 4144 4534 //////////////// 4145 case SC_UPT_REQ: 4535 case SC_UPT_REQ: // send a first update request to INIT_CMD FSM 4146 4536 { 4147 4537 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) and … … 4160 4550 r_sc_to_init_cmd_is_long = true; 4161 4551 r_sc_to_init_cmd_wdata_high = m_cmd_sc_wdata_fifo.read(); 4162 } 4163 else 4552 } 4553 else 4164 4554 { 4165 4555 r_sc_to_init_cmd_is_long = false; … … 4181 4571 r_sc_to_init_cmd_multi_req = true; 4182 4572 r_sc_cpt = 0; 4183 } 4184 else 4573 } 4574 else // several copies 4185 4575 { 4186 4576 r_sc_fsm = SC_UPT_NEXT; … … 4190 4580 if( m_debug_sc_fsm ) 4191 4581 { 4192 std::cout << " <MEMC " << name() << ".SC_UPT_REQ> Send the first update request to INIT_CMD FSM " 4582 std::cout << " <MEMC " << name() << ".SC_UPT_REQ> Send the first update request to INIT_CMD FSM " 4193 4583 << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() 4194 4584 << " / wdata = " << std::hex << r_sc_wdata.read() 4195 << " / srcid = " << std::dec << r_sc_copy.read() 4585 << " / srcid = " << std::dec << r_sc_copy.read() 4196 4586 << " / inst = " << std::dec << r_sc_copy_inst.read() << std::endl; 4197 4587 } … … 4201 4591 } 4202 4592 ///////////////// 4203 case SC_UPT_NEXT: 4593 case SC_UPT_NEXT: // send a multi-update request to INIT_CMD FSM 4204 4594 { 4205 4595 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) … … 4224 4614 cmd_sc_fifo_get = true; 4225 4615 r_sc_cpt = 0; 4226 } 4227 } 4616 } 4617 } 4228 4618 4229 4619 #if DEBUG_MEMC_SC 4230 4620 if( m_debug_sc_fsm ) 4231 4621 { 4232 std::cout << " <MEMC " << name() << ".SC_UPT_NEXT> Send the next update request to INIT_CMD FSM " 4622 std::cout << " <MEMC " << name() << ".SC_UPT_NEXT> Send the next update request to INIT_CMD FSM " 4233 4623 << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() 4234 4624 << " / wdata = " << std::hex << r_sc_wdata.read() … … 4240 4630 } 4241 4631 ///////////////////// 4242 case SC_BC_TRT_LOCK: // check the TRT to register a PUT transaction4243 { 4244 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4632 case SC_BC_TRT_LOCK: // check the TRT to register a PUT transaction 4633 { 4634 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4245 4635 { 4246 4636 if( !r_sc_to_ixr_cmd_req ) // we can transfer the request to IXR_CMD FSM 4247 4637 { 4248 4638 // fill the data buffer 4249 size_t way 4250 size_t set 4639 size_t way = r_sc_way.read(); 4640 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4251 4641 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4252 4642 for(size_t i = 0; i<m_words; i++) 4253 4643 { 4254 if (i == word) 4644 if (i == word) 4255 4645 { 4256 4646 r_sc_to_ixr_cmd_data[i] = r_sc_wdata.read(); 4257 } 4647 } 4258 4648 else if ( (i == word+1) && (r_sc_cpt.read()==4) ) // 64 bit SC 4259 4649 { 4260 4650 r_sc_to_ixr_cmd_data[i] = m_cmd_sc_wdata_fifo.read(); 4261 } 4262 else 4651 } 4652 else 4263 4653 { 4264 4654 r_sc_to_ixr_cmd_data[i] = m_cache_data[way][set][i]; … … 4267 4657 
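SC_UPT_REQ and SC_UPT_NEXT above walk the list of copies recorded in the heap and post one update request per copy to the INIT_CMD FSM; the UPT entry registered in SC_UPT_LOCK expects exactly that many responses. A flattened, purely illustrative version using an in-memory copy list instead of the hardware heap (Copy and init_cmd_queue are hypothetical names):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // One registered copy of the line: owning L1 cache and data/instruction flag.
  struct Copy { std::uint32_t srcid; bool inst; };

  // Post one multi-update request per copy and return how many acknowledgements
  // the corresponding UPT entry should wait for.
  std::size_t post_updates(const std::vector<Copy>& copies,
                           std::vector<Copy>& init_cmd_queue)
  {
      for (const Copy& c : copies)
          init_cmd_queue.push_back(c);     // one update request per copy
      return copies.size();                // expected number of responses
  }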
size_t wok_index = 0; 4268 4658 bool wok = !m_transaction_tab.full(wok_index); 4269 if ( wok ) 4659 if ( wok ) 4270 4660 { 4271 4661 r_sc_trt_index = wok_index; 4272 r_sc_fsm = SC_BC_UPT_LOCK; 4273 } 4274 else 4662 r_sc_fsm = SC_BC_UPT_LOCK; 4663 } 4664 else 4275 4665 { 4276 4666 r_sc_fsm = SC_WAIT; 4277 4667 } 4278 } 4279 else 4668 } 4669 else 4280 4670 { 4281 4671 r_sc_fsm = SC_WAIT; … … 4288 4678 // write data in cache in case of successful registration 4289 4679 { 4290 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) 4680 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) 4291 4681 { 4292 4682 bool wok = false; … … 4295 4685 size_t trdid = m_cmd_sc_trdid_fifo.read(); 4296 4686 size_t pktid = m_cmd_sc_pktid_fifo.read(); 4297 addr_t 4687 addr_t nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4298 4688 size_t nb_copies = r_sc_count.read(); 4299 4689 4300 4690 // register a broadcast inval transaction in UPT 4301 wok = m_update_tab.set(false, 4691 wok = m_update_tab.set(false, // it's an inval transaction 4302 4692 true, // it's a broadcast 4303 4693 true, // it needs a response … … 4308 4698 nb_copies, 4309 4699 index); 4310 4311 if ( wok ) 4700 4701 if ( wok ) // UPT not full 4312 4702 { 4313 4703 // cache update 4314 size_t way 4315 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4316 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())];4704 size_t way = r_sc_way.read(); 4705 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4706 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4317 4707 4318 4708 m_cache_data[way][set][word] = r_sc_wdata.read(); … … 4321 4711 4322 4712 // monitor 4323 if ( m_monitor_ok ) 4324 { 4713 if ( m_monitor_ok ) 4714 { 4325 4715 vci_addr_t address = m_cmd_sc_addr_fifo.read(); 4326 4327 4716 char buf[80]; 4717 snprintf(buf, 80, "SC_DIR_HIT_WRITE srcid %d", m_cmd_sc_srcid_fifo.read()); 4328 4718 check_monitor( buf, address, r_sc_wdata.read() ); 4329 4719 if ( r_sc_cpt.read()==4 ) … … 4336 4726 { 4337 4727 std::cout << " <MEMC " << name() << ".SC_BC_UPT_LOCK> Register a broadcast inval transaction in UPT" 4338 << " / nline = " << nline 4339 << " / count = " << nb_copies 4340 << " / upt_index = " << index << std::endl; 4728 << " / nline = " << nline 4729 << " / count = " << nb_copies 4730 << " / upt_index = " << index << std::endl; 4341 4731 } 4342 4732 #endif … … 4358 4748 // set TRT 4359 4749 m_transaction_tab.set(r_sc_trt_index.read(), 4360 false, 4750 false, // PUT request to XRAM 4361 4751 m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())], 4362 4752 0, 4363 4753 0, 4364 4754 0, 4365 false, 4366 0, 4755 false, // not a processor read 4756 0, 4367 4757 0, 4368 4758 std::vector<be_t>(m_words,0), … … 4371 4761 // invalidate directory entry 4372 4762 DirectoryEntry entry; 4373 entry.valid 4374 entry.dirty 4375 entry.tag 4763 entry.valid = false; 4764 entry.dirty = false; 4765 entry.tag = 0; 4376 4766 entry.is_cnt = false; 4377 entry.lock 4767 entry.lock = false; 4378 4768 entry.count = 0; 4379 4769 entry.owner.srcid = 0; … … 4383 4773 entry.owner.inst = false; 4384 4774 entry.ptr = 0; 4385 size_t set 4386 size_t way 4775 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4776 size_t way = r_sc_way.read(); 4387 4777 m_cache_directory.write(set, way, entry); 4388 4778 … … 4397 4787 } 4398 4788 #endif 4399 } 4400 else 4789 } 4790 else 4401 4791 { 4402 4792 assert(false and "LOCK ERROR in SC_FSM, STATE = SC_BC_DIR_INVAL"); … … 4408 4798 { 4409 4799 if ( !r_sc_to_init_cmd_multi_req.read() && 4410 
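Before the broadcast invalidate, SC_BC_TRT_LOCK above builds the line image that will be written back to XRAM: the word targeted by the SC (and the following word for a 64-bit, four-flit SC) takes the new data, every other word is copied from the cache. A free-standing sketch of that fill loop, assuming 32-bit words and a std::vector line image:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Line image placed in the PUT data buffer (r_sc_to_ixr_cmd_data in the FSM).
  std::vector<std::uint32_t> build_put_buffer(const std::vector<std::uint32_t>& cache_line,
                                              std::size_t word,
                                              std::uint32_t wdata,
                                              bool is_64bit,
                                              std::uint32_t wdata_high)
  {
      std::vector<std::uint32_t> buf(cache_line.size());
      for (std::size_t i = 0; i < cache_line.size(); ++i) {
          if (i == word)                       buf[i] = wdata;       // SC low word
          else if (is_64bit && i == word + 1)  buf[i] = wdata_high;  // SC high word
          else                                 buf[i] = cache_line[i];
      }
      return buf;
  }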
!r_sc_to_init_cmd_brdcast_req.read()) 4800 !r_sc_to_init_cmd_brdcast_req.read()) 4411 4801 { 4412 4802 r_sc_to_init_cmd_multi_req = false; … … 4424 4814 case SC_BC_XRAM_REQ: // request the IXR FSM to start a put transaction 4425 4815 { 4426 if ( !r_sc_to_ixr_cmd_req ) 4816 if ( !r_sc_to_ixr_cmd_req ) 4427 4817 { 4428 4818 r_sc_to_ixr_cmd_req = true; … … 4442 4832 } 4443 4833 #endif 4444 } 4445 else 4834 } 4835 else 4446 4836 { 4447 4837 std::cout << "MEM_CACHE, SC_BC_XRAM_REQ state : request should not have been previously set" … … 4453 4843 case SC_RSP_FAIL: // request TGT_RSP FSM to send a failure response 4454 4844 { 4455 if( !r_sc_to_tgt_rsp_req ) 4456 { 4457 cmd_sc_fifo_get 4845 if( !r_sc_to_tgt_rsp_req ) 4846 { 4847 cmd_sc_fifo_get = true; 4458 4848 r_sc_cpt = 0; 4459 r_sc_to_tgt_rsp_req 4460 r_sc_to_tgt_rsp_data 4461 r_sc_to_tgt_rsp_srcid 4462 r_sc_to_tgt_rsp_trdid 4463 r_sc_to_tgt_rsp_pktid 4849 r_sc_to_tgt_rsp_req = true; 4850 r_sc_to_tgt_rsp_data = 1; 4851 r_sc_to_tgt_rsp_srcid = m_cmd_sc_srcid_fifo.read(); 4852 r_sc_to_tgt_rsp_trdid = m_cmd_sc_trdid_fifo.read(); 4853 r_sc_to_tgt_rsp_pktid = m_cmd_sc_pktid_fifo.read(); 4464 4854 r_sc_fsm = SC_IDLE; 4465 4855 … … 4476 4866 case SC_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 4477 4867 { 4478 if( !r_sc_to_tgt_rsp_req ) 4868 if( !r_sc_to_tgt_rsp_req ) 4479 4869 { 4480 4870 cmd_sc_fifo_get = true; 4481 4871 r_sc_cpt = 0; 4482 r_sc_to_tgt_rsp_req 4483 r_sc_to_tgt_rsp_data 4484 r_sc_to_tgt_rsp_srcid 4485 r_sc_to_tgt_rsp_trdid 4486 r_sc_to_tgt_rsp_pktid 4872 r_sc_to_tgt_rsp_req = true; 4873 r_sc_to_tgt_rsp_data = 0; 4874 r_sc_to_tgt_rsp_srcid = m_cmd_sc_srcid_fifo.read(); 4875 r_sc_to_tgt_rsp_trdid = m_cmd_sc_trdid_fifo.read(); 4876 r_sc_to_tgt_rsp_pktid = m_cmd_sc_pktid_fifo.read(); 4487 4877 r_sc_fsm = SC_IDLE; 4488 4878 … … 4499 4889 case SC_MISS_TRT_LOCK: // cache miss : request access to transaction Table 4500 4890 { 4501 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4891 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4502 4892 { 4503 4893 size_t index = 0; … … 4511 4901 if( m_debug_sc_fsm ) 4512 4902 { 4513 std::cout << " <MEMC " << name() << ".SC_MISS_TRT_LOCK> Check TRT state" 4903 std::cout << " <MEMC " << name() << ".SC_MISS_TRT_LOCK> Check TRT state" 4514 4904 << " / hit_read = " << hit_read 4515 4905 << " / hit_write = " << hit_write 4516 << " / wok = " << wok 4906 << " / wok = " << wok 4517 4907 << " / index = " << index << std::endl; 4518 4908 } … … 4522 4912 { 4523 4913 r_sc_fsm = SC_WAIT; 4524 } 4525 else 4914 } 4915 else 4526 4916 { 4527 4917 r_sc_trt_index = index; … … 4532 4922 } 4533 4923 //////////////////// 4534 case SC_MISS_TRT_SET: 4535 { 4536 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4924 case SC_MISS_TRT_SET: // register the GET transaction in TRT 4925 { 4926 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4537 4927 { 4538 4928 std::vector<be_t> be_vector; … … 4540 4930 be_vector.clear(); 4541 4931 data_vector.clear(); 4542 for ( size_t i=0; i<m_words; i++ ) 4543 { 4932 for ( size_t i=0; i<m_words; i++ ) 4933 { 4544 4934 be_vector.push_back(0); 4545 4935 data_vector.push_back(0); … … 4547 4937 4548 4938 m_transaction_tab.set(r_sc_trt_index.read(), 4549 true, // read request4939 true, // read request 4550 4940 m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()], 4551 4941 m_cmd_sc_srcid_fifo.read(), 4552 4942 m_cmd_sc_trdid_fifo.read(), 4553 4943 m_cmd_sc_pktid_fifo.read(), 4554 false, 4944 false, // write request from processor 4555 4945 0, 4556 4946 0, 4557 4947 be_vector, 4558 4948 data_vector); 
4559 r_sc_fsm = SC_MISS_XRAM_REQ; 4949 r_sc_fsm = SC_MISS_XRAM_REQ; 4560 4950 4561 4951 #if DEBUG_MEMC_SC … … 4563 4953 { 4564 4954 std::cout << " <MEMC " << name() << ".SC_MISS_TRT_SET> Register a GET transaction in TRT" << std::hex 4565 << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] 4955 << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] 4566 4956 << " / trt_index = " << r_sc_trt_index.read() << std::endl; 4567 4957 } … … 4571 4961 } 4572 4962 ////////////////////// 4573 case SC_MISS_XRAM_REQ: 4574 { 4575 if ( !r_sc_to_ixr_cmd_req ) 4963 case SC_MISS_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line 4964 { 4965 if ( !r_sc_to_ixr_cmd_req ) 4576 4966 { 4577 4967 r_sc_to_ixr_cmd_req = true; … … 4596 4986 4597 4987 ////////////////////////////////////////////////////////////////////////////// 4598 // 4988 // INIT_CMD FSM 4599 4989 ////////////////////////////////////////////////////////////////////////////// 4600 4990 // The INIT_CMD fsm controls the VCI CMD initiator port on the coherence … … 4603 4993 // It implements a round-robin priority between the three possible client FSMs 4604 4994 // XRAM_RSP, WRITE and SC. Each FSM can request two types of services: 4605 // - r_xram_rsp_to_init_cmd_multi_req : multi-inval 4606 // r_xram_rsp_to_init_cmd_brdcast_req : broadcast-inval 4607 // - r_write_to_init_cmd_multi_req : multi-update 4608 // r_write_to_init_cmd_brdcast_req : broadcast-inval 4609 // - r_sc_to_init_cmd_multi_req : multi-update 4995 // - r_xram_rsp_to_init_cmd_multi_req : multi-inval 4996 // r_xram_rsp_to_init_cmd_brdcast_req : broadcast-inval 4997 // - r_write_to_init_cmd_multi_req : multi-update 4998 // r_write_to_init_cmd_brdcast_req : broadcast-inval 4999 // - r_sc_to_init_cmd_multi_req : multi-update 4610 5000 // r_sc_to_init_cmd_brdcast_req : broadcast-inval 4611 5001 // 4612 // An inval request is a single cell VCI write command containing the 5002 // An inval request is a single cell VCI write command containing the 4613 5003 // index of the line to be invalidated. 4614 // An update request is a multi-cells VCI write command : The first cell 4615 // contains the index of the cache line to be updated. The second cell contains 5004 // An update request is a multi-cells VCI write command : The first cell 5005 // contains the index of the cache line to be updated. The second cell contains 4616 5006 // the index of the first modified word in the line. The following cells 4617 5007 // contain the data. 
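The INIT_CMD comment block above defines the two coherence command formats: an invalidate is a single-flit VCI write carrying the line index, an update is a multi-flit write carrying the line index, the index of the first modified word, then the data words. The sketch below models those payloads as plain structs; how the line index is actually split between the VCI address and data fields is simplified here, so Flit and the two helpers are illustrative only:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Illustrative flit model; the real FSM drives the p_vci_ini signals directly.
  struct Flit { std::uint64_t address; std::uint32_t wdata; bool eop; };

  // Invalidate: a single-flit VCI write carrying the index of the line.
  std::vector<Flit> make_inval(std::uint64_t target_base, std::uint64_t nline)
  {
      return { Flit{ target_base, static_cast<std::uint32_t>(nline), true } };
  }

  // Update: line index, then index of the first modified word, then the data.
  std::vector<Flit> make_update(std::uint64_t target_base, std::uint64_t nline,
                                std::uint32_t first_word,
                                const std::vector<std::uint32_t>& data)
  {
      std::vector<Flit> cmd;
      cmd.push_back(Flit{ target_base, static_cast<std::uint32_t>(nline), false });
      cmd.push_back(Flit{ target_base, first_word, false });
      for (std::size_t i = 0; i < data.size(); ++i)
          cmd.push_back(Flit{ target_base, data[i], i + 1 == data.size() });
      return cmd;
  }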
4618 5008 /////////////////////////////////////////////////////////////////////////////// 4619 5009 4620 switch ( r_init_cmd_fsm.read() ) 5010 switch ( r_init_cmd_fsm.read() ) 4621 5011 { 4622 //////////////////////// 4623 case INIT_CMD_UPDT_IDLE: 5012 //////////////////////// 5013 case INIT_CMD_UPDT_IDLE: // XRAM_RSP FSM has highest priority 4624 5014 { 4625 5015 if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 4626 r_xram_rsp_to_init_cmd_multi_req.read() ) 5016 r_xram_rsp_to_init_cmd_multi_req.read() ) 4627 5017 { 4628 5018 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4629 5019 m_cpt_inval++; 4630 } 4631 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 5020 } 5021 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 4632 5022 { 4633 5023 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 4634 5024 m_cpt_inval++; 4635 } 5025 } 4636 5026 else if ( m_write_to_init_cmd_inst_fifo.rok() || 4637 r_write_to_init_cmd_multi_req.read() ) 5027 r_write_to_init_cmd_multi_req.read() ) 4638 5028 { 4639 5029 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4640 5030 m_cpt_update++; 4641 } 5031 } 4642 5032 else if ( r_write_to_init_cmd_brdcast_req.read() ) 4643 5033 { 4644 5034 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 4645 5035 m_cpt_inval++; 4646 } 5036 } 4647 5037 else if ( m_sc_to_init_cmd_inst_fifo.rok() || 4648 r_sc_to_init_cmd_multi_req.read() ) 5038 r_sc_to_init_cmd_multi_req.read() ) 4649 5039 { 4650 5040 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4651 5041 m_cpt_update++; 4652 } 5042 } 4653 5043 else if( r_sc_to_init_cmd_brdcast_req.read() ) 4654 5044 { … … 4659 5049 } 4660 5050 ///////////////////////// 4661 case INIT_CMD_INVAL_IDLE: 5051 case INIT_CMD_INVAL_IDLE: // WRITE FSM has highest priority 4662 5052 { 4663 5053 if ( m_write_to_init_cmd_inst_fifo.rok() || 4664 r_write_to_init_cmd_multi_req.read() ) 5054 r_write_to_init_cmd_multi_req.read() ) 4665 5055 { 4666 5056 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4667 5057 m_cpt_update++; 4668 } 5058 } 4669 5059 else if ( r_write_to_init_cmd_brdcast_req.read() ) 4670 5060 { 4671 5061 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 4672 5062 m_cpt_inval++; 4673 } 5063 } 4674 5064 else if ( m_sc_to_init_cmd_inst_fifo.rok() || 4675 r_sc_to_init_cmd_multi_req.read() ) 5065 r_sc_to_init_cmd_multi_req.read() ) 4676 5066 { 4677 5067 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4678 5068 m_cpt_update++; 4679 } 5069 } 4680 5070 else if( r_sc_to_init_cmd_brdcast_req.read() ) 4681 5071 { 4682 5072 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 4683 5073 m_cpt_inval++; 4684 } 5074 } 4685 5075 else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 4686 r_xram_rsp_to_init_cmd_multi_req.read() ) 5076 r_xram_rsp_to_init_cmd_multi_req.read() ) 4687 5077 { 4688 5078 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4689 5079 m_cpt_inval++; 4690 } 4691 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 5080 } 5081 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 4692 5082 { 4693 5083 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; … … 4697 5087 } 4698 5088 ////////////////////////// 4699 case INIT_CMD_SC_UPDT_IDLE: 5089 case INIT_CMD_SC_UPDT_IDLE: // SC FSM has highest priority 4700 5090 { 4701 5091 if ( m_sc_to_init_cmd_inst_fifo.rok() || 4702 r_sc_to_init_cmd_multi_req.read() ) 5092 r_sc_to_init_cmd_multi_req.read() ) 4703 5093 { 4704 5094 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4705 5095 m_cpt_update++; 4706 } 5096 } 4707 5097 else if( r_sc_to_init_cmd_brdcast_req.read() ) 4708 5098 { 4709 5099 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 4710 5100 m_cpt_inval++; 4711 } 5101 } 4712 5102 else if ( 
m_xram_rsp_to_init_cmd_inst_fifo.rok() || 4713 r_xram_rsp_to_init_cmd_multi_req.read() ) 5103 r_xram_rsp_to_init_cmd_multi_req.read() ) 4714 5104 { 4715 5105 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4716 5106 m_cpt_inval++; 4717 } 4718 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 5107 } 5108 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 4719 5109 { 4720 5110 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 4721 5111 m_cpt_inval++; 4722 } 5112 } 4723 5113 else if ( m_write_to_init_cmd_inst_fifo.rok() || 4724 r_write_to_init_cmd_multi_req.read() ) 5114 r_write_to_init_cmd_multi_req.read() ) 4725 5115 { 4726 5116 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4727 5117 m_cpt_update++; 4728 } 5118 } 4729 5119 else if ( r_write_to_init_cmd_brdcast_req.read() ) 4730 5120 { … … 4735 5125 } 4736 5126 ////////////////////////// 4737 case INIT_CMD_INVAL_NLINE: 5127 case INIT_CMD_INVAL_NLINE: // send a multi-inval (from XRAM_RSP) 4738 5128 { 4739 5129 if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() ) 4740 5130 { 4741 if ( p_vci_ini.cmdack ) 5131 if ( p_vci_ini.cmdack ) 4742 5132 { 4743 5133 m_cpt_inval_mult++; … … 4745 5135 xram_rsp_to_init_cmd_fifo_get = true; 4746 5136 } 4747 } 4748 else 5137 } 5138 else 4749 5139 { 4750 5140 if( r_xram_rsp_to_init_cmd_multi_req.read() ) r_xram_rsp_to_init_cmd_multi_req = false; … … 4754 5144 } 4755 5145 /////////////////////////// 4756 case INIT_CMD_XRAM_BRDCAST: 4757 { 4758 if ( p_vci_ini.cmdack ) 5146 case INIT_CMD_XRAM_BRDCAST: // send a broadcast-inval (from XRAM_RSP) 5147 { 5148 if ( p_vci_ini.cmdack ) 4759 5149 { 4760 5150 m_cpt_inval_brdcast++; … … 4765 5155 } 4766 5156 //////////////////////////// 4767 case INIT_CMD_WRITE_BRDCAST: 4768 { 4769 if( p_vci_ini.cmdack ) 5157 case INIT_CMD_WRITE_BRDCAST: // send a broadcast-inval (from WRITE FSM) 5158 { 5159 if( p_vci_ini.cmdack ) 4770 5160 { 4771 5161 … … 4773 5163 if( m_debug_init_cmd_fsm ) 4774 5164 { 4775 std::cout << " <MEMC " << name() << ".INIT_CMD_WRITE_BRDCAST> Broadcast-Inval for line " 5165 std::cout << " <MEMC " << name() << ".INIT_CMD_WRITE_BRDCAST> Broadcast-Inval for line " 4776 5166 << r_write_to_init_cmd_nline.read() << std::endl; 4777 5167 } … … 4786 5176 case INIT_CMD_UPDT_NLINE: // send nline for a multi-update (from WRITE FSM) 4787 5177 { 4788 if ( m_write_to_init_cmd_inst_fifo.rok() ) 5178 if ( m_write_to_init_cmd_inst_fifo.rok() ) 4789 5179 { 4790 5180 if ( p_vci_ini.cmdack ) … … 4794 5184 // write_to_init_cmd_fifo_get = true; 4795 5185 } 4796 } 4797 else 5186 } 5187 else 4798 5188 { 4799 5189 if ( r_write_to_init_cmd_multi_req.read() ) r_write_to_init_cmd_multi_req = false; … … 4812 5202 case INIT_CMD_UPDT_DATA: // send the data for a multi-update (from WRITE FSM) 4813 5203 { 4814 if ( p_vci_ini.cmdack ) 4815 { 4816 if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) 5204 if ( p_vci_ini.cmdack ) 5205 { 5206 if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) 4817 5207 { 4818 5208 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4819 5209 write_to_init_cmd_fifo_get = true; 4820 } 4821 else 5210 } 5211 else 4822 5212 { 4823 5213 r_init_cmd_cpt = r_init_cmd_cpt.read() + 1; … … 4827 5217 } 4828 5218 ///////////////////////// 4829 case INIT_CMD_SC_BRDCAST: 4830 { 4831 if( p_vci_ini.cmdack ) 5219 case INIT_CMD_SC_BRDCAST: // send a broadcast-inval (from SC FSM) 5220 { 5221 if( p_vci_ini.cmdack ) 4832 5222 { 4833 5223 m_cpt_inval_brdcast++; … … 4847 5237 r_init_cmd_fsm = INIT_CMD_SC_UPDT_INDEX; 4848 5238 } 4849 } 4850 else 5239 } 5240 else 4851 5241 { 4852 5242 if( 
r_sc_to_init_cmd_multi_req.read() ) r_sc_to_init_cmd_multi_req = false; … … 4862 5252 } 4863 5253 /////////////////////////// 4864 case INIT_CMD_SC_UPDT_DATA: // send first data for a multi-update (from SC FSM) 4865 { 4866 if ( p_vci_ini.cmdack ) 5254 case INIT_CMD_SC_UPDT_DATA: // send first data for a multi-update (from SC FSM) 5255 { 5256 if ( p_vci_ini.cmdack ) 4867 5257 { 4868 5258 if ( r_sc_to_init_cmd_is_long.read() ) 4869 5259 { 4870 5260 r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA_HIGH; 4871 } 4872 else 5261 } 5262 else 4873 5263 { 4874 5264 sc_to_init_cmd_fifo_get = true; … … 4881 5271 case INIT_CMD_SC_UPDT_DATA_HIGH: // send second data for a multi-update (from SC FSM) 4882 5272 { 4883 if ( p_vci_ini.cmdack ) 5273 if ( p_vci_ini.cmdack ) 4884 5274 { 4885 5275 sc_to_init_cmd_fifo_get = true; … … 4891 5281 4892 5282 ///////////////////////////////////////////////////////////////////// 4893 // 5283 // TGT_RSP FSM 4894 5284 ///////////////////////////////////////////////////////////////////// 4895 5285 // The TGT_RSP fsm sends the responses on the VCI target port … … 4904 5294 ///////////////////////////////////////////////////////////////////// 4905 5295 4906 switch ( r_tgt_rsp_fsm.read() ) 5296 switch ( r_tgt_rsp_fsm.read() ) 4907 5297 { 4908 5298 /////////////////////// 4909 case TGT_RSP_READ_IDLE: 5299 case TGT_RSP_READ_IDLE: // write requests have the highest priority 4910 5300 { 4911 5301 if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 4912 5302 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4913 else if ( r_xram_rsp_to_tgt_rsp_req ) 5303 else if ( r_xram_rsp_to_tgt_rsp_req ) 4914 5304 { 4915 5305 r_tgt_rsp_fsm = TGT_RSP_XRAM; … … 4918 5308 else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 4919 5309 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 4920 else if ( r_read_to_tgt_rsp_req ) 5310 else if ( r_read_to_tgt_rsp_req ) 4921 5311 { 4922 5312 r_tgt_rsp_fsm = TGT_RSP_READ; … … 4926 5316 } 4927 5317 //////////////////////// 4928 case TGT_RSP_WRITE_IDLE: 5318 case TGT_RSP_WRITE_IDLE: // sc requests have the highest priority 4929 5319 { 4930 5320 if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4931 else if ( r_xram_rsp_to_tgt_rsp_req ) 5321 else if ( r_xram_rsp_to_tgt_rsp_req ) 4932 5322 { 4933 5323 r_tgt_rsp_fsm = TGT_RSP_XRAM; … … 4936 5326 else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 4937 5327 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 4938 else if ( r_read_to_tgt_rsp_req ) 5328 else if ( r_read_to_tgt_rsp_req ) 4939 5329 { 4940 5330 r_tgt_rsp_fsm = TGT_RSP_READ; … … 4946 5336 } 4947 5337 /////////////////////// 4948 case TGT_RSP_SC_IDLE: 4949 { 4950 if ( r_xram_rsp_to_tgt_rsp_req ) 5338 case TGT_RSP_SC_IDLE: // xram_rsp requests have the highest priority 5339 { 5340 if ( r_xram_rsp_to_tgt_rsp_req ) 4951 5341 { 4952 5342 r_tgt_rsp_fsm = TGT_RSP_XRAM; … … 4955 5345 else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 4956 5346 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 4957 else if ( r_read_to_tgt_rsp_req ) 5347 else if ( r_read_to_tgt_rsp_req ) 4958 5348 { 4959 5349 r_tgt_rsp_fsm = TGT_RSP_READ; … … 4965 5355 } 4966 5356 /////////////////////// 4967 case TGT_RSP_XRAM_IDLE: 5357 case TGT_RSP_XRAM_IDLE: // init requests have the highest priority 4968 5358 { 4969 5359 4970 5360 if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 4971 5361 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 
4972 else if ( r_read_to_tgt_rsp_req ) 5362 else if ( r_read_to_tgt_rsp_req ) 4973 5363 { 4974 5364 r_tgt_rsp_fsm = TGT_RSP_READ; … … 4977 5367 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 4978 5368 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4979 else if ( r_xram_rsp_to_tgt_rsp_req ) 5369 else if ( r_xram_rsp_to_tgt_rsp_req ) 4980 5370 { 4981 5371 r_tgt_rsp_fsm = TGT_RSP_XRAM; … … 4985 5375 } 4986 5376 /////////////////////// 4987 case TGT_RSP_INIT_IDLE: 5377 case TGT_RSP_INIT_IDLE: // cleanup requests have the highest priority 4988 5378 { 4989 5379 if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 4990 else if ( r_read_to_tgt_rsp_req ) 5380 else if ( r_read_to_tgt_rsp_req ) 4991 5381 { 4992 5382 r_tgt_rsp_fsm = TGT_RSP_READ; … … 4995 5385 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 4996 5386 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4997 else if ( r_xram_rsp_to_tgt_rsp_req ) 5387 else if ( r_xram_rsp_to_tgt_rsp_req ) 4998 5388 { 4999 5389 r_tgt_rsp_fsm = TGT_RSP_XRAM; … … 5004 5394 } 5005 5395 /////////////////////// 5006 case TGT_RSP_CLEANUP_IDLE: 5007 { 5008 if ( r_read_to_tgt_rsp_req ) 5396 case TGT_RSP_CLEANUP_IDLE: // read requests have the highest priority 5397 { 5398 if ( r_read_to_tgt_rsp_req ) 5009 5399 { 5010 5400 r_tgt_rsp_fsm = TGT_RSP_READ; … … 5013 5403 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 5014 5404 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 5015 else if ( r_xram_rsp_to_tgt_rsp_req ) 5405 else if ( r_xram_rsp_to_tgt_rsp_req ) 5016 5406 { 5017 5407 r_tgt_rsp_fsm = TGT_RSP_XRAM; … … 5023 5413 } 5024 5414 ////////////////// 5025 case TGT_RSP_READ: // send the response to a read5026 { 5027 if ( p_vci_tgt.rspack ) 5415 case TGT_RSP_READ: // send the response to a read 5416 { 5417 if ( p_vci_tgt.rspack ) 5028 5418 { 5029 5419 … … 5038 5428 } 5039 5429 #endif 5040 if ( r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1) ) 5430 if ( r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1) ) 5041 5431 { 5042 5432 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 5043 5433 r_read_to_tgt_rsp_req = false; 5044 } 5045 else 5434 } 5435 else 5046 5436 { 5047 5437 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; … … 5051 5441 } 5052 5442 /////////////////// 5053 case TGT_RSP_WRITE: 5054 { 5055 if ( p_vci_tgt.rspack ) 5443 case TGT_RSP_WRITE: // send the write acknowledge 5444 { 5445 if ( p_vci_tgt.rspack ) 5056 5446 { 5057 5447 … … 5070 5460 } 5071 5461 /////////////////// 5072 case TGT_RSP_CLEANUP: // pas clair pour moi (AG)5073 { 5074 if ( p_vci_tgt.rspack ) 5462 case TGT_RSP_CLEANUP: // pas clair pour moi (AG) 5463 { 5464 if ( p_vci_tgt.rspack ) 5075 5465 { 5076 5466 … … 5089 5479 } 5090 5480 ////////////////// 5091 case TGT_RSP_SC: 5092 { 5093 if ( p_vci_tgt.rspack ) 5481 case TGT_RSP_SC: // send one atomic word response 5482 { 5483 if ( p_vci_tgt.rspack ) 5094 5484 { 5095 5485 … … 5109 5499 5110 5500 /////////////////////// 5111 case TGT_RSP_XRAM: 5112 { 5113 if ( p_vci_tgt.rspack ) 5501 case TGT_RSP_XRAM: // send the response after XRAM access 5502 { 5503 if ( p_vci_tgt.rspack ) 5114 5504 { 5115 5505 … … 5124 5514 } 5125 5515 #endif 5126 if ( (r_tgt_rsp_cpt.read() == 5516 if ( (r_tgt_rsp_cpt.read() == 5127 5517 (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1)) 5128 || r_xram_rsp_to_tgt_rsp_rerror.read() ) 5518 || r_xram_rsp_to_tgt_rsp_rerror.read() ) 5129 5519 { 5130 5520 r_tgt_rsp_fsm = 
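The chains of *_IDLE states above (both in INIT_CMD and in TGT_RSP) implement a rotating priority: after serving client k the FSM parks in the idle state where k has the lowest priority, and the other requesters are scanned in a fixed circular order. The same arbitration collapses to a few lines in software; the six request flags mirror the six TGT_RSP clients, but the function is a generic illustration:

  #include <array>

  // Rotating priority: start scanning just after the last client served and
  // grant the first one that is requesting (read, write, sc, xram_rsp,
  // init_rsp, cleanup in the TGT_RSP case).
  int pick_next(const std::array<bool, 6>& req, int last_served)
  {
      for (int i = 1; i <= 6; ++i) {
          int c = (last_served + i) % 6;
          if (req[c]) return c;          // highest remaining priority that requests
      }
      return -1;                         // nobody requests: stay idle
  }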
TGT_RSP_XRAM_IDLE; 5131 5521 r_xram_rsp_to_tgt_rsp_req = false; 5132 } 5133 else 5522 } 5523 else 5134 5524 { 5135 5525 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; … … 5139 5529 } 5140 5530 ////////////////// 5141 case TGT_RSP_INIT: 5142 { 5143 if ( p_vci_tgt.rspack ) 5531 case TGT_RSP_INIT: // send the write response after coherence transaction 5532 { 5533 if ( p_vci_tgt.rspack ) 5144 5534 { 5145 5535 … … 5160 5550 5161 5551 //////////////////////////////////////////////////////////////////////////////////// 5162 // 5552 // ALLOC_UPT FSM 5163 5553 //////////////////////////////////////////////////////////////////////////////////// 5164 5554 // The ALLOC_UPT FSM allocates the access to the Update/Inval Table (UPT). 5165 // with a round robin priority between three FSMs : INIT_RSP > WRITE > XRAM_RSP > CLEANUP 5555 // with a round robin priority between three FSMs : INIT_RSP > WRITE > XRAM_RSP > CLEANUP 5166 5556 // - The WRITE FSM initiates update transactions and sets new entry in UPT. 5167 5557 // - The XRAM_RSP FSM initiates inval transactions and sets new entry in UPT. … … 5171 5561 ///////////////////////////////////////////////////////////////////////////////////// 5172 5562 5173 switch ( r_alloc_upt_fsm.read() ) 5563 switch ( r_alloc_upt_fsm.read() ) 5174 5564 { 5175 5565 5176 5566 //////////////////////// 5177 5567 case ALLOC_UPT_INIT_RSP: 5178 if ( (r_init_rsp_fsm.read() != INIT_RSP_UPT_LOCK) && 5179 (r_init_rsp_fsm.read() != INIT_RSP_UPT_CLEAR) ) 5180 { 5181 if ((r_write_fsm.read() == WRITE_UPT_LOCK) || 5182 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5183 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5184 else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5185 else if ((r_sc_fsm.read() == SC_UPT_LOCK) || 5186 (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; 5568 if (( r_init_rsp_fsm.read() != INIT_RSP_UPT_LOCK ) && 5569 ( r_init_rsp_fsm.read() != INIT_RSP_UPT_CLEAR )) 5570 { 5571 if (( r_write_fsm.read() == WRITE_UPT_LOCK ) || 5572 ( r_write_fsm.read() == WRITE_BC_UPT_LOCK )) 5573 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5574 5575 else if ( r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK ) 5576 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5577 5578 else if ( r_cleanup_fsm.read() == CLEANUP_UPT_LOCK ) 5579 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5580 5581 else if (( r_sc_fsm.read() == SC_UPT_LOCK ) || 5582 ( r_sc_fsm.read() == SC_BC_UPT_LOCK )) 5583 r_alloc_upt_fsm = ALLOC_UPT_SC; 5187 5584 } 5188 5585 break; … … 5190 5587 ///////////////////// 5191 5588 case ALLOC_UPT_WRITE: 5192 if ( (r_write_fsm.read() != WRITE_UPT_LOCK) && 5193 (r_write_fsm.read() != WRITE_BC_UPT_LOCK)) 5194 { 5195 if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5196 else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5197 else if ((r_sc_fsm.read() == SC_UPT_LOCK) || 5198 (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; 5199 else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5589 if (( r_write_fsm.read() != WRITE_UPT_LOCK ) && 5590 ( r_write_fsm.read() != WRITE_BC_UPT_LOCK )) 5591 { 5592 if ( r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK ) 5593 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5594 5595 else if ( r_cleanup_fsm.read() == CLEANUP_UPT_LOCK ) 5596 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5597 5598 else if (( r_sc_fsm.read() == SC_UPT_LOCK ) || 5599 ( r_sc_fsm.read() 
== SC_BC_UPT_LOCK )) 5600 r_alloc_upt_fsm = ALLOC_UPT_SC; 5601 5602 else if ( r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK ) 5603 r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5200 5604 } 5201 5605 break; … … 5203 5607 //////////////////////// 5204 5608 case ALLOC_UPT_XRAM_RSP: 5205 if (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK) 5206 { 5207 if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5208 else if ((r_sc_fsm.read() == SC_UPT_LOCK) || 5209 (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; 5210 else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5211 else if ((r_write_fsm.read() == WRITE_UPT_LOCK) || 5212 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5609 if (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK) 5610 { 5611 if ( r_cleanup_fsm.read() == CLEANUP_UPT_LOCK ) 5612 r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5613 5614 else if (( r_sc_fsm.read() == SC_UPT_LOCK ) || 5615 ( r_sc_fsm.read() == SC_BC_UPT_LOCK )) 5616 r_alloc_upt_fsm = ALLOC_UPT_SC; 5617 5618 else if ( r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK ) 5619 r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5620 5621 else if (( r_write_fsm.read() == WRITE_UPT_LOCK ) || 5622 ( r_write_fsm.read() == WRITE_BC_UPT_LOCK )) 5623 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5213 5624 } 5214 5625 break; … … 5218 5629 if(r_cleanup_fsm.read() != CLEANUP_UPT_LOCK ) 5219 5630 { 5220 if ((r_sc_fsm.read() == SC_UPT_LOCK) || 5221 (r_sc_fsm.read() == SC_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC; 5222 else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5223 else if ((r_write_fsm.read() == WRITE_UPT_LOCK) || 5224 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5225 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5631 if (( r_sc_fsm.read() == SC_UPT_LOCK ) || 5632 ( r_sc_fsm.read() == SC_BC_UPT_LOCK )) 5633 r_alloc_upt_fsm = ALLOC_UPT_SC; 5634 5635 else if ( r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK ) 5636 r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5637 5638 else if (( r_write_fsm.read() == WRITE_UPT_LOCK ) || 5639 ( r_write_fsm.read() == WRITE_BC_UPT_LOCK )) 5640 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5641 5642 else if ( r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK ) 5643 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5226 5644 } 5227 5645 break; 5228 5646 5229 5647 ////////////////////////// 5230 5648 case ALLOC_UPT_SC: 5231 if( (r_sc_fsm.read() != SC_UPT_LOCK) && 5232 (r_sc_fsm.read() != SC_BC_UPT_LOCK)) 5233 { 5234 if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5235 else if ((r_write_fsm.read() == WRITE_UPT_LOCK) || 5236 (r_write_fsm.read() == WRITE_BC_UPT_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5237 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5238 else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP; 5649 if (( r_sc_fsm.read() != SC_UPT_LOCK ) && 5650 ( r_sc_fsm.read() != SC_BC_UPT_LOCK )) 5651 { 5652 if ( r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK ) 5653 r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP; 5654 5655 else if (( r_write_fsm.read() == WRITE_UPT_LOCK ) || 5656 ( r_write_fsm.read() == WRITE_BC_UPT_LOCK )) 5657 r_alloc_upt_fsm = ALLOC_UPT_WRITE; 5658 5659 else if ( r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK ) 5660 r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP; 5661 5662 else if ( r_cleanup_fsm.read() == CLEANUP_UPT_LOCK ) 5663 r_alloc_upt_fsm = 
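Each ALLOC_* FSM in this part of the file is a lock arbiter: the current owner keeps the resource as long as it sits in one of its *_LOCK (or, with this changeset, *_REQ) states, and on release the lock is granted to the next requesting FSM in a fixed circular order. A condensed, hypothetical model of that policy (client identifiers and the request array are illustrative):

  #include <array>

  // Five clients as for ALLOC_UPT: INIT_RSP, WRITE, XRAM_RSP, CLEANUP, SC.
  int allocate(int owner, bool owner_still_needs_it, const std::array<bool, 5>& req)
  {
      if (owner_still_needs_it) return owner;   // e.g. WRITE still in WRITE_UPT_LOCK
      for (int i = 1; i <= 5; ++i) {
          int c = (owner + i) % 5;
          if (req[c]) return c;                 // grant to the next requester
      }
      return owner;                             // nobody asks: keep the allocation
  }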
ALLOC_UPT_CLEANUP; 5239 5664 } 5240 5665 break; … … 5243 5668 5244 5669 //////////////////////////////////////////////////////////////////////////////////// 5245 // 5670 // ALLOC_DIR FSM 5246 5671 //////////////////////////////////////////////////////////////////////////////////// 5247 5672 // The ALLOC_DIR FSM allocates the access to the directory and … … 5251 5676 ///////////////////////////////////////////////////////////////////////////////////// 5252 5677 5253 switch ( r_alloc_dir_fsm.read() ) 5678 switch ( r_alloc_dir_fsm.read() ) 5254 5679 { 5680 case ALLOC_DIR_RESET: 5681 // Initializes the directory one SET each cycle. All the WAYS of a SET are 5682 // initialize in parallel 5683 5684 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 5685 5686 if (r_alloc_dir_reset_cpt.read() == (m_sets - 1)) { 5687 m_cache_directory.init(); 5688 5689 r_alloc_dir_fsm = ALLOC_DIR_READ; 5690 } 5691 break; 5692 5255 5693 //////////////////// 5256 5694 case ALLOC_DIR_READ: 5257 if ( ( (r_read_fsm.read() != READ_DIR_LOCK) && 5258 (r_read_fsm.read() != READ_TRT_LOCK) && 5259 (r_read_fsm.read() != READ_HEAP_LOCK)) 5695 if ((( r_read_fsm.read() != READ_DIR_REQ ) && 5696 ( r_read_fsm.read() != READ_DIR_LOCK ) && 5697 ( r_read_fsm.read() != READ_TRT_LOCK ) && 5698 ( r_read_fsm.read() != READ_HEAP_REQ )) 5260 5699 || 5261 ( (r_read_fsm.read() == READ_HEAP_LOCK)&&5262 (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ))5700 (( r_read_fsm.read() == READ_HEAP_REQ ) && 5701 ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ )) 5263 5702 || 5264 ( (r_read_fsm.read() == READ_TRT_LOCK) && 5265 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) ) ) 5266 { 5267 if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5268 else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 5269 else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5270 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5703 (( r_read_fsm.read() == READ_TRT_LOCK ) && 5704 ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ))) 5705 { 5706 if (r_write_fsm.read() == WRITE_DIR_REQ) 5707 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5708 5709 else if (r_sc_fsm.read() == SC_DIR_REQ) 5710 r_alloc_dir_fsm = ALLOC_DIR_SC; 5711 5712 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ ) 5713 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5714 5715 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 5716 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5271 5717 } 5272 5718 break; … … 5274 5720 ///////////////////// 5275 5721 case ALLOC_DIR_WRITE: 5276 if ( ((r_write_fsm.read() != WRITE_DIR_LOCK) && 5277 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) && 5278 (r_write_fsm.read() != WRITE_DIR_READ) && 5279 (r_write_fsm.read() != WRITE_DIR_HIT) && 5280 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) && 5281 (r_write_fsm.read() != WRITE_BC_UPT_LOCK) && 5282 (r_write_fsm.read() != WRITE_UPT_LOCK) && 5283 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK)) 5722 if ((( r_write_fsm.read() != WRITE_DIR_REQ ) && 5723 ( r_write_fsm.read() != WRITE_DIR_LOCK ) && 5724 ( r_write_fsm.read() != WRITE_DIR_READ ) && 5725 ( r_write_fsm.read() != WRITE_DIR_HIT ) && 5726 ( r_write_fsm.read() != WRITE_BC_TRT_LOCK ) && 5727 ( r_write_fsm.read() != WRITE_BC_UPT_LOCK ) && 5728 ( r_write_fsm.read() != WRITE_MISS_TRT_LOCK ) && 5729 ( r_write_fsm.read() != WRITE_UPT_LOCK ) && 5730 ( r_write_fsm.read() != WRITE_UPT_HEAP_LOCK )) 5284 5731 || 5285 ( (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)&&5286 (r_alloc_heap_fsm.read() == 
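The new ALLOC_DIR_RESET state introduced by this changeset holds the directory for m_sets cycles after reset, one set per cycle with all ways of a set handled in parallel, before any client FSM can take the lock. A small sketch of that schedule; the per-set initialisation itself is left as a comment because it lives in the directory model, not in this FSM:

  #include <cstddef>

  // One call models one cycle spent in ALLOC_DIR_RESET; the function returns
  // true when the last set has been processed, so a full reset costs exactly
  // m_sets cycles before the FSM moves on to ALLOC_DIR_READ.
  bool reset_step(std::size_t& cpt, std::size_t m_sets)
  {
      // directory.init_set(cpt);            // hypothetical per-set clear, all ways at once
      if (cpt == m_sets - 1) return true;    // last set: leave ALLOC_DIR_RESET
      ++cpt;
      return false;                          // stay in ALLOC_DIR_RESET next cycle
  }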
ALLOC_HEAP_WRITE))5732 (( r_write_fsm.read() == WRITE_UPT_HEAP_LOCK ) && 5733 ( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE )) 5287 5734 || 5288 ( (r_write_fsm.read() == WRITE_MISS_TRT_LOCK) && 5289 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) ) ) 5290 { 5291 if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 5292 else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5293 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5294 else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; 5735 (( r_write_fsm.read() == WRITE_MISS_TRT_LOCK ) && 5736 ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ))) 5737 { 5738 if ( r_sc_fsm.read() == SC_DIR_REQ ) 5739 r_alloc_dir_fsm = ALLOC_DIR_SC; 5740 5741 else if ( r_cleanup_fsm.read() == CLEANUP_DIR_REQ ) 5742 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5743 5744 else if ( r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK ) 5745 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5746 5747 else if ( r_read_fsm.read() == READ_DIR_REQ ) 5748 r_alloc_dir_fsm = ALLOC_DIR_READ; 5295 5749 } 5296 5750 break; … … 5298 5752 //////////////////// 5299 5753 case ALLOC_DIR_SC: 5300 if ( ((r_sc_fsm.read() != SC_DIR_LOCK) && 5301 (r_sc_fsm.read() != SC_DIR_HIT_READ ) && 5302 (r_sc_fsm.read() != SC_DIR_HIT_WRITE ) && 5303 // (r_sc_fsm.read() != SC_MISS_TRT_LOCK ) && 5304 (r_sc_fsm.read() != SC_BC_TRT_LOCK) && 5305 (r_sc_fsm.read() != SC_BC_UPT_LOCK) && 5306 (r_sc_fsm.read() != SC_UPT_LOCK) && 5307 (r_sc_fsm.read() != SC_UPT_HEAP_LOCK)) 5754 if ((( r_sc_fsm.read() != SC_DIR_REQ ) && 5755 ( r_sc_fsm.read() != SC_DIR_LOCK ) && 5756 ( r_sc_fsm.read() != SC_DIR_HIT_READ ) && 5757 ( r_sc_fsm.read() != SC_DIR_HIT_WRITE ) && 5758 ( r_sc_fsm.read() != SC_BC_TRT_LOCK ) && 5759 ( r_sc_fsm.read() != SC_BC_UPT_LOCK ) && 5760 ( r_sc_fsm.read() != SC_MISS_TRT_LOCK ) && 5761 ( r_sc_fsm.read() != SC_UPT_LOCK ) && 5762 ( r_sc_fsm.read() != SC_UPT_HEAP_LOCK )) 5308 5763 || 5309 ( (r_sc_fsm.read() == SC_UPT_HEAP_LOCK)&&5310 (r_alloc_heap_fsm.read() == ALLOC_HEAP_SC))5764 (( r_sc_fsm.read() == SC_UPT_HEAP_LOCK ) && 5765 ( r_alloc_heap_fsm.read() == ALLOC_HEAP_SC )) 5311 5766 || 5312 ( (r_sc_fsm.read() == SC_MISS_TRT_LOCK ) && 5313 (r_alloc_trt_fsm.read() == ALLOC_TRT_SC) ) ) 5314 { 5315 if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5316 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5317 else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; 5318 else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5767 (( r_sc_fsm.read() == SC_MISS_TRT_LOCK ) && 5768 ( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ))) 5769 { 5770 if ( r_cleanup_fsm.read() == CLEANUP_DIR_REQ ) 5771 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5772 5773 else if ( r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK ) 5774 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5775 5776 else if ( r_read_fsm.read() == READ_DIR_REQ ) 5777 r_alloc_dir_fsm = ALLOC_DIR_READ; 5778 5779 else if ( r_write_fsm.read() == WRITE_DIR_REQ ) 5780 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5319 5781 } 5320 5782 break; … … 5322 5784 /////////////////////// 5323 5785 case ALLOC_DIR_CLEANUP: 5324 if ( (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) && 5325 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) ) 5326 { 5327 if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5328 else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; 5329 else 
if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5330 else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 5786 if (( r_cleanup_fsm.read() != CLEANUP_DIR_REQ ) && 5787 ( r_cleanup_fsm.read() != CLEANUP_DIR_LOCK ) && 5788 ( r_cleanup_fsm.read() != CLEANUP_HEAP_REQ ) && 5789 ( r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK )) 5790 { 5791 if ( r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK ) 5792 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 5793 5794 else if ( r_read_fsm.read() == READ_DIR_REQ ) 5795 r_alloc_dir_fsm = ALLOC_DIR_READ; 5796 5797 else if ( r_write_fsm.read() == WRITE_DIR_REQ ) 5798 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5799 5800 else if ( r_sc_fsm.read() == SC_DIR_REQ ) 5801 r_alloc_dir_fsm = ALLOC_DIR_SC; 5331 5802 } 5332 5803 break; 5804 5333 5805 //////////////////////// 5334 5806 case ALLOC_DIR_XRAM_RSP: 5335 if ( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) && 5336 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) && 5337 (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK)) 5338 { 5339 if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; 5340 else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5341 else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 5342 else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5807 if (( r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK ) && 5808 ( r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY ) && 5809 ( r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK )) 5810 { 5811 if ( r_read_fsm.read() == READ_DIR_REQ ) 5812 r_alloc_dir_fsm = ALLOC_DIR_READ; 5813 5814 else if ( r_write_fsm.read() == WRITE_DIR_REQ ) 5815 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 5816 5817 else if ( r_sc_fsm.read() == SC_DIR_REQ ) 5818 r_alloc_dir_fsm = ALLOC_DIR_SC; 5819 5820 else if ( r_cleanup_fsm.read() == CLEANUP_DIR_REQ ) 5821 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 5343 5822 } 5344 5823 break; … … 5347 5826 5348 5827 //////////////////////////////////////////////////////////////////////////////////// 5349 // 5828 // ALLOC_TRT FSM 5350 5829 //////////////////////////////////////////////////////////////////////////////////// 5351 5830 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) … … 5355 5834 /////////////////////////////////////////////////////////////////////////////////// 5356 5835 5357 switch ( r_alloc_trt_fsm)5836 switch ( r_alloc_trt_fsm.read() ) 5358 5837 { 5359 5360 5838 //////////////////// 5361 5839 case ALLOC_TRT_READ: 5362 if ( r_read_fsm.read() != READ_TRT_LOCK ) 5363 { 5364 if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) || 5365 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_WRITE; 5366 else if ((r_sc_fsm.read() == SC_MISS_TRT_LOCK) || 5367 (r_sc_fsm.read() == SC_BC_TRT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_SC; 5368 else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 5369 else if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) || 5370 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) ) r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 5840 if ( r_read_fsm.read() != READ_TRT_LOCK ) 5841 { 5842 if (( r_write_fsm.read() == WRITE_MISS_TRT_LOCK ) || 5843 ( r_write_fsm.read() == WRITE_BC_TRT_LOCK )) 5844 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 5845 5846 else if (( r_sc_fsm.read() == SC_MISS_TRT_LOCK ) || 5847 ( r_sc_fsm.read() == SC_BC_TRT_LOCK )) 5848 r_alloc_trt_fsm = ALLOC_TRT_SC; 5849 5850 else if (( r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK ) && 5851 ( r_alloc_dir_fsm.read() == 
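The new ALLOC_DIR_RESET state spreads the directory initialization over m_sets cycles: one SET is cleared per cycle, all the WAYS of that SET in parallel, and the allocator only leaves RESET once the counter has swept the whole directory. A minimal stand-alone sketch of that reset pattern, assuming a plain C++ model (DummyDirectory, tick() and the sizes below are illustrative, not the module's API):

    // Illustrative sketch only: one-SET-per-cycle directory reset.
    #include <cstdint>
    #include <cstddef>
    #include <vector>

    struct DummyDirectoryEntry { bool valid; uint32_t tag; };

    struct DummyDirectory
    {
        std::vector<std::vector<DummyDirectoryEntry> > sets;  // [set][way]
        size_t reset_cpt;                                      // mirrors r_alloc_dir_reset_cpt
        bool   ready;

        DummyDirectory(size_t nsets, size_t nways)
          : sets(nsets, std::vector<DummyDirectoryEntry>(nways)),
            reset_cpt(0), ready(false) {}

        // Called once per simulated cycle while the allocator is in its RESET state:
        // every WAY of the current SET is invalidated, then the counter advances.
        void tick()
        {
            if (ready) return;
            for (size_t way = 0; way < sets[reset_cpt].size(); ++way)
                sets[reset_cpt][way].valid = false;
            if (reset_cpt == sets.size() - 1) ready = true;   // leave RESET (e.g. go to READ)
            else                              ++reset_cpt;
        }
    };

    int main()
    {
        DummyDirectory dir(256, 4);          // e.g. 256 sets, 4 ways (made-up sizes)
        while (!dir.ready) dir.tick();       // one set invalidated per cycle
        return 0;
    }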
    ////////////////////////////////////////////////////////////////////////////////////
    //    ALLOC_TRT FSM
    ////////////////////////////////////////////////////////////////////////////////////
    // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer)
    // ...
    ///////////////////////////////////////////////////////////////////////////////////

    switch ( r_alloc_trt_fsm.read() )
    {
        ////////////////////
        case ALLOC_TRT_READ:
            if ( r_read_fsm.read() != READ_TRT_LOCK )
            {
                if      (( r_write_fsm.read() == WRITE_MISS_TRT_LOCK ) ||
                         ( r_write_fsm.read() == WRITE_BC_TRT_LOCK ))        r_alloc_trt_fsm = ALLOC_TRT_WRITE;

                else if (( r_sc_fsm.read() == SC_MISS_TRT_LOCK ) ||
                         ( r_sc_fsm.read() == SC_BC_TRT_LOCK ))              r_alloc_trt_fsm = ALLOC_TRT_SC;

                else if (( r_xram_rsp_fsm.read()  == XRAM_RSP_DIR_LOCK )  &&
                         ( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ))   r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;

                else if (( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE ) ||
                         ( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ ))       r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;
            }
            break;

        /////////////////////
        case ALLOC_TRT_WRITE:
            if (( r_write_fsm.read() != WRITE_MISS_TRT_LOCK ) &&
                ( r_write_fsm.read() != WRITE_BC_TRT_LOCK )   &&
                ( r_write_fsm.read() != WRITE_BC_UPT_LOCK ))
            {
                if      (( r_sc_fsm.read() == SC_MISS_TRT_LOCK ) ||
                         ( r_sc_fsm.read() == SC_BC_TRT_LOCK ))              r_alloc_trt_fsm = ALLOC_TRT_SC;

                else if (( r_xram_rsp_fsm.read()  == XRAM_RSP_DIR_LOCK )  &&
                         ( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ))   r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;

                else if (( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE ) ||
                         ( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ ))       r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;

                else if  ( r_read_fsm.read() == READ_TRT_LOCK )              r_alloc_trt_fsm = ALLOC_TRT_READ;
            }
            break;

        //////////////////
        case ALLOC_TRT_SC:
            if (( r_sc_fsm.read() != SC_MISS_TRT_LOCK ) &&
                ( r_sc_fsm.read() != SC_BC_TRT_LOCK )   &&
                ( r_sc_fsm.read() != SC_BC_UPT_LOCK ))
            {
                if      (( r_xram_rsp_fsm.read()  == XRAM_RSP_DIR_LOCK )  &&
                         ( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ))   r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;

                else if (( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE ) ||
                         ( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ ))       r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;

                else if  ( r_read_fsm.read() == READ_TRT_LOCK )              r_alloc_trt_fsm = ALLOC_TRT_READ;

                else if (( r_write_fsm.read() == WRITE_MISS_TRT_LOCK ) ||
                         ( r_write_fsm.read() == WRITE_BC_TRT_LOCK ))        r_alloc_trt_fsm = ALLOC_TRT_WRITE;
            }
            break;

        ////////////////////////
        case ALLOC_TRT_XRAM_RSP:
            if ((( r_xram_rsp_fsm.read()  != XRAM_RSP_DIR_LOCK )   ||
                 ( r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP )) &&
                 ( r_xram_rsp_fsm.read()  != XRAM_RSP_TRT_COPY )   &&
                 ( r_xram_rsp_fsm.read()  != XRAM_RSP_DIR_UPDT )   &&
                 ( r_xram_rsp_fsm.read()  != XRAM_RSP_INVAL_LOCK ))
            {
                if      (( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE ) ||
                         ( r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ ))       r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP;

                else if  ( r_read_fsm.read() == READ_TRT_LOCK )              r_alloc_trt_fsm = ALLOC_TRT_READ;

                else if (( r_write_fsm.read() == WRITE_MISS_TRT_LOCK ) ||
                         ( r_write_fsm.read() == WRITE_BC_TRT_LOCK ))        r_alloc_trt_fsm = ALLOC_TRT_WRITE;

                else if (( r_sc_fsm.read() == SC_MISS_TRT_LOCK ) ||
                         ( r_sc_fsm.read() == SC_BC_TRT_LOCK ))              r_alloc_trt_fsm = ALLOC_TRT_SC;
            }
            break;

        ///////////////////////
        case ALLOC_TRT_IXR_RSP:
            if (( r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE ) &&
                ( r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ ))
            {
                if       ( r_read_fsm.read() == READ_TRT_LOCK )              r_alloc_trt_fsm = ALLOC_TRT_READ;

                else if (( r_write_fsm.read() == WRITE_MISS_TRT_LOCK ) ||
                         ( r_write_fsm.read() == WRITE_BC_TRT_LOCK ))        r_alloc_trt_fsm = ALLOC_TRT_WRITE;

                else if (( r_sc_fsm.read() == SC_MISS_TRT_LOCK ) ||
                         ( r_sc_fsm.read() == SC_BC_TRT_LOCK ))              r_alloc_trt_fsm = ALLOC_TRT_SC;

                else if (( r_xram_rsp_fsm.read()  == XRAM_RSP_DIR_LOCK )  &&
                         ( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ))   r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
            }
            break;

    ...
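The ALLOC_DIR, ALLOC_TRT and ALLOC_HEAP FSMs all implement the same rotating-priority lock: the lock stays with its current owner as long as that client FSM is in a state that still uses the resource, and when it is released the other clients are polled in a fixed circular order starting just after the owner. A minimal sketch of that discipline, assuming hypothetical Client ids (here the five clients of the directory lock) and predicate callbacks in place of the real per-FSM state tests:

    // Illustrative sketch only: rotating-priority lock allocation, one evaluation per cycle.
    #include <cstddef>

    enum Client { READ = 0, WRITE, SC, CLEANUP, XRAM_RSP, NB_CLIENTS };

    // Keep the lock while the owner still needs it; otherwise grant it to the first
    // requester found by scanning the clients in circular order after the owner.
    Client allocate(Client owner,
                    bool (*owner_still_using)(Client),
                    bool (*wants_lock)(Client))
    {
        if (owner_still_using(owner)) return owner;
        for (size_t i = 1; i < NB_CLIENTS; ++i)
        {
            Client next = static_cast<Client>((owner + i) % NB_CLIENTS);
            if (wants_lock(next)) return next;
        }
        return owner;   // nobody is requesting: the owner keeps the idle lock
    }

    int main()
    {
        Client owner = READ;
        owner = allocate(owner,
                         [](Client)   { return false; },         // owner has released the lock
                         [](Client c) { return c == CLEANUP; }); // only CLEANUP is requesting
        return owner == CLEANUP ? 0 : 1;
    }

The long lists of states in each case above play the role of owner_still_using(): they keep the lock in place until the owning FSM has completely left the sequence of states that reads or writes the shared structure.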
    ////////////////////////////////////////////////////////////////////////////////////
    //    ALLOC_HEAP FSM
    ////////////////////////////////////////////////////////////////////////////////////
    // The ALLOC_HEAP FSM allocates the access to the heap
    // ...
    /////////////////////////////////////////////////////////////////////////////////////

    switch ( r_alloc_heap_fsm.read() )
    {
        //////////////////////
        case ALLOC_HEAP_RESET:
            // Initializes the heap one ENTRY each cycle.
            r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1);

            if (r_alloc_heap_reset_cpt.read() == (m_heap_size - 1)) {
                m_heap.init();
                r_alloc_heap_fsm = ALLOC_HEAP_READ;
            }
            break;

        /////////////////////
        case ALLOC_HEAP_READ:
            if (( r_read_fsm.read() != READ_HEAP_REQ )  &&
                ( r_read_fsm.read() != READ_HEAP_LOCK ) &&
                ( r_read_fsm.read() != READ_HEAP_ERASE ))
            {
                if      ( r_write_fsm.read()    == WRITE_UPT_HEAP_LOCK ) r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
                else if ( r_sc_fsm.read()       == SC_UPT_HEAP_LOCK )    r_alloc_heap_fsm = ALLOC_HEAP_SC;
                else if ( r_cleanup_fsm.read()  == CLEANUP_HEAP_REQ )    r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
                else if ( r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ )   r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
            }
            break;

        //////////////////////
        case ALLOC_HEAP_WRITE:
            if (( r_write_fsm.read() != WRITE_UPT_HEAP_LOCK ) &&
                ( r_write_fsm.read() != WRITE_UPT_REQ )       &&
                ( r_write_fsm.read() != WRITE_UPT_NEXT ))
            {
                if      ( r_sc_fsm.read()       == SC_UPT_HEAP_LOCK )  r_alloc_heap_fsm = ALLOC_HEAP_SC;
                else if ( r_cleanup_fsm.read()  == CLEANUP_HEAP_REQ )  r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
                else if ( r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ ) r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
                else if ( r_read_fsm.read()     == READ_HEAP_REQ )     r_alloc_heap_fsm = ALLOC_HEAP_READ;
            }
            break;

        ///////////////////
        case ALLOC_HEAP_SC:
            if (( r_sc_fsm.read() != SC_UPT_HEAP_LOCK ) &&
                ( r_sc_fsm.read() != SC_UPT_REQ )       &&
                ( r_sc_fsm.read() != SC_UPT_NEXT ))
            {
                if      ( r_cleanup_fsm.read()  == CLEANUP_HEAP_REQ )    r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
                else if ( r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ )   r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
                else if ( r_read_fsm.read()     == READ_HEAP_REQ )       r_alloc_heap_fsm = ALLOC_HEAP_READ;
                else if ( r_write_fsm.read()    == WRITE_UPT_HEAP_LOCK ) r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
            }
            break;

        ////////////////////////
        case ALLOC_HEAP_CLEANUP:
            if (( r_cleanup_fsm.read() != CLEANUP_HEAP_REQ )    &&
                ( r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK )   &&
                ( r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH ) &&
                ( r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN ))
            {
                if      ( r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ )   r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP;
                else if ( r_read_fsm.read()     == READ_HEAP_REQ )       r_alloc_heap_fsm = ALLOC_HEAP_READ;
                else if ( r_write_fsm.read()    == WRITE_UPT_HEAP_LOCK ) r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
                else if ( r_sc_fsm.read()       == SC_UPT_HEAP_LOCK )    r_alloc_heap_fsm = ALLOC_HEAP_SC;
            }
            break;

        /////////////////////////
        case ALLOC_HEAP_XRAM_RSP:
            if (( r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ ) &&
                ( r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE ))
            {
                if      ( r_read_fsm.read()    == READ_HEAP_REQ )       r_alloc_heap_fsm = ALLOC_HEAP_READ;
                else if ( r_write_fsm.read()   == WRITE_UPT_HEAP_LOCK ) r_alloc_heap_fsm = ALLOC_HEAP_WRITE;
                else if ( r_sc_fsm.read()      == SC_UPT_HEAP_LOCK )    r_alloc_heap_fsm = ALLOC_HEAP_SC;
                else if ( r_cleanup_fsm.read() == CLEANUP_HEAP_REQ )    r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP;
            }
            break;

    ...

    ////////////////////////////////////////////////////////////////////////////////////
    //    TGT_CMD to READ FIFO
    ////////////////////////////////////////////////////////////////////////////////////

    ...
    }

    /////////////////////////////////////////////////////////////////////
    //    TGT_CMD to WRITE FIFO
    /////////////////////////////////////////////////////////////////////

    ...
    }

    ////////////////////////////////////////////////////////////////////////////////////
    //    TGT_CMD to SC FIFO
    ////////////////////////////////////////////////////////////////////////////////////

    ...
    if ( cmd_sc_fifo_get ) {
        m_cmd_sc_addr_fifo.put_and_get((addr_t)(p_vci_tgt.address.read()));
        m_cmd_sc_eop_fifo.put_and_get(p_vci_tgt.eop.read());
        m_cmd_sc_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());
        m_cmd_sc_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());
        ...
    } else {
        m_cmd_sc_addr_fifo.simple_put((addr_t)(p_vci_tgt.address.read()));
        m_cmd_sc_eop_fifo.simple_put(p_vci_tgt.eop.read());
        m_cmd_sc_srcid_fifo.simple_put(p_vci_tgt.srcid.read());
        m_cmd_sc_trdid_fifo.simple_put(p_vci_tgt.trdid.read());
        ...
    }

    ////////////////////////////////////////////////////////////////////////////////////
    //    WRITE to INIT_CMD FIFO
    ////////////////////////////////////////////////////////////////////////////////////

    ...
    }

    ////////////////////////////////////////////////////////////////////////////////////
    //    XRAM_RSP to INIT_CMD FIFO
    ////////////////////////////////////////////////////////////////////////////////////

    ...
    }

    ////////////////////////////////////////////////////////////////////////////////////
    //    SC to INIT_CMD FIFO
    ////////////////////////////////////////////////////////////////////////////////////

    ...
        p_vci_ixr.trdid   = r_read_to_ixr_cmd_trdid.read();
        p_vci_ixr.eop     = true;
    }
    else if ( r_ixr_cmd_fsm.read() == IXR_CMD_SC_NLINE ) {
        if (r_sc_to_ixr_cmd_write.read()) {
            ...
            p_vci_ixr.eop = true;
        }
    }
    else if ( r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_NLINE ) {
        if (r_write_to_ixr_cmd_write.read()) {
            ...
            p_vci_ixr.eop = true;
        }
    }
    else if ( r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_DATA ) {
        p_vci_ixr.cmd     = vci_param::CMD_WRITE;
        ...
        p_vci_ixr.wdata   = 0;
        p_vci_ixr.trdid   = 0;
        p_vci_ixr.eop     = false;
    }

    ...

    if ( ((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) &&
          (r_ixr_rsp_fsm.read()   == IXR_RSP_TRT_READ)) ||
         (r_ixr_rsp_fsm.read()    == IXR_RSP_ACK) ) p_vci_ixr.rspack = true;
    else                                            p_vci_ixr.rspack = false;

    ...
        p_vci_tgt.rtrdid  = 0;
        p_vci_tgt.rerror  = 0;
        p_vci_tgt.reop    = false;
        break;
    case TGT_RSP_READ:
        ...
        p_vci_tgt.rtrdid  = r_write_to_tgt_rsp_trdid.read();
        p_vci_tgt.rpktid  = r_write_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror  = 0x2 & ((1 << vci_param::E) - 1);
        p_vci_tgt.reop    = true;
        break;
        ...
        p_vci_tgt.rpktid  = r_xram_rsp_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror  = r_xram_rsp_to_tgt_rsp_rerror.read();
        p_vci_tgt.reop    = (( r_tgt_rsp_cpt.read() ==
                               (r_xram_rsp_to_tgt_rsp_word.read() + r_xram_rsp_to_tgt_rsp_length.read() - 1))
                             || r_xram_rsp_to_tgt_rsp_rerror.read());
        break;
    case TGT_RSP_INIT:
        ...
        p_vci_tgt.rpktid  = r_init_rsp_to_tgt_rsp_pktid.read();
        p_vci_tgt.rerror  = 0;    // Can be a SC rsp
        p_vci_tgt.reop    = true;
        break;
    } // end switch r_tgt_rsp_fsm

    ...
    // End:

    // vim: filetype=cpp:expandtab:shiftwidth=2:tabstop=2:softtabstop=2
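The TGT_CMD decoding above feeds one FIFO per command field, and each FIFO is updated once per cycle with one of three operations: simple_put() when only the producer pushes, simple_get() when only the consumer pops, and put_and_get() when both happen in the same cycle, leaving the occupancy unchanged. A minimal sketch of those three operations on a toy ring buffer, assuming a hypothetical ToyFifo class rather than SoCLib's generic FIFO implementation:

    // Illustrative sketch only: the per-cycle FIFO update discipline.
    #include <cstddef>
    #include <vector>

    template <typename T>
    class ToyFifo
    {
        std::vector<T> buf;
        size_t rptr, wptr, count;
    public:
        explicit ToyFifo(size_t depth) : buf(depth), rptr(0), wptr(0), count(0) {}

        bool wok() const { return count < buf.size(); }   // room to write
        bool rok() const { return count > 0; }            // data to read
        const T &read() const { return buf[rptr]; }

        // Producer pushes, nobody pops this cycle.
        void simple_put(const T &data)
        { if (wok()) { buf[wptr] = data; wptr = (wptr + 1) % buf.size(); ++count; } }

        // Consumer pops, nobody pushes this cycle.
        void simple_get()
        { if (rok()) { rptr = (rptr + 1) % buf.size(); --count; } }

        // Push and pop in the same cycle: occupancy is unchanged.
        void put_and_get(const T &data)
        {
            if (!rok()) { simple_put(data); return; }
            rptr = (rptr + 1) % buf.size();
            buf[wptr] = data; wptr = (wptr + 1) % buf.size();
        }
    };

    int main()
    {
        ToyFifo<int> fifo(8);
        fifo.simple_put(1);     // command accepted, no consumer this cycle
        fifo.put_and_get(2);    // consumer pops 1 while 2 is pushed
        fifo.simple_get();      // consumer pops 2
        return fifo.rok() ? 1 : 0;
    }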