Changeset 184 for trunk/modules/vci_mem_cache_v4/caba/source
- Timestamp: Jan 7, 2012, 7:17:34 PM
- Location: trunk/modules/vci_mem_cache_v4/caba/source
- Files: 4 edited
trunk/modules/vci_mem_cache_v4/caba/source/include/update_tab_v4.h
r2 → r184

             size_t i_count)
     {
       valid   = i_valid;
       update  = i_update;
       brdcast = i_brdcast;
       rsp     = i_rsp;
       srcid   = i_srcid;
       trdid   = i_trdid;
       pktid   = i_pktid;
       nline   = i_nline;
       count   = i_count;
     }

…

     ////////////////////////////////////////////////////////////////////
-    // The size() function returns the size of the tab
+    // The print() function diplays the tab content
     ////////////////////////////////////////////////////////////////////
     void print(){

…

     /////////////////////////////////////////////////////////////////////
+    // The is_not_empty() function returns true if the table is not empty
+    /////////////////////////////////////////////////////////////////////
+    bool is_not_empty()
+    {
+      for(size_t i = 0 ; i < size_tab ; i++){
+        if(tab[i].valid){
+          return true;
+        }
+      }
+      return false;
+    }
+
+    /////////////////////////////////////////////////////////////////////
     // The need_rsp() function returns the need of a response
     // Arguments :
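The new is_not_empty() helper above simply scans the valid bits of the update/invalidate table. As a rough illustration of how a caller might poll it before accepting new coherence work (the actual call sites are not part of this changeset, and the MockUpdateTab type below is a hypothetical stand-in, not the real class), here is a minimal standalone C++ sketch:

// Standalone sketch, assuming the same valid-bit convention as UpdateTab.
#include <cstddef>
#include <iostream>
#include <vector>

struct MockEntry { bool valid = false; };

struct MockUpdateTab {
    std::vector<MockEntry> tab;
    explicit MockUpdateTab(size_t n) : tab(n) {}

    // same scan as the new UpdateTab::is_not_empty() in r184
    bool is_not_empty() const {
        for (size_t i = 0; i < tab.size(); i++)
            if (tab[i].valid) return true;
        return false;
    }
};

int main() {
    MockUpdateTab upt(8);
    std::cout << "pending = " << upt.is_not_empty() << std::endl;  // 0 : table empty
    upt.tab[3].valid = true;                                       // one pending update
    std::cout << "pending = " << upt.is_not_empty() << std::endl;  // 1 : not empty
    return 0;
}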
trunk/modules/vci_mem_cache_v4/caba/source/include/vci_mem_cache_v4.h
r177 r184 80 80 TGT_CMD_IDLE, 81 81 TGT_CMD_READ, 82 TGT_CMD_READ_EOP,83 82 TGT_CMD_WRITE, 84 83 TGT_CMD_ATOMIC, … … 89 88 TGT_RSP_READ_IDLE, 90 89 TGT_RSP_WRITE_IDLE, 91 TGT_RSP_ LLSC_IDLE,90 TGT_RSP_SC_IDLE, 92 91 TGT_RSP_XRAM_IDLE, 93 92 TGT_RSP_INIT_IDLE, … … 95 94 TGT_RSP_READ, 96 95 TGT_RSP_WRITE, 97 TGT_RSP_ LLSC,96 TGT_RSP_SC, 98 97 TGT_RSP_XRAM, 99 98 TGT_RSP_INIT, … … 139 138 READ_TRT_LOCK, 140 139 READ_TRT_SET, 141 READ_ XRAM_REQ,140 READ_TRT_REQ, 142 141 }; 143 142 … … 197 196 IXR_CMD_READ_IDLE, 198 197 IXR_CMD_WRITE_IDLE, 199 IXR_CMD_ LLSC_IDLE,198 IXR_CMD_SC_IDLE, 200 199 IXR_CMD_XRAM_IDLE, 201 200 IXR_CMD_READ_NLINE, 202 201 IXR_CMD_WRITE_NLINE, 203 IXR_CMD_ LLSC_NLINE,202 IXR_CMD_SC_NLINE, 204 203 IXR_CMD_XRAM_DATA, 205 204 }; 206 205 207 /* States of the LLSC fsm */208 enum llsc_fsm_state_e{209 LLSC_IDLE,206 /* States of the SC fsm */ 207 enum sc_fsm_state_e{ 208 SC_IDLE, 210 209 SC_DIR_LOCK, 211 210 SC_DIR_HIT_READ, … … 215 214 SC_HEAP_LOCK, 216 215 SC_UPT_REQ, 217 SC_UP DATE,218 SC_TRT_ LOCK,216 SC_UPT_NEXT, 217 SC_TRT_PUT_LOCK, 219 218 SC_INVAL_LOCK, 220 219 SC_DIR_INVAL, 221 220 SC_INVAL, 222 SC_ XRAM_SEND,223 SC_RSP_FA LSE,224 SC_RSP_ TRUE,225 LLSC_TRT_LOCK,226 LLSC_TRT_SET,227 LLSC_XRAM_REQ,221 SC_TRT_PUT_REQ, 222 SC_RSP_FAIL, 223 SC_RSP_SUCCESS, 224 SC_TRT_GET_LOCK, 225 SC_TRT_GET_SET, 226 SC_TRT_GET_REQ, 228 227 }; 229 228 … … 247 246 ALLOC_DIR_READ, 248 247 ALLOC_DIR_WRITE, 249 ALLOC_DIR_ LLSC,248 ALLOC_DIR_SC, 250 249 ALLOC_DIR_CLEANUP, 251 250 ALLOC_DIR_XRAM_RSP, … … 256 255 ALLOC_TRT_READ, 257 256 ALLOC_TRT_WRITE, 258 ALLOC_TRT_ LLSC,257 ALLOC_TRT_SC, 259 258 ALLOC_TRT_XRAM_RSP, 260 259 ALLOC_TRT_IXR_RSP, … … 267 266 ALLOC_UPT_INIT_RSP, 268 267 ALLOC_UPT_CLEANUP, 269 ALLOC_UPT_ LLSC,268 ALLOC_UPT_SC, 270 269 }; 271 270 … … 274 273 ALLOC_HEAP_READ, 275 274 ALLOC_HEAP_WRITE, 276 ALLOC_HEAP_ LLSC,275 ALLOC_HEAP_SC, 277 276 ALLOC_HEAP_CLEANUP, 278 277 ALLOC_HEAP_XRAM_RSP, 279 278 }; 280 279 280 // debug variables (for each FSM) 281 size_t m_debug_start_cycle; 282 bool m_debug_ok; 283 bool m_debug_global; 284 bool m_debug_tgt_cmd_fsm; 285 bool m_debug_tgt_rsp_fsm; 286 bool m_debug_init_cmd_fsm; 287 bool m_debug_init_rsp_fsm; 288 bool m_debug_read_fsm; 289 bool m_debug_write_fsm; 290 bool m_debug_sc_fsm; 291 bool m_debug_cleanup_fsm; 292 bool m_debug_ixr_cmd_fsm; 293 bool m_debug_ixr_rsp_fsm; 294 bool m_debug_xram_rsp_fsm; 295 bool m_debug_previous_hit; 296 size_t m_debug_previous_count; 297 298 // instrumentation counters 281 299 uint32_t m_cpt_cycles; // Counter of cycles 282 300 uint32_t m_cpt_read; // Number of READ transactions … … 297 315 uint32_t m_cpt_sc; // Number of SC transactions 298 316 317 size_t m_prev_count; 318 299 319 protected: 300 320 … … 310 330 311 331 VciMemCacheV4( 312 sc_module_name name, // Instance Name 313 const soclib::common::MappingTable &mtp, // Mapping table for primary requets 314 const soclib::common::MappingTable &mtc, // Mapping table for coherence requets 315 const soclib::common::MappingTable &mtx, // Mapping table for XRAM 316 const soclib::common::IntTab &vci_ixr_index, // VCI port to XRAM (initiator) 317 const soclib::common::IntTab &vci_ini_index, // VCI port to PROC (initiator) 318 const soclib::common::IntTab &vci_tgt_index, // VCI port to PROC (target) 319 const soclib::common::IntTab &vci_tgt_index_cleanup, // VCI port to PROC (target) for cleanup 320 size_t nways, // Number of ways per set 321 size_t nsets, // Number of sets 322 size_t nwords, // Number of words per line 323 size_t heap_size=1024, // Size of the heap 
324 size_t transaction_tab_lines=TRANSACTION_TAB_LINES,// Size of the TRT 325 size_t update_tab_lines=UPDATE_TAB_LINES // Size of the UPT 326 ); 332 sc_module_name name, // Instance Name 333 const soclib::common::MappingTable &mtp, // Mapping table for primary requets 334 const soclib::common::MappingTable &mtc, // Mapping table for coherence requets 335 const soclib::common::MappingTable &mtx, // Mapping table for XRAM 336 const soclib::common::IntTab &vci_ixr_index, // VCI port to XRAM (initiator) 337 const soclib::common::IntTab &vci_ini_index, // VCI port to PROC (initiator) 338 const soclib::common::IntTab &vci_tgt_index, // VCI port to PROC (target) 339 const soclib::common::IntTab &vci_tgt_index_cleanup,// VCI port to PROC (target) for cleanup 340 size_t nways, // Number of ways per set 341 size_t nsets, // Number of sets 342 size_t nwords, // Number of words per line 343 size_t heap_size=1024, // Size of the heap 344 size_t transaction_tab_lines=TRANSACTION_TAB_LINES, // Size of the TRT 345 size_t update_tab_lines=UPDATE_TAB_LINES, // Size of the UPT 346 size_t debug_start_cycle=0, 347 bool debug_ok=false); 327 348 328 349 ~VciMemCacheV4(); 329 350 330 351 void transition(); 331 332 352 void genMoore(); 333 334 353 void print_stats(); 335 336 354 void print_trace(); 355 void cache_monitor(vci_addr_t addr); 337 356 338 357 private: … … 343 362 const size_t m_ways; // Number of ways in a set 344 363 const size_t m_sets; // Number of cache sets 345 const size_t m_words; // Number of words in a line346 const size_t m_srcid_ixr; // Srcid for requests to XRAM347 const size_t m_srcid_ini; // Srcid for requests to processors364 const size_t m_words; // Number of words in a line 365 const size_t m_srcid_ixr; // Srcid for requests to XRAM 366 const size_t m_srcid_ini; // Srcid for requests to processors 348 367 std::list<soclib::common::Segment> m_seglist; // memory cached into the cache 349 368 std::list<soclib::common::Segment> m_cseglist; // coherence segment for the cache 350 369 vci_addr_t *m_coherence_table; // address(srcid) 351 370 uint32_t m_transaction_tab_lines; 352 TransactionTab m_transaction_tab; // xram transaction table371 TransactionTab m_transaction_tab; // xram transaction table 353 372 uint32_t m_update_tab_lines; 354 UpdateTab m_update_tab; // pending update & invalidate355 CacheDirectory m_cache_directory; // data cache directory356 HeapDirectory m_heap _directory; // heap directory357 358 data_t ***m_cache_data; // data array[set][way][word]373 UpdateTab m_update_tab; // pending update & invalidate 374 CacheDirectory m_cache_directory; // data cache directory 375 HeapDirectory m_heap; // heap for copies 376 377 data_t ***m_cache_data; // data array[set][way][word] 359 378 360 379 // adress masks … … 371 390 ////////////////////////////////////////////////// 372 391 sc_signal<size_t> r_copies_limit; // Limit of the number of copies for one line 392 sc_signal<size_t> xxx_count; 373 393 374 394 ////////////////////////////////////////////////// … … 392 412 GenericFifo<be_t> m_cmd_write_be_fifo; 393 413 394 // Fifo between TGT_CMD fsm and LLSC fsm395 GenericFifo<uint64_t> m_cmd_ llsc_addr_fifo;396 GenericFifo<bool> m_cmd_ llsc_eop_fifo;397 GenericFifo<size_t> m_cmd_ llsc_srcid_fifo;398 GenericFifo<size_t> m_cmd_ llsc_trdid_fifo;399 GenericFifo<size_t> m_cmd_ llsc_pktid_fifo;400 GenericFifo<data_t> m_cmd_ llsc_wdata_fifo;414 // Fifo between TGT_CMD fsm and SC fsm 415 GenericFifo<uint64_t> m_cmd_sc_addr_fifo; 416 GenericFifo<bool> m_cmd_sc_eop_fifo; 417 GenericFifo<size_t> 
m_cmd_sc_srcid_fifo; 418 GenericFifo<size_t> m_cmd_sc_trdid_fifo; 419 GenericFifo<size_t> m_cmd_sc_pktid_fifo; 420 GenericFifo<data_t> m_cmd_sc_wdata_fifo; 401 421 402 422 sc_signal<int> r_tgt_cmd_fsm; … … 555 575 556 576 /////////////////////////////////////////////////////// 557 // Registers controlled by LLSC fsm577 // Registers controlled by SC fsm 558 578 /////////////////////////////////////////////////////// 559 579 560 sc_signal<int> r_ llsc_fsm;// FSM state561 sc_signal<data_t> r_ llsc_wdata;// write data word562 sc_signal<data_t> *r_ llsc_rdata;// read data word563 sc_signal<uint32_t> r_ llsc_lfsr;// lfsr for random introducing564 sc_signal<size_t> r_ llsc_cpt;// size of command565 sc_signal<copy_t> r_ llsc_copy;// Srcid of the first copy566 sc_signal<copy_t> r_ llsc_copy_cache;// Srcid of the first copy567 sc_signal<bool> r_ llsc_copy_inst;// Type of the first copy568 sc_signal<size_t> r_ llsc_count;// number of copies569 sc_signal<size_t> r_ llsc_ptr;// pointer to the heap570 sc_signal<size_t> r_ llsc_next_ptr;// next pointer to the heap571 sc_signal<bool> r_ llsc_is_cnt;// is_cnt bit (in directory)572 sc_signal<bool> r_ llsc_dirty;// dirty bit (in directory)573 sc_signal<size_t> r_ llsc_way;// way in directory574 sc_signal<size_t> r_ llsc_set;// set in directory575 sc_signal<data_t> r_ llsc_tag;// cache line tag (in directory)576 sc_signal<size_t> r_ llsc_trt_index;// Transaction Table index577 sc_signal<size_t> r_ llsc_upt_index;// Update Table index578 579 // Buffer between LLSC fsm and INIT_CMD fsm (XRAM read)580 sc_signal<bool> r_ llsc_to_ixr_cmd_req;// valid request581 sc_signal<addr_t> r_ llsc_to_ixr_cmd_nline;// cache line index582 sc_signal<size_t> r_ llsc_to_ixr_cmd_trdid;// index in Transaction Table583 sc_signal<bool> r_ llsc_to_ixr_cmd_write;// write request584 sc_signal<data_t> *r_ llsc_to_ixr_cmd_data;// cache line data585 586 587 // Buffer between LLSC fsm and TGT_RSP fsm588 sc_signal<bool> r_ llsc_to_tgt_rsp_req;// valid request589 sc_signal<data_t> r_ llsc_to_tgt_rsp_data;// read data word590 sc_signal<size_t> r_ llsc_to_tgt_rsp_srcid;// Transaction srcid591 sc_signal<size_t> r_ llsc_to_tgt_rsp_trdid;// Transaction trdid592 sc_signal<size_t> r_ llsc_to_tgt_rsp_pktid;// Transaction pktid593 594 // Buffer between LLSC fsm and INIT_CMD fsm (Update/Invalidate L1 caches)595 sc_signal<bool> r_ llsc_to_init_cmd_multi_req;// valid request596 sc_signal<bool> r_ llsc_to_init_cmd_brdcast_req;// brdcast request597 sc_signal<addr_t> r_ llsc_to_init_cmd_nline;// cache line index598 sc_signal<size_t> r_ llsc_to_init_cmd_trdid;// index in Update Table599 sc_signal<data_t> r_ llsc_to_init_cmd_wdata;// data (one word)600 sc_signal<bool> r_ llsc_to_init_cmd_is_long;// it is a 64 bits SC601 sc_signal<data_t> r_ llsc_to_init_cmd_wdata_high;// data high (one word)602 sc_signal<size_t> r_ llsc_to_init_cmd_index;// index of the word in line603 GenericFifo<bool> m_ llsc_to_init_cmd_inst_fifo;// fifo for the L1 type604 GenericFifo<size_t> m_ llsc_to_init_cmd_srcid_fifo;// fifo for srcids605 GenericFifo<size_t> m_ llsc_to_init_cmd_cache_id_fifo;// fifo for srcids606 607 // Buffer between LLSC fsm and INIT_RSP fsm (Decrement UPT entry)608 sc_signal<bool> r_ llsc_to_init_rsp_req;// valid request609 sc_signal<size_t> r_ llsc_to_init_rsp_upt_index;// index in update table580 sc_signal<int> r_sc_fsm; // FSM state 581 sc_signal<data_t> r_sc_wdata; // write data word 582 sc_signal<data_t> *r_sc_rdata; // read data word 583 sc_signal<uint32_t> r_sc_lfsr; // lfsr for random introducing 584 
sc_signal<size_t> r_sc_cpt; // size of command 585 sc_signal<copy_t> r_sc_copy; // Srcid of the first copy 586 sc_signal<copy_t> r_sc_copy_cache; // Srcid of the first copy 587 sc_signal<bool> r_sc_copy_inst; // Type of the first copy 588 sc_signal<size_t> r_sc_count; // number of copies 589 sc_signal<size_t> r_sc_ptr; // pointer to the heap 590 sc_signal<size_t> r_sc_next_ptr; // next pointer to the heap 591 sc_signal<bool> r_sc_is_cnt; // is_cnt bit (in directory) 592 sc_signal<bool> r_sc_dirty; // dirty bit (in directory) 593 sc_signal<size_t> r_sc_way; // way in directory 594 sc_signal<size_t> r_sc_set; // set in directory 595 sc_signal<data_t> r_sc_tag; // cache line tag (in directory) 596 sc_signal<size_t> r_sc_trt_index; // Transaction Table index 597 sc_signal<size_t> r_sc_upt_index; // Update Table index 598 599 // Buffer between SC fsm and INIT_CMD fsm (XRAM read) 600 sc_signal<bool> r_sc_to_ixr_cmd_req; // valid request 601 sc_signal<addr_t> r_sc_to_ixr_cmd_nline; // cache line index 602 sc_signal<size_t> r_sc_to_ixr_cmd_trdid; // index in Transaction Table 603 sc_signal<bool> r_sc_to_ixr_cmd_write; // write request 604 sc_signal<data_t> *r_sc_to_ixr_cmd_data; // cache line data 605 606 607 // Buffer between SC fsm and TGT_RSP fsm 608 sc_signal<bool> r_sc_to_tgt_rsp_req; // valid request 609 sc_signal<data_t> r_sc_to_tgt_rsp_data; // read data word 610 sc_signal<size_t> r_sc_to_tgt_rsp_srcid; // Transaction srcid 611 sc_signal<size_t> r_sc_to_tgt_rsp_trdid; // Transaction trdid 612 sc_signal<size_t> r_sc_to_tgt_rsp_pktid; // Transaction pktid 613 614 // Buffer between SC fsm and INIT_CMD fsm (Update/Invalidate L1 caches) 615 sc_signal<bool> r_sc_to_init_cmd_multi_req; // valid request 616 sc_signal<bool> r_sc_to_init_cmd_brdcast_req; // brdcast request 617 sc_signal<addr_t> r_sc_to_init_cmd_nline; // cache line index 618 sc_signal<size_t> r_sc_to_init_cmd_trdid; // index in Update Table 619 sc_signal<data_t> r_sc_to_init_cmd_wdata; // data (one word) 620 sc_signal<bool> r_sc_to_init_cmd_is_long; // it is a 64 bits SC 621 sc_signal<data_t> r_sc_to_init_cmd_wdata_high; // data high (one word) 622 sc_signal<size_t> r_sc_to_init_cmd_index; // index of the word in line 623 GenericFifo<bool> m_sc_to_init_cmd_inst_fifo; // fifo for the L1 type 624 GenericFifo<size_t> m_sc_to_init_cmd_srcid_fifo; // fifo for srcids 625 GenericFifo<size_t> m_sc_to_init_cmd_cache_id_fifo; // fifo for srcids 626 627 // Buffer between SC fsm and INIT_RSP fsm (Decrement UPT entry) 628 sc_signal<bool> r_sc_to_init_rsp_req; // valid request 629 sc_signal<size_t> r_sc_to_init_rsp_upt_index; // index in update table 610 630 611 631 //////////////////////////////////////////////////// -
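The header changes above add two constructor arguments (debug_start_cycle, debug_ok) and one m_debug_* flag per FSM; according to the comment in the .cpp diff below, each flag is recomputed every cycle as m_debug_ok and (m_cpt_cycles > m_debug_start_cycle). A minimal standalone sketch of that gating, using a hypothetical stand-in struct rather than the real VciMemCacheV4 class:

// Standalone sketch of the per-FSM debug gating introduced in r184.
#include <cstdint>
#include <iostream>

struct DebugGate {
    uint32_t start_cycle;   // corresponds to constructor argument debug_start_cycle
    bool     ok;            // corresponds to constructor argument debug_ok

    // a trace becomes active only after start_cycle, and only if ok is set
    bool active(uint32_t current_cycle) const {
        return ok && (current_cycle > start_cycle);
    }
};

int main() {
    DebugGate gate{1000, true};
    for (uint32_t cycle : {500u, 1000u, 1500u})
        std::cout << "cycle " << cycle << " trace=" << gate.active(cycle) << "\n";
    return 0;
}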
trunk/modules/vci_mem_cache_v4/caba/source/include/xram_transaction_v4.h
r138 → r184

 class TransactionTabEntry {
   typedef uint32_t            size_t;
   typedef uint32_t            data_t;
   typedef sc_dt::sc_uint<40>  addr_t;
   typedef uint32_t            be_t;

  public:
   bool                valid;        // entry valid
   bool                xram_read;    // read request to XRAM
   addr_t              nline;        // index (zy) of the requested line
   size_t              srcid;        // processor requesting the transaction
   size_t              trdid;        // processor requesting the transaction
   size_t              pktid;        // processor requesting the transaction
   bool                proc_read;    // read request from processor
   size_t              read_length;  // length of the read (for the response)
   size_t              word_index;   // index of the first read word (for the response)
   std::vector<data_t> wdata;        // write buffer (one cache line)
   std::vector<be_t>   wdata_be;     // be for each data in the write buffer
   bool                rerror;       // error returned by xram

   /////////////////////////////////////////////////////////////////////
…
   {
     valid  = false;
+    rerror = false;
   }

…

     wdata_be.reserve( (int)n_words );
     wdata.reserve( (int)n_words );
-    for(size_t i=0; i<n_words; i++){
-      wdata_be.push_back(false);
+    for(size_t i=0; i<n_words; i++)
+    {
+      wdata_be.push_back(0);
       wdata.push_back(0);
     }

…

     tab[index].read_length = read_length;
     tab[index].word_index  = word_index;
-    for(size_t i=0; i<tab[index].wdata.size(); i++) {
+    for(size_t i=0; i<tab[index].wdata.size(); i++)
+    {
       tab[index].wdata_be[i] = data_be[i];
       tab[index].wdata[i]    = data[i];

…

   // The write_rsp() function writes a word of the response to an
   // XRAM read transaction.
-  // The data is only written when the corresponding BE field is Ox0.
+  // The BE field in TRT is taken into account.
   // Arguments :
   // - index : the index of the transaction in the transaction tab
   // - word_index : the index of the data in the line
   // - data : the data to write
+  // - error : invalid data
   /////////////////////////////////////////////////////////////////////
   void write_rsp(const size_t index,

…

            && "The selected entry is out of range in erase() Transaction Tab");
     tab[index].valid  = false;
+    tab[index].rerror = false;
   }
 }; // end class TransactionTab
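The updated write_rsp() comment above states that the BE field stored in the TRT is now taken into account when an XRAM response word is written back; the body of write_rsp() is not shown in this changeset, so the following is purely a hypothetical illustration of that kind of byte-enable merge, assuming 32-bit words and one 4-bit BE per word:

// Hypothetical sketch: keep the bytes already written locally (BE bits set in
// wdata_be) and take the remaining bytes from the word returned by the XRAM.
#include <cstdint>
#include <cstdio>

typedef uint32_t data_t;
typedef uint32_t be_t;

// expand a 4-bit byte-enable into a 32-bit mask (one byte per BE bit)
static data_t be2mask(be_t be) {
    data_t mask = 0;
    for (int b = 0; b < 4; b++)
        if (be & (1 << b)) mask |= (data_t)0xFF << (8 * b);
    return mask;
}

int main() {
    data_t wdata    = 0x0000BEEF;   // bytes 0-1 already written by the processor
    be_t   wdata_be = 0x3;          // BE bits set for bytes 0 and 1
    data_t xram     = 0xCAFE1234;   // word returned by the XRAM

    data_t mask   = be2mask(wdata_be);
    data_t merged = (wdata & mask) | (xram & ~mask);
    std::printf("merged = 0x%08X\n", merged);   // prints 0xCAFEBEEF
    return 0;
}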
trunk/modules/vci_mem_cache_v4/caba/source/src/vci_mem_cache_v4.cpp
r175 r184 1 /* -*- c++ -*-1 /* -*- c++ -*- 2 2 * File : vci_mem_cache_v4.cpp 3 3 * Date : 30/10/2008 … … 27 27 * Maintainers: alain eric.guthmuller@polytechnique.edu 28 28 */ 29 29 30 #include "../include/vci_mem_cache_v4.h" 30 31 31 #define DEBUG_VCI_MEM_CACHE 0 32 #define DEBUG_START_CYCLE 1013300 33 #define RANDOMIZE_SC 34 35 #define ASSERT_VERBOSE 36 #define ASSERT_NCYCLES m_cpt_cycles 37 38 #if DEBUG_VCI_MEM_CACHE 39 #define TDEBUG // Transaction tab debug 40 #define IDEBUG // Update tab debug 41 #define DDEBUG // Directory debug 42 #define LOCK_DEBUG // Lock debug 43 #endif 44 45 46 #include "debug.h" 47 48 #if DEBUG_VCI_MEM_CACHE 49 # define PRINTF(msg...) PRINTF_COND(m_cpt_cycles > DEBUG_START_CYCLE,msg) 50 #else 51 # define PRINTF(msg...) 52 #endif 32 ////// debug services /////////////////////////////////////////////////////// 33 // All debug messages are conditionned by two variables: 34 // - compile time : DEBUG_MEMC_*** : defined below 35 // - execution time : m_debug_*** : defined by constructor arguments 36 // m_debug_* = (m_debug_ok) and (m_cpt_cycle > m_debug_start_cycle) 37 ///////////////////////////////////////////////////////////////////////////////// 38 39 #define DEBUG_MEMC_GLOBAL 0 // synthetic trace of all FSMs 40 #define DEBUG_MEMC_READ 1 // detailed trace of READ FSM 41 #define DEBUG_MEMC_WRITE 1 // detailed trace of WRITE FSM 42 #define DEBUG_MEMC_SC 0 // detailed trace of SC FSM 43 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_RSP FSM 44 #define DEBUG_MEMC_IXR_RSP 1 // detailed trace of IXR_RSP FSM 45 #define DEBUG_MEMC_XRAM_RSP 1 // detailed trace of XRAM_RSP FSM 46 #define DEBUG_MEMC_INIT_CMD 0 // detailed trace of INIT_CMD FSM 47 #define DEBUG_MEMC_INIT_RSP 0 // detailed trace of INIT_RSP FSM 48 #define DEBUG_MEMC_TGT_CMD 0 // detailed trace of TGT_CMD FSM 49 #define DEBUG_MEMC_TGT_RSP 0 // detailed trace of TGT_RSP FSM 50 #define DEBUG_MEMC_CLEANUP 0 // detailed trace of CLEANUP FSM 51 52 #define RANDOMIZE_SC 1 53 53 54 54 namespace soclib { namespace caba { 55 55 56 56 const char *tgt_cmd_fsm_str[] = { 57 "TGT_CMD_IDLE ", 58 "TGT_CMD_READ ", 59 "TGT_CMD_READ_EOP", 60 "TGT_CMD_WRITE ", 61 "TGT_CMD_ATOMIC ", 57 "TGT_CMD_IDLE", 58 "TGT_CMD_READ", 59 "TGT_CMD_WRITE", 60 "TGT_CMD_ATOMIC", 62 61 }; 63 62 const char *tgt_rsp_fsm_str[] = { 64 "TGT_RSP_READ_IDLE 65 "TGT_RSP_WRITE_IDLE 66 "TGT_RSP_ LLSC_IDLE",67 "TGT_RSP_XRAM_IDLE 68 "TGT_RSP_INIT_IDLE 63 "TGT_RSP_READ_IDLE", 64 "TGT_RSP_WRITE_IDLE", 65 "TGT_RSP_SC_IDLE", 66 "TGT_RSP_XRAM_IDLE", 67 "TGT_RSP_INIT_IDLE", 69 68 "TGT_RSP_CLEANUP_IDLE", 70 "TGT_RSP_READ 71 "TGT_RSP_WRITE 72 "TGT_RSP_ LLSC",73 "TGT_RSP_XRAM 74 "TGT_RSP_INIT 75 "TGT_RSP_CLEANUP 69 "TGT_RSP_READ", 70 "TGT_RSP_WRITE", 71 "TGT_RSP_SC", 72 "TGT_RSP_XRAM", 73 "TGT_RSP_INIT", 74 "TGT_RSP_CLEANUP", 76 75 }; 77 76 const char *init_cmd_fsm_str[] = { 78 "INIT_CMD_INVAL_IDLE 79 "INIT_CMD_INVAL_NLINE 80 "INIT_CMD_XRAM_BRDCAST 81 "INIT_CMD_UPDT_IDLE 82 "INIT_CMD_WRITE_BRDCAST 83 "INIT_CMD_UPDT_NLINE 84 "INIT_CMD_UPDT_INDEX 85 "INIT_CMD_UPDT_DATA 86 "INIT_CMD_SC_UPDT_IDLE 87 "INIT_CMD_SC_BRDCAST 88 "INIT_CMD_SC_UPDT_NLINE 89 "INIT_CMD_SC_UPDT_INDEX 90 "INIT_CMD_SC_UPDT_DATA 77 "INIT_CMD_INVAL_IDLE", 78 "INIT_CMD_INVAL_NLINE", 79 "INIT_CMD_XRAM_BRDCAST", 80 "INIT_CMD_UPDT_IDLE", 81 "INIT_CMD_WRITE_BRDCAST", 82 "INIT_CMD_UPDT_NLINE", 83 "INIT_CMD_UPDT_INDEX", 84 "INIT_CMD_UPDT_DATA", 85 "INIT_CMD_SC_UPDT_IDLE", 86 "INIT_CMD_SC_BRDCAST", 87 "INIT_CMD_SC_UPDT_NLINE", 88 "INIT_CMD_SC_UPDT_INDEX", 89 "INIT_CMD_SC_UPDT_DATA", 91 90 
"INIT_CMD_SC_UPDT_DATA_HIGH", 92 91 }; 93 92 const char *init_rsp_fsm_str[] = { 94 "INIT_RSP_IDLE 95 "INIT_RSP_UPT_LOCK 93 "INIT_RSP_IDLE", 94 "INIT_RSP_UPT_LOCK", 96 95 "INIT_RSP_UPT_CLEAR", 97 "INIT_RSP_END 96 "INIT_RSP_END", 98 97 }; 99 98 const char *read_fsm_str[] = { 100 "READ_IDLE 101 "READ_DIR_LOCK 102 "READ_DIR_HIT 103 "READ_HEAP_LOCK 99 "READ_IDLE", 100 "READ_DIR_LOCK", 101 "READ_DIR_HIT", 102 "READ_HEAP_LOCK", 104 103 "READ_HEAP_WRITE", 105 104 "READ_HEAP_ERASE", 106 "READ_HEAP_LAST 107 "READ_RSP 108 "READ_TRT_LOCK 109 "READ_TRT_SET 110 "READ_ XRAM_REQ",105 "READ_HEAP_LAST", 106 "READ_RSP", 107 "READ_TRT_LOCK", 108 "READ_TRT_SET", 109 "READ_TRT_REQ", 111 110 }; 112 111 const char *write_fsm_str[] = { 113 "WRITE_IDLE 114 "WRITE_NEXT 115 "WRITE_DIR_LOCK 116 "WRITE_DIR_HIT_READ 117 "WRITE_DIR_HIT 118 "WRITE_UPT_LOCK 119 "WRITE_HEAP_LOCK 120 "WRITE_UPT_REQ 121 "WRITE_UPDATE 122 "WRITE_UPT_DEC 123 "WRITE_RSP 124 "WRITE_TRT_LOCK 125 "WRITE_TRT_DATA 126 "WRITE_TRT_SET 127 "WRITE_WAIT 128 "WRITE_XRAM_REQ 112 "WRITE_IDLE", 113 "WRITE_NEXT", 114 "WRITE_DIR_LOCK", 115 "WRITE_DIR_HIT_READ", 116 "WRITE_DIR_HIT", 117 "WRITE_UPT_LOCK", 118 "WRITE_HEAP_LOCK", 119 "WRITE_UPT_REQ", 120 "WRITE_UPDATE", 121 "WRITE_UPT_DEC", 122 "WRITE_RSP", 123 "WRITE_TRT_LOCK", 124 "WRITE_TRT_DATA", 125 "WRITE_TRT_SET", 126 "WRITE_WAIT", 127 "WRITE_XRAM_REQ", 129 128 "WRITE_TRT_WRITE_LOCK", 130 "WRITE_INVAL_LOCK 131 "WRITE_DIR_INVAL 132 "WRITE_INVAL 133 "WRITE_XRAM_SEND 129 "WRITE_INVAL_LOCK", 130 "WRITE_DIR_INVAL", 131 "WRITE_INVAL", 132 "WRITE_XRAM_SEND", 134 133 }; 135 134 const char *ixr_rsp_fsm_str[] = { 136 "IXR_RSP_IDLE 137 "IXR_RSP_ACK 135 "IXR_RSP_IDLE", 136 "IXR_RSP_ACK", 138 137 "IXR_RSP_TRT_ERASE", 139 "IXR_RSP_TRT_READ 138 "IXR_RSP_TRT_READ", 140 139 }; 141 140 const char *xram_rsp_fsm_str[] = { 142 "XRAM_RSP_IDLE 143 "XRAM_RSP_TRT_COPY 144 "XRAM_RSP_TRT_DIRTY 145 "XRAM_RSP_DIR_LOCK 146 "XRAM_RSP_DIR_UPDT 147 "XRAM_RSP_DIR_RSP 148 "XRAM_RSP_INVAL_LOCK 149 "XRAM_RSP_INVAL_WAIT 150 "XRAM_RSP_INVAL 141 "XRAM_RSP_IDLE", 142 "XRAM_RSP_TRT_COPY", 143 "XRAM_RSP_TRT_DIRTY", 144 "XRAM_RSP_DIR_LOCK", 145 "XRAM_RSP_DIR_UPDT", 146 "XRAM_RSP_DIR_RSP", 147 "XRAM_RSP_INVAL_LOCK", 148 "XRAM_RSP_INVAL_WAIT", 149 "XRAM_RSP_INVAL", 151 150 "XRAM_RSP_WRITE_DIRTY", 152 "XRAM_RSP_HEAP_ERASE 153 "XRAM_RSP_HEAP_LAST 151 "XRAM_RSP_HEAP_ERASE", 152 "XRAM_RSP_HEAP_LAST", 154 153 "XRAM_RSP_ERROR_ERASE", 155 "XRAM_RSP_ERROR_RSP 154 "XRAM_RSP_ERROR_RSP", 156 155 }; 157 156 const char *ixr_cmd_fsm_str[] = { 158 "IXR_CMD_READ_IDLE 159 "IXR_CMD_WRITE_IDLE 160 "IXR_CMD_ LLSC_IDLE",161 "IXR_CMD_XRAM_IDLE 162 "IXR_CMD_READ_NLINE 163 "IXR_CMD_WRITE_NLINE 164 "IXR_CMD_ LLSC_NLINE",165 "IXR_CMD_XRAM_DATA 157 "IXR_CMD_READ_IDLE", 158 "IXR_CMD_WRITE_IDLE", 159 "IXR_CMD_SC_IDLE", 160 "IXR_CMD_XRAM_IDLE", 161 "IXR_CMD_READ_NLINE", 162 "IXR_CMD_WRITE_NLINE", 163 "IXR_CMD_SC_NLINE", 164 "IXR_CMD_XRAM_DATA", 166 165 }; 167 const char * llsc_fsm_str[] = {168 " LLSC_IDLE",169 "SC_DIR_LOCK 170 "SC_DIR_HIT_READ 166 const char *sc_fsm_str[] = { 167 "SC_IDLE", 168 "SC_DIR_LOCK", 169 "SC_DIR_HIT_READ", 171 170 "SC_DIR_HIT_WRITE", 172 "SC_UPT_LOCK 173 "SC_WAIT 174 "SC_HEAP_LOCK 175 "SC_UPT_REQ 176 "SC_UP DATE",177 "SC_TRT_ LOCK",178 "SC_INVAL_LOCK 179 "SC_DIR_INVAL 180 "SC_INVAL 181 "SC_ XRAM_SEND",182 "SC_RSP_FA LSE",183 "SC_RSP_ TRUE",184 " LLSC_TRT_LOCK",185 " LLSC_TRT_SET",186 " LLSC_XRAM_REQ",171 "SC_UPT_LOCK", 172 "SC_WAIT", 173 "SC_HEAP_LOCK", 174 "SC_UPT_REQ", 175 "SC_UPT_NEXT", 176 "SC_TRT_PUT_LOCK", 177 "SC_INVAL_LOCK", 178 "SC_DIR_INVAL", 179 
"SC_INVAL", 180 "SC_TRT_PUT_REQ", 181 "SC_RSP_FAIL", 182 "SC_RSP_SUCCESS", 183 "SC_TRT_GET_LOCK", 184 "SC_TRT_GET_SET", 185 "SC_TRT_GET_REQ", 187 186 }; 188 187 const char *cleanup_fsm_str[] = { 189 "CLEANUP_IDLE 190 "CLEANUP_DIR_LOCK 191 "CLEANUP_DIR_WRITE 192 "CLEANUP_HEAP_LOCK 188 "CLEANUP_IDLE", 189 "CLEANUP_DIR_LOCK", 190 "CLEANUP_DIR_WRITE", 191 "CLEANUP_HEAP_LOCK", 193 192 "CLEANUP_HEAP_SEARCH", 194 "CLEANUP_HEAP_CLEAN 195 "CLEANUP_HEAP_FREE 196 "CLEANUP_UPT_LOCK 197 "CLEANUP_UPT_WRITE 198 "CLEANUP_WRITE_RSP 199 "CLEANUP_RSP 193 "CLEANUP_HEAP_CLEAN", 194 "CLEANUP_HEAP_FREE", 195 "CLEANUP_UPT_LOCK", 196 "CLEANUP_UPT_WRITE", 197 "CLEANUP_WRITE_RSP", 198 "CLEANUP_RSP", 200 199 }; 201 200 const char *alloc_dir_fsm_str[] = { 202 "ALLOC_DIR_READ 203 "ALLOC_DIR_WRITE 204 "ALLOC_DIR_ LLSC",205 "ALLOC_DIR_CLEANUP 201 "ALLOC_DIR_READ", 202 "ALLOC_DIR_WRITE", 203 "ALLOC_DIR_SC", 204 "ALLOC_DIR_CLEANUP", 206 205 "ALLOC_DIR_XRAM_RSP", 207 206 }; 208 207 const char *alloc_trt_fsm_str[] = { 209 "ALLOC_TRT_READ 210 "ALLOC_TRT_WRITE 211 "ALLOC_TRT_ LLSC",208 "ALLOC_TRT_READ", 209 "ALLOC_TRT_WRITE", 210 "ALLOC_TRT_SC", 212 211 "ALLOC_TRT_XRAM_RSP", 213 "ALLOC_TRT_IXR_RSP 212 "ALLOC_TRT_IXR_RSP", 214 213 }; 215 214 const char *alloc_upt_fsm_str[] = { 216 "ALLOC_UPT_WRITE 215 "ALLOC_UPT_WRITE", 217 216 "ALLOC_UPT_XRAM_RSP", 218 217 "ALLOC_UPT_INIT_RSP", 219 "ALLOC_UPT_CLEANUP 218 "ALLOC_UPT_CLEANUP", 220 219 }; 221 220 const char *alloc_heap_fsm_str[] = { 222 "ALLOC_HEAP_READ 223 "ALLOC_HEAP_WRITE 224 "ALLOC_HEAP_ LLSC",225 "ALLOC_HEAP_CLEANUP 221 "ALLOC_HEAP_READ", 222 "ALLOC_HEAP_WRITE", 223 "ALLOC_HEAP_SC", 224 "ALLOC_HEAP_CLEANUP", 226 225 "ALLOC_HEAP_XRAM_RSP", 227 226 }; … … 244 243 const soclib::common::IntTab &vci_tgt_index, 245 244 const soclib::common::IntTab &vci_tgt_index_cleanup, 246 size_t nways, 247 size_t nsets, 248 size_t nwords, 249 size_t heap_size, 250 size_t transaction_tab_lines, 251 size_t update_tab_lines) 245 size_t nways, // number of ways per set 246 size_t nsets, // number of cache sets 247 size_t nwords, // number of words in cache line 248 size_t heap_size, // number of heap entries 249 size_t transaction_tab_lines, // number of TRT entries 250 size_t update_tab_lines, // number of UPT entries 251 size_t debug_start_cycle, 252 bool debug_ok) 252 253 253 254 : soclib::caba::BaseModule(name), 255 256 m_debug_start_cycle( debug_start_cycle), 257 m_debug_ok ( debug_ok ), 254 258 255 259 p_clk("clk"), … … 275 279 m_update_tab( update_tab_lines ), 276 280 m_cache_directory( nways, nsets, nwords, vci_param::N ), 277 m_heap_directory( m_heap_size ), 281 m_heap( m_heap_size ), 282 278 283 #define L2 soclib::common::uint32_log2 279 284 m_x( L2(m_words), 2), … … 298 303 m_cmd_write_be_fifo("m_cmd_write_be_fifo",8), 299 304 300 m_cmd_ llsc_addr_fifo("m_cmd_llsc_addr_fifo",4),301 m_cmd_ llsc_eop_fifo("m_cmd_llsc_eop_fifo",4),302 m_cmd_ llsc_srcid_fifo("m_cmd_llsc_srcid_fifo",4),303 m_cmd_ llsc_trdid_fifo("m_cmd_llsc_trdid_fifo",4),304 m_cmd_ llsc_pktid_fifo("m_cmd_llsc_pktid_fifo",4),305 m_cmd_ llsc_wdata_fifo("m_cmd_llsc_wdata_fifo",4),305 m_cmd_sc_addr_fifo("m_cmd_sc_addr_fifo",4), 306 m_cmd_sc_eop_fifo("m_cmd_sc_eop_fifo",4), 307 m_cmd_sc_srcid_fifo("m_cmd_sc_srcid_fifo",4), 308 m_cmd_sc_trdid_fifo("m_cmd_sc_trdid_fifo",4), 309 m_cmd_sc_pktid_fifo("m_cmd_sc_pktid_fifo",4), 310 m_cmd_sc_wdata_fifo("m_cmd_sc_wdata_fifo",4), 306 311 307 312 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), … … 319 324 r_init_rsp_fsm("r_init_rsp_fsm"), 320 325 r_cleanup_fsm("r_cleanup_fsm"), 321 r_ 
llsc_fsm("r_llsc_fsm"),322 m_ llsc_to_init_cmd_inst_fifo("m_llsc_to_init_cmd_inst_fifo",8),323 m_ llsc_to_init_cmd_srcid_fifo("m_llsc_to_init_cmd_srcid_fifo",8),326 r_sc_fsm("r_sc_fsm"), 327 m_sc_to_init_cmd_inst_fifo("m_sc_to_init_cmd_inst_fifo",8), 328 m_sc_to_init_cmd_srcid_fifo("m_sc_to_init_cmd_srcid_fifo",8), 324 329 #if L1_MULTI_CACHE 325 m_ llsc_to_init_cmd_cache_id_fifo("m_llsc_to_init_cmd_cache_id_fifo",8),330 m_sc_to_init_cmd_cache_id_fifo("m_sc_to_init_cmd_cache_id_fifo",8), 326 331 #endif 327 332 r_ixr_rsp_fsm("r_ixr_rsp_fsm"), … … 346 351 assert(nways); 347 352 353 // check Transaction table size 354 assert( (uint32_log2(transaction_tab_lines) <= vci_param::T) and 355 "Need more bits for VCI TRDID field"); 356 348 357 // Set the broadcast address with Xmin,Xmax,Ymin,Ymax set to maximum 349 358 m_broadcast_address = 0x3 | (0x7C1F << (vci_param::N-20)); … … 405 414 r_write_to_ixr_cmd_data = new sc_signal<data_t>[nwords]; 406 415 407 // Allocation for LLSC FSM408 r_ llsc_to_ixr_cmd_data= new sc_signal<data_t>[nwords];409 r_ llsc_rdata= new sc_signal<data_t>[2];416 // Allocation for SC FSM 417 r_sc_to_ixr_cmd_data = new sc_signal<data_t>[nwords]; 418 r_sc_rdata = new sc_signal<data_t>[2]; 410 419 411 420 … … 422 431 } // end constructor 423 432 424 ////////////////////////////////////////////////// 425 // This function prints a trace of internal states 426 ////////////////////////////////////////////////// 427 428 tmpl(void)::print_trace() 429 { 430 std::cout << "MEM_CACHE " << name() << std::endl; 431 std::cout << " / " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] 432 << " / " << read_fsm_str[r_read_fsm] 433 << " / " << write_fsm_str[r_write_fsm] 434 << " / " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] 435 << " / " << init_cmd_fsm_str[r_init_cmd_fsm] 436 << " / " << init_rsp_fsm_str[r_init_rsp_fsm] << std::endl; 437 } 438 439 ///////////////////////////////////////// 440 // This function prints the statistics 441 ///////////////////////////////////////// 442 443 tmpl(void)::print_stats() 444 { 433 ///////////////////////////////////////////////////// 434 tmpl(void)::cache_monitor( vci_addr_t addr ) 435 ///////////////////////////////////////////////////// 436 { 437 size_t way = 0; 438 DirectoryEntry entry = m_cache_directory.read(addr, way); 439 if ( (entry.count != m_debug_previous_count) or 440 (entry.valid != m_debug_previous_hit) ) 441 { 442 std::cout << " MEMC " << name() 443 << " cache change at cycle " << std::dec << m_cpt_cycles 444 << " for address " << std::hex << addr 445 << " / HIT = " << entry.valid 446 << " / COUNT = " << std::dec << entry.count << std::endl; 447 } 448 m_debug_previous_count = entry.count; 449 m_debug_previous_hit = entry.valid; 450 } 451 452 ////////////////////////////////////////////////// 453 tmpl(void)::print_trace() 454 ////////////////////////////////////////////////// 455 { 456 std::cout << "MEMC " << name() << std::endl; 457 std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] 458 << " | " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] 459 << " | " << read_fsm_str[r_read_fsm] 460 << " | " << write_fsm_str[r_write_fsm] 461 << " | " << sc_fsm_str[r_sc_fsm] 462 << " | " << cleanup_fsm_str[r_cleanup_fsm] << std::endl; 463 std::cout << " " << init_cmd_fsm_str[r_init_cmd_fsm] 464 << " | " << init_rsp_fsm_str[r_init_rsp_fsm] 465 << " | " << ixr_cmd_fsm_str[r_ixr_cmd_fsm] 466 << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm] 467 << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl; 468 } 469 470 ///////////////////////////////////////// 471 tmpl(void)::print_stats() 472 
///////////////////////////////////////// 473 { 445 474 std::cout << "----------------------------------" << std::dec << std::endl; 446 475 std::cout << "MEM_CACHE " << m_srcid_ini << " / Time = " << m_cpt_cycles << std::endl … … 464 493 << "- LL RATE = " << (double)m_cpt_ll/m_cpt_cycles << std::endl 465 494 << "- SC RATE = " << (double)m_cpt_sc/m_cpt_cycles << std::endl; 466 495 } 467 496 468 497 ///////////////////////////////// … … 495 524 } 496 525 497 498 499 500 526 ////////////////////////////////// 527 tmpl(void)::transition() 528 ////////////////////////////////// 529 { 501 530 using soclib::common::uint32_log2; 531 502 532 // RESET 503 533 if ( ! p_resetn.read() ) { … … 508 538 r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; 509 539 r_init_rsp_fsm = INIT_RSP_IDLE; 510 r_read_fsm = READ_IDLE;511 r_write_fsm = WRITE_IDLE;512 r_ llsc_fsm = LLSC_IDLE;540 r_read_fsm = READ_IDLE; 541 r_write_fsm = WRITE_IDLE; 542 r_sc_fsm = SC_IDLE; 513 543 r_cleanup_fsm = CLEANUP_IDLE; 514 544 r_alloc_dir_fsm = ALLOC_DIR_READ; … … 519 549 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 520 550 551 m_debug_global = false; 552 m_debug_tgt_cmd_fsm = false; 553 m_debug_tgt_rsp_fsm = false; 554 m_debug_init_cmd_fsm = false; 555 m_debug_init_rsp_fsm = false; 556 m_debug_read_fsm = false; 557 m_debug_write_fsm = false; 558 m_debug_sc_fsm = false; 559 m_debug_cleanup_fsm = false; 560 m_debug_ixr_cmd_fsm = false; 561 m_debug_ixr_rsp_fsm = false; 562 m_debug_xram_rsp_fsm = false; 563 m_debug_previous_hit = false; 564 m_debug_previous_count = 0; 565 521 566 // Initializing Tables 522 567 m_cache_directory.init(); 523 568 m_transaction_tab.init(); 524 m_heap _directory.init();569 m_heap.init(); 525 570 526 571 // initializing FIFOs and communication Buffers … … 539 584 m_cmd_write_data_fifo.init(); 540 585 541 m_cmd_ llsc_addr_fifo.init();542 m_cmd_ llsc_srcid_fifo.init();543 m_cmd_ llsc_trdid_fifo.init();544 m_cmd_ llsc_pktid_fifo.init();545 m_cmd_ llsc_wdata_fifo.init();546 m_cmd_ llsc_eop_fifo.init();547 548 r_read_to_tgt_rsp_req = false;549 r_read_to_ixr_cmd_req = false;586 m_cmd_sc_addr_fifo.init(); 587 m_cmd_sc_srcid_fifo.init(); 588 m_cmd_sc_trdid_fifo.init(); 589 m_cmd_sc_pktid_fifo.init(); 590 m_cmd_sc_wdata_fifo.init(); 591 m_cmd_sc_eop_fifo.init(); 592 593 r_read_to_tgt_rsp_req = false; 594 r_read_to_ixr_cmd_req = false; 550 595 551 596 r_write_to_tgt_rsp_req = false; … … 560 605 #endif 561 606 562 r_cleanup_to_tgt_rsp_req = false;563 564 r_init_rsp_to_tgt_rsp_req = false;565 566 r_ llsc_to_tgt_rsp_req = false;567 r_ llsc_cpt= 0;568 r_ llsc_lfsr= -1;569 r_ llsc_to_ixr_cmd_req = false;570 r_ llsc_to_init_cmd_multi_req = false;571 r_ llsc_to_init_cmd_brdcast_req= false;572 m_ llsc_to_init_cmd_inst_fifo.init();573 m_ llsc_to_init_cmd_srcid_fifo.init();607 r_cleanup_to_tgt_rsp_req = false; 608 609 r_init_rsp_to_tgt_rsp_req = false; 610 611 r_sc_to_tgt_rsp_req = false; 612 r_sc_cpt = 0; 613 r_sc_lfsr = -1; 614 r_sc_to_ixr_cmd_req = false; 615 r_sc_to_init_cmd_multi_req = false; 616 r_sc_to_init_cmd_brdcast_req = false; 617 m_sc_to_init_cmd_inst_fifo.init(); 618 m_sc_to_init_cmd_srcid_fifo.init(); 574 619 #if L1_MULTI_CACHE 575 m_ llsc_to_init_cmd_cache_id_fifo.init();620 m_sc_to_init_cmd_cache_id_fifo.init(); 576 621 #endif 577 622 … … 611 656 m_cpt_ll = 0; 612 657 m_cpt_sc = 0; 613 m_cpt_trt_full = 0;614 m_cpt_trt_rb = 0;658 m_cpt_trt_full = 0; 659 m_cpt_trt_rb = 0; 615 660 616 661 return; … … 623 668 bool cmd_write_fifo_get = false; 624 669 625 bool cmd_ llsc_fifo_put = false;626 bool cmd_ llsc_fifo_get = false;670 bool 
cmd_sc_fifo_put = false; 671 bool cmd_sc_fifo_get = false; 627 672 628 673 bool write_to_init_cmd_fifo_put = false; … … 630 675 bool write_to_init_cmd_fifo_inst = false; 631 676 size_t write_to_init_cmd_fifo_srcid = 0; 677 632 678 #if L1_MULTI_CACHE 633 679 size_t write_to_init_cmd_fifo_cache_id = 0; … … 638 684 bool xram_rsp_to_init_cmd_fifo_inst = false; 639 685 size_t xram_rsp_to_init_cmd_fifo_srcid = 0; 686 640 687 #if L1_MULTI_CACHE 641 688 size_t xram_rsp_to_init_cmd_fifo_cache_id = 0; 642 689 #endif 643 690 644 bool llsc_to_init_cmd_fifo_put = false; 645 bool llsc_to_init_cmd_fifo_get = false; 646 bool llsc_to_init_cmd_fifo_inst = false; 647 size_t llsc_to_init_cmd_fifo_srcid = 0; 691 bool sc_to_init_cmd_fifo_put = false; 692 bool sc_to_init_cmd_fifo_get = false; 693 bool sc_to_init_cmd_fifo_inst = false; 694 size_t sc_to_init_cmd_fifo_srcid = 0; 695 648 696 #if L1_MULTI_CACHE 649 size_t llsc_to_init_cmd_fifo_cache_id = 0; 650 #endif 651 652 #if DEBUG_VCI_MEM_CACHE 653 if(m_cpt_cycles > DEBUG_START_CYCLE){ 697 size_t sc_to_init_cmd_fifo_cache_id = 0; 698 #endif 699 700 m_debug_global = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 701 m_debug_tgt_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 702 m_debug_tgt_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 703 m_debug_init_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 704 m_debug_init_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 705 m_debug_read_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 706 m_debug_write_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 707 m_debug_sc_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 708 m_debug_cleanup_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 709 m_debug_ixr_cmd_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 710 m_debug_ixr_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 711 m_debug_xram_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 712 713 714 #if DEBUG_MEMC_GLOBAL 715 if( m_debug_global ) 716 { 654 717 std::cout << "---------------------------------------------" << std::dec << std::endl; 655 718 std::cout << "MEM_CACHE " << m_srcid_ini << " ; Time = " << m_cpt_cycles << std::endl … … 660 723 << " - READ FSM = " << read_fsm_str[r_read_fsm] << std::endl 661 724 << " - WRITE FSM = " << write_fsm_str[r_write_fsm] << std::endl 662 << " - LLSC FSM = " << llsc_fsm_str[r_llsc_fsm] << std::endl725 << " - SC FSM = " << sc_fsm_str[r_sc_fsm] << std::endl 663 726 << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm] << std::endl 664 727 << " - IXR_CMD FSM = " << ixr_cmd_fsm_str[r_ixr_cmd_fsm] << std::endl … … 672 735 #endif 673 736 674 #ifdef IDEBUG675 if(m_cpt_cycles > DEBUG_START_CYCLE){676 std::cout << sc_time_stamp() << " " << name() << " INIT_RSP_UPT_LOCK update table : " << std::endl;677 m_update_tab.print();678 }679 #endif680 681 737 //////////////////////////////////////////////////////////////////////////////////// 682 738 // TGT_CMD FSM … … 684 740 // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors 685 741 // 686 // There is 4 types of packets for the m_mem_segment:742 // There is 3 types of accepted commands : 687 743 // - READ : a READ request has a length of 1 VCI cell. It can be a single word 688 744 // or an entire cache line, depending on the PLEN value. 689 745 // - WRITE : a WRITE request has a maximum length of 16 cells, and can only 690 746 // concern words in a same line. 
691 // - LL : The LL request has a length of 1 cell. 692 // - SC : The SC request has a length of 1 cell. 693 // The WDATA field contains the data to write. 694 // 747 // - SC : The SC request has a length of 2 cells or 4 cells. 695 748 //////////////////////////////////////////////////////////////////////////////////// 696 749 697 switch ( r_tgt_cmd_fsm.read() ) { 698 699 ////////////////// 700 case TGT_CMD_IDLE: 701 { 702 if ( p_vci_tgt.cmdval ) { 703 704 PRINTF(" * <MEM_CACHE.TGT> Request from %d.%d (%d) at address %llx\n",(uint32_t)p_vci_tgt.srcid.read(),(uint32_t)p_vci_tgt.pktid.read(),(uint32_t)p_vci_tgt.trdid.read(),(uint64_t)p_vci_tgt.address.read()); 705 706 if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) 707 { 708 r_tgt_cmd_fsm = TGT_CMD_READ; 750 switch ( r_tgt_cmd_fsm.read() ) 751 { 752 ////////////////// 753 case TGT_CMD_IDLE: 754 { 755 if ( p_vci_tgt.cmdval ) 756 { 757 758 #if DEBUG_MEMC_TGT_CMD 759 if( m_debug_tgt_cmd_fsm ) 760 { 761 std::cout << " <MEMC.TGT_CMD_IDLE> Receive command from srcid " << p_vci_tgt.srcid.read() 762 << " / for address " << p_vci_tgt.address.read() << std::endl; 763 } 764 #endif 765 if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) 766 { 767 r_tgt_cmd_fsm = TGT_CMD_READ; 768 } 769 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) 770 { 771 r_tgt_cmd_fsm = TGT_CMD_WRITE; 772 } 773 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND ) 774 { 775 r_tgt_cmd_fsm = TGT_CMD_ATOMIC; 776 } 777 else 778 { 779 std::cout << "VCI_MEM_CACHE ERROR " << name() 780 << " TGT_CMD_IDLE state" << std::endl; 781 std::cout << " illegal VCI command type" << std::endl; 782 exit(0); 783 } 784 } 785 break; 786 } 787 ////////////////// 788 case TGT_CMD_READ: 789 { 790 if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) 791 { 792 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; 793 std::cout << " illegal address/plen combination for VCI read command" << std::endl; 794 exit(0); 795 } 796 if ( !p_vci_tgt.eop.read() ) 797 { 798 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl; 799 std::cout << " read command packets must contain one single flit" << std::endl; 800 exit(0); 801 } 802 803 if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) 804 { 805 806 #if DEBUG_MEMC_TGT_CMD 807 if( m_debug_tgt_cmd_fsm ) 808 { 809 std::cout << " <MEMC.TGT_CMD_READ> Push into read_fifo:" 810 << " address = " << std::hex << p_vci_tgt.address.read() 811 << " srcid = " << p_vci_tgt.srcid.read() 812 << " trdid = " << p_vci_tgt.trdid.read() 813 << " plen = " << p_vci_tgt.plen.read() << std::endl; 814 } 815 #endif 816 cmd_read_fifo_put = true; 817 m_cpt_read++; 818 r_tgt_cmd_fsm = TGT_CMD_IDLE; 709 819 } 710 // else if (( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) && ( p_vci_tgt.trdid.read() == 0x0 )) 711 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) 712 { 713 r_tgt_cmd_fsm = TGT_CMD_WRITE; 714 } 715 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND ) 716 { 717 r_tgt_cmd_fsm = TGT_CMD_ATOMIC; 718 } else { 719 std::cout << "MemCache error : wrong command " << std::endl; 820 break; 821 } 822 /////////////////// 823 case TGT_CMD_WRITE: 824 { 825 if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) 826 { 827 828 #if DEBUG_MEMC_TGT_CMD 829 if( m_debug_tgt_cmd_fsm ) 830 { 831 std::cout << " <MEMC.TGT_CMD_WRITE> Push into write_fifo:" 832 << " address = " << std::hex << p_vci_tgt.address.read() 833 << " srcid = " << p_vci_tgt.srcid.read() 834 << " trdid = " << 
p_vci_tgt.trdid.read() 835 << " wdata = " << p_vci_tgt.wdata.read() 836 << " be = " << p_vci_tgt.be.read() 837 << " plen = " << p_vci_tgt.plen.read() << std::endl; 838 } 839 #endif 840 cmd_write_fifo_put = true; 841 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 842 } 843 break; 844 } 845 //////////////////// 846 case TGT_CMD_ATOMIC: 847 { 848 if ( (p_vci_tgt.plen.read() != 8) && (p_vci_tgt.plen.read() != 16) ) 849 { 850 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_ATOMIC state" << std::endl; 851 std::cout << "illegal format for sc command " << std::endl; 720 852 exit(0); 721 853 } 722 } 723 break; 724 } 725 ////////////////// 726 case TGT_CMD_READ: 727 728 { 729 ASSERT(((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2))<=16), 730 "VCI_MEM_CACHE All read request to the MemCache must stay within a cache line"); 731 732 if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) { 733 cmd_read_fifo_put = true; 734 if ( p_vci_tgt.eop ) { 735 m_cpt_read++; 736 r_tgt_cmd_fsm = TGT_CMD_IDLE; 737 } else r_tgt_cmd_fsm = TGT_CMD_READ_EOP; 738 } 739 break; 740 } 741 ////////////////////// 742 case TGT_CMD_READ_EOP: 743 { 744 if ( p_vci_tgt.cmdval && p_vci_tgt.eop ){ 745 m_cpt_read++; 746 r_tgt_cmd_fsm = TGT_CMD_IDLE; 747 } 748 break; 749 } 750 /////////////////// 751 case TGT_CMD_WRITE: 752 { 753 754 if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) { 755 cmd_write_fifo_put = true; 756 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 757 758 } 759 break; 760 } 761 //////////////////// 762 case TGT_CMD_ATOMIC: 763 { 764 if ( p_vci_tgt.cmdval && m_cmd_llsc_addr_fifo.wok() ) { 765 cmd_llsc_fifo_put = true; 766 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 767 } 768 break; 854 855 if ( p_vci_tgt.cmdval && m_cmd_sc_addr_fifo.wok() ) 856 { 857 858 #if DEBUG_MEMC_TGT_CMD 859 if( m_debug_tgt_cmd_fsm ) 860 { 861 std::cout << " <MEMC.TGT_CMD_ATOMIC> Pushing command into cmd_sc_fifo:" 862 << " address = " << std::hex << p_vci_tgt.address.read() 863 << " srcid = " << p_vci_tgt.srcid.read() 864 << " trdid = " << p_vci_tgt.trdid.read() 865 << " wdata = " << p_vci_tgt.wdata.read() 866 << " be = " << p_vci_tgt.be.read() 867 << " plen = " << p_vci_tgt.plen.read() << std::endl; 868 } 869 #endif 870 cmd_sc_fifo_put = true; 871 if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; 872 } 873 break; 769 874 } 770 875 } // end switch tgt_cmd_fsm … … 773 878 // INIT_RSP FSM 774 879 ///////////////////////////////////////////////////////////////////////// 775 // This FSM controls the response to the update or inval idate requests776 // sent by the memory cache to the L1 caches :880 // This FSM controls the response to the update or inval coherence 881 // requests sent by the memory cache to the L1 caches : 777 882 // 778 // - update request initiated by the WRITE FSM. 779 // The FSM decrements the proper entry in the Update/Inval Table. 780 // It sends a request to the TGT_RSP FSM to complete the pending 781 // write transaction (acknowledge response to the writer processor), 782 // and clear the UPT entry when all responses have been received. 783 // - invalidate request initiated by the XRAM_RSP FSM. 784 // The FSM decrements the proper entry in the Update/Inval_Table, 785 // and clear the entry when all responses have been received. 883 // It can be update or inval requests initiated by the WRITE FSM, 884 // or inval requests initiated by the XRAM_RSP FSM. 885 // The FSM decrements the proper entry in UPT. 
886 // It sends a request to the TGT_RSP FSM to complete the pending 887 // write transaction (acknowledge response to the writer processor), 888 // and clear the UPT entry when all responses have been received. 786 889 // 787 890 // All those response packets are one word, compact 788 891 // packets complying with the VCI advanced format. 789 892 // The index in the Table is defined in the RTRDID field, and 790 // the Transaction type is defined in the Update/Inval Table.893 // the transaction type is defined in the UPT entry. 791 894 ///////////////////////////////////////////////////////////////////// 792 895 793 switch ( r_init_rsp_fsm.read() ) { 794 795 /////////////////// 796 case INIT_RSP_IDLE: 797 { 798 799 if ( p_vci_ini.rspval ) { 800 PRINTF(" * <MEM_CACHE.INIT_RSP> rsp val - trdid %d\n",(uint32_t)p_vci_ini.rtrdid.read()); 801 802 ASSERT (( p_vci_ini.rtrdid.read() < m_update_tab.size()) 803 ,"VCI_MEM_CACHE UPT index too large in VCI response paquet received by memory cache" ); 804 ASSERT (p_vci_ini.reop 805 ,"VCI_MEM_CACHE All response packets to update/invalidate requests must be one cell") ; 806 r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); 807 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 808 } else if( r_write_to_init_rsp_req.read() ){ 809 r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); 810 r_write_to_init_rsp_req = false; 811 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 812 } 813 break; 896 switch ( r_init_rsp_fsm.read() ) 897 { 898 /////////////////// 899 case INIT_RSP_IDLE: // wait a response for a coherence transaction 900 { 901 if ( p_vci_ini.rspval ) 902 { 903 904 #if DEBUG_MEMC_INIT_RSP 905 if( m_debug_init_rsp_fsm ) 906 { 907 std::cout << " <MEMC.INIT_RSP_IDLE> Response for UPT entry " 908 << p_vci_ini.rtrdid.read() << std::endl; 909 } 910 #endif 911 if ( p_vci_ini.rtrdid.read() >= m_update_tab.size() ) 912 { 913 std::cout << "VCI_MEM_CACHE ERROR " << name() 914 << " INIT_RSP_IDLE state" << std::endl; 915 std::cout << "index too large for UPT: " 916 << " / rtrdid = " << p_vci_ini.rtrdid.read() 917 << " / UPT size = " << m_update_tab.size() << std::endl; 918 exit(0); 919 } 920 if ( !p_vci_ini.reop.read() ) 921 { 922 std::cout << "VCI_MEM_CACHE ERROR " << name() 923 << " INIT_RSP_IDLE state" << std::endl; 924 std::cout << "all coherence response packets must be one flit" << std::endl; 925 exit(0); 926 } 927 928 r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); 929 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 930 } 931 else if( r_write_to_init_rsp_req.read() ) 932 { 933 r_init_rsp_upt_index = r_write_to_init_rsp_upt_index.read(); 934 r_write_to_init_rsp_req = false; 935 r_init_rsp_fsm = INIT_RSP_UPT_LOCK; 936 } 937 break; 814 938 } 815 939 /////////////////////// 816 case INIT_RSP_UPT_LOCK: // decrement the number of expected responses 817 { 818 819 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { 820 size_t count = 0; 821 bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); 822 #ifdef IDEBUG 823 if(m_cpt_cycles > DEBUG_START_CYCLE){ 824 std::cout << sc_time_stamp() << " " << name() << " INIT_RSP_UPT_LOCK update table : " << std::endl; 825 m_update_tab.print(); 826 } 827 #endif 828 while(!valid); 829 ASSERT ( valid 830 ,"VCI_MEM_CACHE Invalid UPT entry in VCI response paquet received by memory cache" ); 831 832 if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; 833 else r_init_rsp_fsm = INIT_RSP_IDLE; 834 } 835 break; 940 case INIT_RSP_UPT_LOCK: // decrement the number of expected responses 941 { 942 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) 943 { 
944 size_t count = 0; 945 bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); 946 947 #if DEBUG_MEMC_INIT_RSP 948 if( m_debug_init_rsp_fsm ) 949 { 950 std::cout << " <MEMC.INIT_RSP_UPT_LOCK> Decrement the responses counter for UPT:" 951 << " entry = " << r_init_rsp_upt_index.read() 952 << " / rsp_count = " << std::dec << count << std::endl; 953 } 954 #endif 955 if ( not valid ) 956 { 957 std::cout << "VCI_MEM_CACHE ERROR " << name() << " INIT_RSP_UPT_LOCK state" << std::endl; 958 std::cout << "unsuccessful access to decrement the UPT" << std::endl; 959 exit(0); 960 } 961 962 if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; 963 else r_init_rsp_fsm = INIT_RSP_IDLE; 964 } 965 break; 836 966 } 837 967 //////////////////////// 838 case INIT_RSP_UPT_CLEAR: // clear the UPT entry 839 { 840 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { 841 r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); 842 r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); 843 r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); 844 r_init_rsp_nline = m_update_tab.nline(r_init_rsp_upt_index.read()); 845 bool need_rsp = m_update_tab.need_rsp(r_init_rsp_upt_index.read()); 846 if ( need_rsp ) r_init_rsp_fsm = INIT_RSP_END; 847 else r_init_rsp_fsm = INIT_RSP_IDLE; 848 m_update_tab.clear(r_init_rsp_upt_index.read()); 849 #ifdef IDEBUG 850 if(m_cpt_cycles > DEBUG_START_CYCLE){ 851 std::cout << sc_time_stamp() << " " << name() << " INIT_RSP_UPT_CLEAR update table : " << std::endl; 852 m_update_tab.print(); 853 } 854 #endif 855 } 856 break; 968 case INIT_RSP_UPT_CLEAR: // clear the UPT entry 969 { 970 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) 971 { 972 r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); 973 r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); 974 r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); 975 r_init_rsp_nline = m_update_tab.nline(r_init_rsp_upt_index.read()); 976 bool need_rsp = m_update_tab.need_rsp(r_init_rsp_upt_index.read()); 977 978 if ( need_rsp ) r_init_rsp_fsm = INIT_RSP_END; 979 else r_init_rsp_fsm = INIT_RSP_IDLE; 980 981 m_update_tab.clear(r_init_rsp_upt_index.read()); 982 983 #if DEBUG_MEMC_INIT_RSP 984 if ( m_debug_init_rsp_fsm ) 985 { 986 std::cout << " <MEMC.INIT_RSP_UPT_CLEAR> Clear UPT entry " 987 << r_init_rsp_upt_index.read() << std::endl; 988 } 989 #endif 990 } 991 break; 857 992 } 858 993 ////////////////// 859 case INIT_RSP_END: 860 { 861 862 if ( !r_init_rsp_to_tgt_rsp_req ) { 863 r_init_rsp_to_tgt_rsp_req = true; 864 r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); 865 r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); 866 r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); 867 r_init_rsp_fsm = INIT_RSP_IDLE; 868 } 869 break; 994 case INIT_RSP_END: // Post a request to TGT_RSP FSM 995 { 996 if ( !r_init_rsp_to_tgt_rsp_req ) 997 { 998 r_init_rsp_to_tgt_rsp_req = true; 999 r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); 1000 r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); 1001 r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); 1002 r_init_rsp_fsm = INIT_RSP_IDLE; 1003 1004 #if DEBUG_MEMC_INIT_RSP 1005 if ( m_debug_init_rsp_fsm ) 1006 { 1007 std::cout << " <MEMC.INIT_RSP_END> Request TGT_RSP FSM to send a response to srcid " 1008 << r_init_rsp_srcid.read() << std::endl; 1009 } 1010 #endif 1011 } 1012 break; 870 1013 } 871 1014 } // end switch r_init_rsp_fsm … … 874 1017 // READ FSM 875 1018 
//////////////////////////////////////////////////////////////////////////////////// 876 // The READ FSM controls the read requests sent by processors.1019 // The READ FSM controls the VCI read requests. 877 1020 // It takes the lock protecting the cache directory to check the cache line status: 878 // - In case of HIT, the fsm copies the data (one line, or one single word) 1021 // - In case of HIT 1022 // The fsm copies the data (one line, or one single word) 879 1023 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 880 1024 // The requesting initiator is registered in the cache directory. 881 // - In case of MISS, the READ fsm takes the lock protecting the transaction tab. 1025 // If the number of copy is larger than 1, the new copy is registered 1026 // in the HEAP. 1027 // If the number of copy is larger than the threshold, the HEAP is cleared, 1028 // and the corresponding line switches to the counter mode. 1029 // - In case of MISS 1030 // The READ fsm takes the lock protecting the transaction tab. 882 1031 // If a read transaction to the XRAM for this line already exists, 883 1032 // or if the transaction tab is full, the fsm is stalled. 884 // If a transaction entry is free, the READ fsm sends a request to the XRAM. 1033 // If a TRT entry is free, the READ request is registered in TRT, 1034 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 1035 // The READ FSM returns in the IDLE state as the read transaction will be 1036 // completed when the missing line will be received. 885 1037 //////////////////////////////////////////////////////////////////////////////////// 886 1038 887 PRINTF(" * <MEM_CACHE.TOP> Request from %d.%d at address %llx\n",(uint32_t)m_cmd_read_srcid_fifo.read(),(uint32_t)m_cmd_read_pktid_fifo.read(),(uint64_t)m_cmd_read_addr_fifo.read()); 888 889 switch ( r_read_fsm.read() ) { 890 891 /////////////// 892 case READ_IDLE: 893 { 894 if (m_cmd_read_addr_fifo.rok()) { 895 PRINTF(" * <MEM_CACHE.READ> Request from %d.%d at address %llx\n",(uint32_t)m_cmd_read_srcid_fifo.read(),(uint32_t)m_cmd_read_pktid_fifo.read(),(uint64_t)m_cmd_read_addr_fifo.read()); 896 897 r_read_fsm = READ_DIR_LOCK; 898 } 899 break; 1039 switch ( r_read_fsm.read() ) 1040 { 1041 /////////////// 1042 case READ_IDLE: // waiting a read request 1043 { 1044 if (m_cmd_read_addr_fifo.rok()) 1045 { 1046 1047 #if DEBUG_MEMC_READ 1048 if( m_debug_read_fsm ) 1049 { 1050 std::cout << " <MEMC.READ_IDLE> Read request:" 1051 << " srcid = " << std::hex << m_cmd_read_srcid_fifo.read() 1052 << " / address = " << m_cmd_read_addr_fifo.read() 1053 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1054 } 1055 #endif 1056 r_read_fsm = READ_DIR_LOCK; 1057 } 1058 break; 900 1059 } 901 1060 /////////////////// 902 case READ_DIR_LOCK: // check directory for hit / miss 903 { 904 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { 905 size_t way = 0; 906 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 907 #ifdef DDEBUG 908 if(m_cpt_cycles > DEBUG_START_CYCLE){ 909 std::cout << "In READ_DIR_LOCK printing the entry of address is : " << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 910 entry.print(); 911 std::cout << "done" << std::endl; 912 } 913 #endif 914 r_read_is_cnt = entry.is_cnt; 915 r_read_dirty = entry.dirty; 916 r_read_lock = entry.lock; 917 r_read_tag = entry.tag; 918 r_read_way = way; 919 r_read_count = entry.count; 920 r_read_copy = entry.owner.srcid; 1061 case READ_DIR_LOCK: // check directory for hit 
/ miss 1062 { 1063 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1064 { 1065 size_t way = 0; 1066 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 1067 1068 r_read_is_cnt = entry.is_cnt; 1069 r_read_dirty = entry.dirty; 1070 r_read_lock = entry.lock; 1071 r_read_tag = entry.tag; 1072 r_read_way = way; 1073 r_read_count = entry.count; 1074 r_read_copy = entry.owner.srcid; 1075 921 1076 #if L1_MULTI_CACHE 922 r_read_copy_cache = entry.owner.cache_id; 923 #endif 924 r_read_copy_inst = entry.owner.inst; 925 r_read_ptr = entry.ptr; 926 927 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 928 // In case of hit, the read acces must be registered in the copies bit-vector 929 if( entry.valid ) { 930 if(entry.is_cnt || (entry.count == 0) || !cached_read) { // No new entry in the heap 931 r_read_fsm = READ_DIR_HIT; 932 } else { 933 r_read_fsm = READ_HEAP_LOCK; 934 } 935 } else { 936 r_read_fsm = READ_TRT_LOCK; 937 } 938 } 939 break; 1077 r_read_copy_cache = entry.owner.cache_id; 1078 #endif 1079 r_read_copy_inst = entry.owner.inst; 1080 r_read_ptr = entry.ptr; // pointer to the heap 1081 1082 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1083 if( entry.valid ) // hit 1084 { 1085 // test if we need to register a new copy in the heap 1086 if ( entry.is_cnt || (entry.count == 0) || !cached_read ) 1087 r_read_fsm = READ_DIR_HIT; 1088 else 1089 r_read_fsm = READ_HEAP_LOCK; 1090 } 1091 else // miss 1092 { 1093 r_read_fsm = READ_TRT_LOCK; 1094 } 1095 1096 #if DEBUG_MEMC_READ 1097 if( m_debug_read_fsm ) 1098 { 1099 std::cout << " <MEMC.READ_DIR_LOCK> Accessing directory: " 1100 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 1101 << " / hit = " << entry.valid 1102 << " / count = " <<std::dec << entry.count 1103 << " / is_cnt = " << entry.is_cnt << std::endl; 1104 } 1105 #endif 1106 } 1107 break; 940 1108 } 941 1109 ////////////////// 942 case READ_DIR_HIT: // read hit : update the memory cache 943 { 944 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { 945 // signals generation 946 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 947 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 948 bool is_cnt = r_read_is_cnt.read(); 949 950 // read data in the cache 951 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 952 size_t way = r_read_way.read(); 953 for ( size_t i=0 ; i<m_words ; i++ ) { 954 r_read_data[i] = m_cache_data[way][set][i]; 955 } 956 957 // update the cache directory (for the copies) 958 DirectoryEntry entry; 959 entry.valid = true; 960 entry.is_cnt = is_cnt; 961 entry.dirty = r_read_dirty.read(); 962 entry.tag = r_read_tag.read(); 963 entry.lock = r_read_lock.read(); 964 entry.ptr = r_read_ptr.read(); 965 if(cached_read){ // Cached read, we update the copy 966 if(!is_cnt){ // Not counter mode 967 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1110 case READ_DIR_HIT: // read data in cache & update the directory 1111 // we enter this state in 3 cases: 1112 // - the read request is uncachable 1113 // - the cache line is in counter mode 1114 // - the cache line is valid but not replcated 1115 { 1116 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1117 { 1118 // signals generation 1119 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 1120 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1121 bool is_cnt = r_read_is_cnt.read(); 1122 1123 // read data in the cache 1124 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1125 size_t way = r_read_way.read(); 1126 for ( size_t i=0 ; i<m_words ; 
i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1127 1128 // update the cache directory 1129 DirectoryEntry entry; 1130 entry.valid = true; 1131 entry.is_cnt = is_cnt; 1132 entry.dirty = r_read_dirty.read(); 1133 entry.tag = r_read_tag.read(); 1134 entry.lock = r_read_lock.read(); 1135 entry.ptr = r_read_ptr.read(); 1136 if (cached_read) // Cached read => we must update the copies 1137 { 1138 if (!is_cnt) // Not counter mode 1139 { 1140 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 968 1141 #if L1_MULTI_CACHE 969 entry.owner.cache_id= m_cmd_read_pktid_fifo.read(); 970 #endif 971 972 entry.owner.inst = inst_read; 973 entry.count = r_read_count.read() + 1; 974 } else { // Counter mode 975 entry.owner.srcid = 0; 1142 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1143 #endif 1144 entry.owner.inst = inst_read; 1145 entry.count = r_read_count.read() + 1; 1146 } 1147 else // Counter mode 1148 { 1149 entry.owner.srcid = 0; 976 1150 #if L1_MULTI_CACHE 977 entry.owner.cache_id= 0; 978 #endif 979 entry.owner.inst = false; 980 entry.count = r_read_count.read() + 1; 981 } 982 } else { // Uncached read 983 entry.owner.srcid = r_read_copy.read(); 1151 entry.owner.cache_id = 0; 1152 #endif 1153 entry.owner.inst = false; 1154 entry.count = r_read_count.read() + 1; 1155 } 1156 } 1157 else // Uncached read 1158 { 1159 entry.owner.srcid = r_read_copy.read(); 984 1160 #if L1_MULTI_CACHE 985 entry.owner.cache_id = r_read_copy_cache.read(); 986 #endif 987 988 entry.owner.inst = r_read_copy_inst.read(); 989 entry.count = r_read_count.read(); 990 } 991 #ifdef DDEBUG 992 if(m_cpt_cycles > DEBUG_START_CYCLE){ 993 std::cout << "In READ_DIR_HIT printing the entry of address is : " << std::endl; 994 entry.print(); 995 std::cout << "done" << std::endl; 996 } 997 #endif 998 999 m_cache_directory.write(set, way, entry); 1000 r_read_fsm = READ_RSP; 1001 } 1002 break; 1161 entry.owner.cache_id = r_read_copy_cache.read(); 1162 #endif 1163 entry.owner.inst = r_read_copy_inst.read(); 1164 entry.count = r_read_count.read(); 1165 } 1166 1167 #if DEBUG_MEMC_READ 1168 if( m_debug_read_fsm ) 1169 { 1170 std::cout << " <MEMC.READ_DIR_HIT> Update directory entry:" 1171 << " set = " << std::dec << set 1172 << " / way = " << way 1173 << " / owner_id = " << entry.owner.srcid 1174 << " / owner_ins = " << entry.owner.inst 1175 << " / count = " << entry.count 1176 << " / is_cnt = " << entry.is_cnt << std::endl; 1177 } 1178 #endif 1179 1180 m_cache_directory.write(set, way, entry); 1181 r_read_fsm = READ_RSP; 1182 } 1183 break; 1184 } 1185 //////////////////// 1186 case READ_HEAP_LOCK: // read data in cache, update the directory 1187 // and prepare the HEAP update 1188 { 1189 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1190 { 1191 // enter counter mode when we reach the limit of copies or the heap is full 1192 bool go_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap.is_full(); 1193 1194 // read data in the cache 1195 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1196 size_t way = r_read_way.read(); 1197 for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1198 1199 // update the cache directory 1200 DirectoryEntry entry; 1201 entry.valid = true; 1202 entry.is_cnt = go_cnt; 1203 entry.dirty = r_read_dirty.read(); 1204 entry.tag = r_read_tag.read(); 1205 entry.lock = r_read_lock.read(); 1206 entry.count = r_read_count.read() + 1; 1207 1208 if (not go_cnt) // Not entering counter mode 1209 { 1210 entry.owner.srcid = r_read_copy.read(); 1211 #if L1_MULTI_CACHE 1212 
entry.owner.cache_id= r_read_copy_cache.read(); 1213 #endif 1214 entry.owner.inst = r_read_copy_inst.read(); 1215 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 1216 } 1217 else // Entering Counter mode 1218 { 1219 entry.owner.srcid = 0; 1220 #if L1_MULTI_CACHE 1221 entry.owner.cache_id= 0; 1222 #endif 1223 entry.owner.inst = false; 1224 entry.ptr = 0; 1225 } 1226 1227 m_cache_directory.write(set, way, entry); 1228 1229 // prepare the heap update (add an entry, or clear the linked list) 1230 if (not go_cnt) // not switching to counter mode 1231 { 1232 // We test if the next free entry in the heap is the last 1233 HeapEntry heap_entry = m_heap.next_free_entry(); 1234 r_read_next_ptr = heap_entry.next; 1235 r_read_last_free = ( heap_entry.next == m_heap.next_free_ptr() ); 1236 1237 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 1238 } 1239 else // switching to counter mode 1240 { 1241 if ( r_read_count.read()>1 ) // heap must be cleared 1242 { 1243 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1244 r_read_next_ptr = m_heap.next_free_ptr(); 1245 m_heap.write_free_ptr(r_read_ptr.read()); 1246 1247 if( next_entry.next == r_read_ptr.read() ) // last entry 1248 { 1249 r_read_fsm = READ_HEAP_LAST; // erase the entry 1250 } 1251 else // not the last entry 1252 { 1253 r_read_ptr = next_entry.next; 1254 r_read_fsm = READ_HEAP_ERASE; // erase the list 1255 } 1256 } 1257 else // the heap is not used / nothing to do 1258 { 1259 r_read_fsm = READ_RSP; 1260 } 1261 } 1262 1263 #if DEBUG_MEMC_READ 1264 if( m_debug_read_fsm ) 1265 { 1266 std::cout << " <MEMC.READ_HEAP_LOCK> Update directory:" 1267 << " tag = " << std::hex << entry.tag 1268 << " set = " << std::dec << set 1269 << " way = " << way 1270 << " count = " << entry.count 1271 << " is_cnt = " << entry.is_cnt << std::endl; 1272 } 1273 #endif 1274 } 1275 break; 1276 } 1277 ///////////////////// 1278 case READ_HEAP_WRITE: // add a entry in the heap 1279 { 1280 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1281 { 1282 HeapEntry heap_entry; 1283 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1284 #if L1_MULTI_CACHE 1285 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1286 #endif 1287 heap_entry.owner.inst = (m_cmd_read_trdid_fifo.read() & 0x2); 1288 1289 if(r_read_count.read() == 1) // creation of a new linked list 1290 { 1291 heap_entry.next = m_heap.next_free_ptr(); 1292 } 1293 else // head insertion in existing list 1294 { 1295 heap_entry.next = r_read_ptr.read(); 1296 } 1297 m_heap.write_free_entry(heap_entry); 1298 m_heap.write_free_ptr(r_read_next_ptr.read()); 1299 if(r_read_last_free.read()) m_heap.set_full(); 1300 1301 r_read_fsm = READ_RSP; 1302 1303 #if DEBUG_MEMC_READ 1304 if( m_debug_read_fsm ) 1305 { 1306 std::cout << " <MEMC.READ_HEAP_WRITE> Add an entry in the heap:" 1307 << " owner_id = " << heap_entry.owner.srcid 1308 << " owner_ins = " << heap_entry.owner.inst << std::endl; 1309 } 1310 #endif 1311 } 1312 else 1313 { 1314 std::cout << "VCI_MEM_CACHE ERROR " << name() 1315 << " READ_HEAP_WRITE state" << std::endl; 1316 std::cout << "Bad HEAP allocation" << std::endl; 1317 exit(0); 1318 } 1319 break; 1320 } 1321 ///////////////////// 1322 case READ_HEAP_ERASE: 1323 { 1324 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1325 { 1326 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1327 if( next_entry.next == r_read_ptr.read() ) 1328 { 1329 r_read_fsm = READ_HEAP_LAST; 1330 } 1331 else 1332 { 1333 r_read_ptr = next_entry.next; 1334 r_read_fsm = READ_HEAP_ERASE; 1335 } 1336 } 
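// [Editor's note - illustration only, not part of changeset r184]
// READ_HEAP_LOCK / READ_HEAP_WRITE above perform a head insertion in the
// per-line list of copies stored in the heap, while the free heap entries
// form a second linked list. A minimal sketch of the same mechanism on a
// plain array (types and names are illustrative, not the real heap interface):
//
//   struct Node { size_t owner; size_t next; };
//   std::vector<Node> heap(heap_size);
//   size_t free_head;                              // head of the free list
//
//   size_t slot = free_head;                       // next_free_ptr()
//   free_head   = heap[slot].next;                 // write_free_ptr(next)
//   heap[slot]  = Node{ new_owner, list_head };    // write_free_entry()
//   list_head   = slot;                            // directory ptr -> new head
//
// The last element of a copies list points to itself, which is how
// READ_HEAP_ERASE / READ_HEAP_LAST detect the end of the list when the line
// switches to counter mode and the list is given back to the free list.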
1337 else 1338 { 1339 std::cout << "VCI_MEM_CACHE ERROR " << name() 1340 << " READ_HEAP_ERASE state" << std::endl; 1341 std::cout << "Bad HEAP allocation" << std::endl; 1342 exit(0); 1343 } 1344 break; 1345 } 1346 //////////////////// 1347 case READ_HEAP_LAST: 1348 { 1349 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1350 { 1351 HeapEntry last_entry; 1352 last_entry.owner.srcid = 0; 1353 #if L1_MULTI_CACHE 1354 last_entry.owner.cache_id = 0; 1355 #endif 1356 last_entry.owner.inst = false; 1357 1358 if(m_heap.is_full()) 1359 { 1360 last_entry.next = r_read_ptr.read(); 1361 m_heap.unset_full(); 1362 } 1363 else 1364 { 1365 last_entry.next = r_read_next_ptr.read(); 1366 } 1367 m_heap.write(r_read_ptr.read(),last_entry); 1368 r_read_fsm = READ_RSP; 1369 } 1370 else 1371 { 1372 std::cout << "VCI_MEM_CACHE ERROR " << name() 1373 << " READ_HEAP_LAST state" << std::endl; 1374 std::cout << "Bad HEAP allocation" << std::endl; 1375 exit(0); 1376 } 1377 break; 1003 1378 } 1004 1379 ////////////// 1005 case READ_HEAP_LOCK: 1006 { 1007 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { 1008 bool is_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap_directory.is_full(); 1009 // read data in the cache 1010 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1011 size_t way = r_read_way.read(); 1012 for ( size_t i=0 ; i<m_words ; i++ ) { 1013 r_read_data[i] = m_cache_data[way][set][i]; 1014 } 1015 1016 // update the cache directory (for the copies) 1017 DirectoryEntry entry; 1018 entry.valid = true; 1019 entry.is_cnt = is_cnt; // when we reach the limit of copies or the heap is full 1020 entry.dirty = r_read_dirty.read(); 1021 entry.tag = r_read_tag.read(); 1022 entry.lock = r_read_lock.read(); 1023 if(!is_cnt){ // Not counter mode 1024 entry.owner.srcid = r_read_copy.read(); 1025 #if L1_MULTI_CACHE 1026 entry.owner.cache_id= r_read_copy_cache.read(); 1027 #endif 1028 entry.owner.inst = r_read_copy_inst.read(); 1029 entry.count = r_read_count.read() + 1; 1030 entry.ptr = m_heap_directory.next_free_ptr(); 1031 } else { // Counter mode 1032 entry.owner.srcid = 0; 1033 #if L1_MULTI_CACHE 1034 entry.owner.cache_id= 0; 1035 #endif 1036 entry.owner.inst = false; 1037 entry.count = r_read_count.read() + 1; 1038 entry.ptr = 0; 1039 } 1040 #ifdef DDEBUG 1041 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1042 std::cout << "In READ_HEAP_LOCK printing the entry of address is : " << std::endl; 1043 entry.print(); 1044 std::cout << "done" << std::endl; 1045 } 1046 #endif 1047 1048 m_cache_directory.write(set, way, entry); 1049 1050 if(!is_cnt){ 1051 HeapEntry free_heap_entry = m_heap_directory.next_free_entry(); 1052 r_read_next_ptr = free_heap_entry.next; 1053 if( free_heap_entry.next == m_heap_directory.next_free_ptr() ) { // Last free heap entry 1054 r_read_last_free = true; 1055 } else { 1056 r_read_last_free = false; 1057 } 1058 r_read_fsm = READ_HEAP_WRITE; // we add an entry in the list of copies 1059 } else { 1060 if(r_read_count.read()>1) { // else there is no list of copies... 
1061 HeapEntry next_entry = m_heap_directory.read(r_read_ptr.read()); 1062 r_read_next_ptr = m_heap_directory.next_free_ptr(); 1063 m_heap_directory.write_free_ptr(r_read_ptr.read()); 1064 if( next_entry.next == r_read_ptr.read() ) { // The last list member 1065 r_read_fsm = READ_HEAP_LAST; // we erase the list of copies (counter mode) 1066 } else { // Not the end of the list 1067 r_read_ptr = next_entry.next; 1068 r_read_fsm = READ_HEAP_ERASE; // we erase the list of copies (counter mode) 1069 } 1070 } else { 1071 r_read_fsm = READ_RSP; 1072 } 1073 } 1074 } 1075 break; 1076 } 1077 ////////////// 1078 case READ_HEAP_WRITE: 1079 { 1080 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ 1081 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 1082 HeapEntry new_heap_entry; 1083 new_heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1084 #if L1_MULTI_CACHE 1085 new_heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1086 #endif 1087 new_heap_entry.owner.inst = inst_read; 1088 if(r_read_count.read() == 1){ // creation of a new list 1089 new_heap_entry.next = m_heap_directory.next_free_ptr(); 1090 } else { // it is an insertion 1091 new_heap_entry.next = r_read_ptr.read(); 1092 } 1093 m_heap_directory.write_free_entry(new_heap_entry); 1094 m_heap_directory.write_free_ptr(r_read_next_ptr.read()); 1095 if(r_read_last_free.read()) { 1096 m_heap_directory.set_full(); 1097 } 1098 1099 r_read_fsm = READ_RSP; 1100 } else { 1101 ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); 1102 } 1103 break; 1104 } 1105 ////////////// 1106 case READ_HEAP_ERASE: 1107 { 1108 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ 1109 HeapEntry next_entry = m_heap_directory.read(r_read_ptr.read()); 1110 if( next_entry.next == r_read_ptr.read() ){ 1111 r_read_fsm = READ_HEAP_LAST; 1112 } else { 1113 r_read_ptr = next_entry.next; 1114 r_read_fsm = READ_HEAP_ERASE; 1115 } 1116 } else { 1117 ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); 1118 } 1119 break; 1120 } 1121 ////////////// 1122 case READ_HEAP_LAST: 1123 { 1124 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ 1125 HeapEntry last_entry; 1126 last_entry.owner.srcid = 0; 1127 #if L1_MULTI_CACHE 1128 last_entry.owner.cache_id = 0; 1129 #endif 1130 last_entry.owner.inst = false; 1131 if(m_heap_directory.is_full()){ 1132 last_entry.next = r_read_ptr.read(); 1133 m_heap_directory.unset_full(); 1134 } else { 1135 last_entry.next = r_read_next_ptr.read(); 1136 } 1137 m_heap_directory.write(r_read_ptr.read(),last_entry); 1138 r_read_fsm = READ_RSP; 1139 } else { 1140 ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); 1141 } 1142 break; 1143 } 1144 ////////////// 1145 case READ_RSP: // request the TGT_RSP FSM to return data 1146 { 1147 if( !r_read_to_tgt_rsp_req ) { 1148 for ( size_t i=0 ; i<m_words ; i++ ) { 1149 r_read_to_tgt_rsp_data[i] = r_read_data[i]; 1150 } 1151 r_read_to_tgt_rsp_word = m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; 1152 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 1153 cmd_read_fifo_get = true; 1154 r_read_to_tgt_rsp_req = true; 1155 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 1156 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1157 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1158 r_read_fsm = READ_IDLE; 1159 } 1160 break; 1380 case READ_RSP: // request the TGT_RSP FSM to return data 1381 { 1382 if( !r_read_to_tgt_rsp_req ) 1383 { 1384 for ( size_t i=0 ; i<m_words ; i++ ) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 1385 r_read_to_tgt_rsp_word = 
m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; 1386 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 1387 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 1388 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1389 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1390 cmd_read_fifo_get = true; 1391 r_read_to_tgt_rsp_req = true; 1392 r_read_fsm = READ_IDLE; 1393 1394 #if DEBUG_MEMC_READ 1395 if( m_debug_read_fsm ) 1396 { 1397 std::cout << " <MEMC.READ_RSP> Request the TGT_RSP FSM to return data:" 1398 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 1399 << " / address = " << m_cmd_read_addr_fifo.read() 1400 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1401 } 1402 #endif 1403 } 1404 break; 1161 1405 } 1162 1406 /////////////////// 1163 case READ_TRT_LOCK: // read miss : check the Transaction Table 1164 { 1165 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { 1166 #ifdef TDEBUG 1167 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1168 std::cout << sc_time_stamp() << " " << name() << " READ_TRT_LOCK " << std::endl; 1169 } 1170 #endif 1171 size_t index = 0; 1172 bool hit_read = m_transaction_tab.hit_read(m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], index); 1173 bool hit_write = m_transaction_tab.hit_write(m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]); 1174 bool wok = !m_transaction_tab.full(index); 1175 if( hit_read || !wok || hit_write ) { // missing line already requested or no space 1176 if(!wok) 1177 m_cpt_trt_full++; 1178 if(hit_read || hit_write) 1179 m_cpt_trt_rb++; 1180 r_read_fsm = READ_IDLE; 1181 } else { // missing line is requested to the XRAM 1182 m_cpt_read_miss++; 1183 r_read_trt_index = index; 1184 r_read_fsm = READ_TRT_SET; 1185 } 1186 } 1187 break; 1407 case READ_TRT_LOCK: // read miss : check the Transaction Table 1408 { 1409 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1410 { 1411 size_t index = 0; 1412 vci_addr_t addr = (vci_addr_t)m_cmd_read_addr_fifo.read(); 1413 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], index); 1414 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 1415 bool wok = !m_transaction_tab.full(index); 1416 1417 if( hit_read || !wok || hit_write ) // missing line already requested or no space 1418 { 1419 if(!wok) m_cpt_trt_full++; 1420 if(hit_read || hit_write) m_cpt_trt_rb++; 1421 r_read_fsm = READ_IDLE; 1422 } 1423 else // missing line is requested to the XRAM 1424 { 1425 m_cpt_read_miss++; 1426 r_read_trt_index = index; 1427 r_read_fsm = READ_TRT_SET; 1428 } 1429 1430 #if DEBUG_MEMC_READ 1431 if( m_debug_read_fsm ) 1432 { 1433 std::cout << " <MEMC.READ_TRT_LOCK> Check TRT:" 1434 << " hit_read = " << hit_read 1435 << " / hit_write = " << hit_write 1436 << " / full = " << !wok << std::endl; 1437 } 1438 #endif 1439 } 1440 break; 1188 1441 } 1189 1442 ////////////////// 1190 case READ_TRT_SET: 1191 { 1192 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { 1193 m_transaction_tab.set(r_read_trt_index.read(), 1194 true, 1195 m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1196 m_cmd_read_srcid_fifo.read(), 1197 m_cmd_read_trdid_fifo.read(), 1198 m_cmd_read_pktid_fifo.read(), 1199 true, 1200 m_cmd_read_length_fifo.read(), 1201 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1202 std::vector<be_t>(m_words,0), 1203 std::vector<data_t>(m_words,0)); 1204 #ifdef TDEBUG 1205 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1206 std::cout << sc_time_stamp() << " " << name() << " READ_TRT_SET transaction table : " << std::endl; 1207 for(size_t i = 0 ; i < 
m_transaction_tab.size() ; i++) 1208 m_transaction_tab.print(i); 1209 } 1210 #endif 1211 1212 r_read_fsm = READ_XRAM_REQ; 1213 } 1214 break; 1215 } 1216 ///////////////////// 1217 case READ_XRAM_REQ: 1218 { 1219 if( !r_read_to_ixr_cmd_req ) { 1220 cmd_read_fifo_get = true; 1221 r_read_to_ixr_cmd_req = true; 1222 r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1223 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 1224 r_read_fsm = READ_IDLE; 1225 } 1226 break; 1443 case READ_TRT_SET: // register get transaction in TRT 1444 { 1445 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1446 { 1447 m_transaction_tab.set(r_read_trt_index.read(), 1448 true, 1449 m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1450 m_cmd_read_srcid_fifo.read(), 1451 m_cmd_read_trdid_fifo.read(), 1452 m_cmd_read_pktid_fifo.read(), 1453 true, 1454 m_cmd_read_length_fifo.read(), 1455 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1456 std::vector<be_t>(m_words,0), 1457 std::vector<data_t>(m_words,0)); 1458 #if DEBUG_MEMC_READ 1459 if( m_debug_read_fsm ) 1460 { 1461 std::cout << " <MEMC.READ_TRT_SET> Write in Transaction Table: " << std::hex 1462 << " address = " << m_cmd_read_addr_fifo.read() 1463 << " / srcid = " << m_cmd_read_srcid_fifo.read() << std::endl; 1464 } 1465 #endif 1466 r_read_fsm = READ_TRT_REQ; 1467 } 1468 break; 1469 } 1470 ////////////////// 1471 case READ_TRT_REQ: // consume the read request in the FIFO, 1472 // and send it to the ixr_cmd_fsm 1473 { 1474 if( not r_read_to_ixr_cmd_req ) 1475 { 1476 cmd_read_fifo_get = true; 1477 r_read_to_ixr_cmd_req = true; 1478 r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1479 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 1480 r_read_fsm = READ_IDLE; 1481 1482 #if DEBUG_MEMC_READ 1483 if( m_debug_read_fsm ) 1484 { 1485 std::cout << " <MEMC.READ_TRT_REQ> Request GET transaction for address " 1486 << m_cmd_read_addr_fifo.read() << std::endl; 1487 } 1488 #endif 1489 } 1490 break; 1227 1491 } 1228 1492 } // end switch read_fsm … … 1240 1504 // If there is no other copy, an acknowledge response is immediately 1241 1505 // returned to the writing processor. 1242 // if the data is cached by other processoris, the FSM takes the lock 1243 // protecting the Update Table (UPT) to register this update transaction. 1244 // If the UPT is full, it releases the lock and waits. Then, it sends 1506 // If the data is cached by other processors, a coherence transaction must 1507 // be launched: 1508 // It is a multicast update if the line is not in counter mode, and the processor 1509 // takes the lock protecting the Update Table (UPT) to register this transaction. 1510 // It is a broadcast invalidate if the line is in counter mode. 1511 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 1245 1512 // a multi-update request to all owners of the line (but the writer), 1246 1513 // through the INIT_CMD FSM. 
In case of multi-update transaction, the WRITE FSM … … 1256 1523 ///////////////////////////////////////////////////////////////////////////////////// 1257 1524 1258 switch ( r_write_fsm.read() ) { 1259 1260 //////////////// 1261 case WRITE_IDLE: // copy first word of a write burst in local buffer 1262 { 1263 if ( m_cmd_write_addr_fifo.rok()) { 1264 PRINTF(" * <MEM_CACHE.WRITE> KANE Request from %d.%d (%d) at address %llx\n",(uint32_t)m_cmd_write_srcid_fifo.read(),(uint32_t)m_cmd_write_pktid_fifo.read(),(uint32_t) m_cmd_write_trdid_fifo.read(), (uint64_t)m_cmd_write_addr_fifo.read()); 1265 1266 m_cpt_write++; 1267 m_cpt_write_cells++; 1268 // consume a word in the FIFO & write it in the local buffer 1269 cmd_write_fifo_get = true; 1270 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1271 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1272 r_write_word_index = index; 1273 r_write_word_count = 1; 1274 r_write_data[index] = m_cmd_write_data_fifo.read(); 1275 r_write_srcid = m_cmd_write_srcid_fifo.read(); 1276 r_write_trdid = m_cmd_write_trdid_fifo.read(); 1277 r_write_pktid = m_cmd_write_pktid_fifo.read(); 1278 1279 // the be field must be set for all words 1280 for ( size_t i=0 ; i<m_words ; i++ ) { 1281 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 1282 else r_write_be[i] = 0x0; 1283 } 1284 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1285 r_write_byte=true; 1286 else r_write_byte=false; 1287 1288 if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1289 else r_write_fsm = WRITE_NEXT; 1290 } 1291 break; 1292 } 1525 switch ( r_write_fsm.read() ) 1526 { 1293 1527 //////////////// 1294 case WRITE_NEXT: // copy next word of a write burst in local buffer 1295 { 1296 if ( m_cmd_write_addr_fifo.rok() ) { 1297 m_cpt_write_cells++; 1298 1299 // check that the next word is in the same cache line 1300 ASSERT( (m_nline[(vci_addr_t)(r_write_address.read())] == m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) 1301 ,"VCI_MEM_CACHE write error in vci_mem_cache : write burst over a line" ); 1302 // consume a word in the FIFO & write it in the local buffer 1303 cmd_write_fifo_get=true; 1304 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1305 r_write_be[index] = m_cmd_write_be_fifo.read(); 1306 r_write_data[index] = m_cmd_write_data_fifo.read(); 1307 r_write_word_count = r_write_word_count.read() + 1; 1308 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1309 r_write_byte=true; 1310 if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1311 } 1312 break; 1528 case WRITE_IDLE: // copy first word of a write burst in local buffer 1529 { 1530 if ( m_cmd_write_addr_fifo.rok() ) 1531 { 1532 m_cpt_write++; 1533 m_cpt_write_cells++; 1534 1535 // consume a word in the FIFO & write it in the local buffer 1536 cmd_write_fifo_get = true; 1537 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1538 1539 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1540 r_write_word_index = index; 1541 r_write_word_count = 1; 1542 r_write_data[index] = m_cmd_write_data_fifo.read(); 1543 r_write_srcid = m_cmd_write_srcid_fifo.read(); 1544 r_write_trdid = m_cmd_write_trdid_fifo.read(); 1545 r_write_pktid = m_cmd_write_pktid_fifo.read(); 1546 1547 // initialize the be field for all words 1548 for ( size_t i=0 ; i<m_words ; i++ ) 1549 { 1550 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 1551 else r_write_be[i] = 0x0; 1552 } 1553 1554 if( 
!((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1555 r_write_byte = true; 1556 else 1557 r_write_byte = false; 1558 1559 if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1560 else r_write_fsm = WRITE_NEXT; 1561 1562 #if DEBUG_MEMC_WRITE 1563 if( m_debug_write_fsm ) 1564 { 1565 std::cout << " <MEMC.WRITE_IDLE> Write request " 1566 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 1567 << " / address = " << m_cmd_write_addr_fifo.read() 1568 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 1569 } 1570 #endif 1571 } 1572 break; 1573 } 1574 //////////////// 1575 case WRITE_NEXT: // copy next word of a write burst in local buffer 1576 { 1577 if ( m_cmd_write_addr_fifo.rok() ) 1578 { 1579 1580 #if DEBUG_MEMC_WRITE 1581 if( m_debug_write_fsm ) 1582 { 1583 std::cout << " <MEMC.WRITE_NEXT> Write another word in local buffer" << std::endl; 1584 } 1585 #endif 1586 m_cpt_write_cells++; 1587 1588 // check that the next word is in the same cache line 1589 if ( (m_nline[(vci_addr_t)(r_write_address.read())] != 1590 m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) ) 1591 { 1592 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl; 1593 std::cout << "all words in a write burst must be in same cache line" << std::endl; 1594 exit(0); 1595 } 1596 1597 // consume a word in the FIFO & write it in the local buffer 1598 cmd_write_fifo_get=true; 1599 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1600 1601 r_write_be[index] = m_cmd_write_be_fifo.read(); 1602 r_write_data[index] = m_cmd_write_data_fifo.read(); 1603 r_write_word_count = r_write_word_count.read() + 1; 1604 1605 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1606 r_write_byte = true; 1607 1608 if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1609 } 1610 break; 1313 1611 } 1314 1612 //////////////////// 1315 case WRITE_DIR_LOCK: // access directory to check hit/miss 1316 { 1317 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) { 1318 size_t way = 0; 1319 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 1320 1321 // copy directory entry in local buffers in case of hit 1322 if ( entry.valid ) { 1323 r_write_is_cnt = entry.is_cnt; 1324 r_write_lock = entry.lock; 1325 r_write_tag = entry.tag; 1326 r_write_copy = entry.owner.srcid; 1613 case WRITE_DIR_LOCK: // access directory to check hit/miss 1614 { 1615 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 1616 { 1617 size_t way = 0; 1618 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 1619 1620 if ( entry.valid ) // hit 1621 { 1622 // copy directory entry in local buffer in case of hit 1623 r_write_is_cnt = entry.is_cnt; 1624 r_write_lock = entry.lock; 1625 r_write_tag = entry.tag; 1626 r_write_copy = entry.owner.srcid; 1327 1627 #if L1_MULTI_CACHE 1328 r_write_copy_cache= entry.owner.cache_id; 1329 #endif 1330 r_write_copy_inst = entry.owner.inst; 1331 r_write_count = entry.count; 1332 r_write_ptr = entry.ptr; 1333 r_write_way = way; 1334 if( entry.is_cnt && entry.count ) { 1335 r_write_fsm = WRITE_DIR_HIT_READ; 1336 } else { 1337 if(r_write_byte.read()) 1338 r_write_fsm = WRITE_DIR_HIT_READ; 1339 else r_write_fsm = WRITE_DIR_HIT; 1340 } 1341 } else { 1342 r_write_fsm = WRITE_TRT_LOCK; 1343 } 1344 } 1345 break; 1628 r_write_copy_cache = entry.owner.cache_id; 1629 #endif 1630 r_write_copy_inst = entry.owner.inst; 1631 r_write_count = entry.count; 1632 r_write_ptr = entry.ptr; 1633 
r_write_way = way; 1634 1635 if( entry.is_cnt && entry.count ) 1636 { 1637 r_write_fsm = WRITE_DIR_HIT_READ; 1638 } 1639 else 1640 { 1641 if (r_write_byte.read()) r_write_fsm = WRITE_DIR_HIT_READ; 1642 else r_write_fsm = WRITE_DIR_HIT; 1643 } 1644 } 1645 else // miss 1646 { 1647 r_write_fsm = WRITE_TRT_LOCK; 1648 } 1649 1650 #if DEBUG_MEMC_WRITE 1651 if( m_debug_write_fsm ) 1652 { 1653 std::cout << " <MEMC.WRITE_DIR_LOCK> Check the directory: " 1654 << " address = " << r_write_address.read() 1655 << " hit = " << entry.valid 1656 << " count = " << std::dec << entry.count 1657 << " is_cnt = " << entry.is_cnt << std::endl; 1658 } 1659 #endif 1660 } 1661 break; 1662 } 1663 //////////////////////// 1664 case WRITE_DIR_HIT_READ: // read the cache and complete the buffer when be!=0xF 1665 { 1666 // update local buffer 1667 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1668 size_t way = r_write_way.read(); 1669 for(size_t i=0 ; i<m_words ; i++) 1670 { 1671 data_t mask = 0; 1672 if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; 1673 if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; 1674 if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; 1675 if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; 1676 1677 // complete only if mask is not null (for energy consumption) 1678 if ( r_write_be[i].read() || r_write_is_cnt.read() ) 1679 { 1680 r_write_data[i] = (r_write_data[i].read() & mask) | 1681 (m_cache_data[way][set][i] & ~mask); 1682 } 1683 } // end for 1684 1685 // test if a coherence broadcast is required 1686 if( r_write_is_cnt.read() && r_write_count.read() ) r_write_fsm = WRITE_TRT_WRITE_LOCK; 1687 else r_write_fsm = WRITE_DIR_HIT; 1688 1689 #if DEBUG_MEMC_WRITE 1690 if( m_debug_write_fsm ) 1691 { 1692 if( r_write_is_cnt.read() && r_write_count.read() ) 1693 { 1694 std::cout << " <MEMC.WRITE_DIR_HIT_READ> Read the cache to complete local buffer /" 1695 << " coherence broadcast required" << std::endl; 1696 } 1697 else 1698 { 1699 std::cout << " <MEMC.WRITE_DIR_HIT_READ> Read the cache to complete local buffer" 1700 << std::endl; 1701 } 1702 } 1703 #endif 1704 break; 1346 1705 } 1347 1706 /////////////////// 1348 case WRITE_DIR_HIT_READ: // read the cache and complete the buffer (data, when be!=0xF) 1349 { 1350 // update local buffer 1351 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1352 size_t way = r_write_way.read(); 1353 for(size_t i=0 ; i<m_words ; i++) { 1354 data_t mask = 0; 1355 if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; 1356 if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; 1357 if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; 1358 if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; 1359 if(r_write_be[i].read()||r_write_is_cnt.read()) { // complete only if mask is not null (for energy consumption) 1360 r_write_data[i] = (r_write_data[i].read() & mask) | 1361 (m_cache_data[way][set][i] & ~mask); 1362 } 1363 } // end for 1364 1365 if( r_write_is_cnt.read() && r_write_count.read() ) { 1366 r_write_fsm = WRITE_TRT_WRITE_LOCK; 1367 } else { 1368 r_write_fsm = WRITE_DIR_HIT; 1369 } 1370 break; 1371 } 1372 /////////////////// 1373 case WRITE_DIR_HIT: // update the cache (data & dirty bit) 1374 { 1375 // update directory with Dirty bit 1376 DirectoryEntry entry; 1377 entry.valid = true; 1378 entry.dirty = true; 1379 entry.tag = r_write_tag.read(); 1380 entry.is_cnt = r_write_is_cnt.read(); 1381 entry.lock = r_write_lock.read(); 1382 entry.owner.srcid = r_write_copy.read(); 1707 case WRITE_DIR_HIT: // update the 
cache (data & directory) 1708 { 1709 // update directory with Dirty bit 1710 DirectoryEntry entry; 1711 entry.valid = true; 1712 entry.dirty = true; 1713 entry.tag = r_write_tag.read(); 1714 entry.is_cnt = r_write_is_cnt.read(); 1715 entry.lock = r_write_lock.read(); 1716 entry.owner.srcid = r_write_copy.read(); 1383 1717 #if L1_MULTI_CACHE 1384 entry.owner.cache_id= r_write_copy_cache.read(); 1385 #endif 1386 entry.owner.inst = r_write_copy_inst.read(); 1387 entry.count = r_write_count.read(); 1388 entry.ptr = r_write_ptr.read(); 1389 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1390 size_t way = r_write_way.read(); 1391 m_cache_directory.write(set, way, entry); 1392 1393 bool owner = (((r_write_copy.read()==r_write_srcid.read()) 1718 entry.owner.cache_id = r_write_copy_cache.read(); 1719 #endif 1720 entry.owner.inst = r_write_copy_inst.read(); 1721 entry.count = r_write_count.read(); 1722 entry.ptr = r_write_ptr.read(); 1723 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1724 size_t way = r_write_way.read(); 1725 1726 // update directory 1727 m_cache_directory.write(set, way, entry); 1728 1729 // owner is true when the writer is the owner 1730 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 1394 1731 #if L1_MULTI_CACHE 1395 1732 and (r_write_copy_cache.read()==r_write_pktid.read()) … … 1397 1734 ) and not r_write_copy_inst.read()); 1398 1735 1399 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 1400 1401 if( no_update ) // no update 1402 { 1403 // write data in cache 1404 for(size_t i=0 ; i<m_words ; i++) { 1405 if ( r_write_be[i].read() ) { 1406 m_cache_data[way][set][i] = r_write_data[i].read(); 1407 } 1408 } // end for 1409 } 1410 1411 size_t count_signal = r_write_count.read(); 1412 if(owner){ 1413 count_signal = count_signal - 1; 1414 } 1415 r_write_count = count_signal; 1416 r_write_to_dec = false; 1417 1418 if ( no_update ) r_write_fsm = WRITE_RSP; 1419 else 1736 // no_update is true when there is no need for coherence transaction 1737 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 1738 1739 // write data in the cache if no transaction on the coherence network 1740 if( no_update ) 1741 { 1742 for(size_t i=0 ; i<m_words ; i++) 1743 { 1744 if ( r_write_be[i].read() ) m_cache_data[way][set][i] = r_write_data[i].read(); 1745 } 1746 } 1747 1748 if ( owner ) r_write_count = r_write_count.read() - 1; 1749 r_write_to_dec = false; 1750 1751 if ( no_update ) // Write transaction completed 1752 { 1753 r_write_fsm = WRITE_RSP; 1754 } 1755 else // coherence update required 1756 { 1757 if( !r_write_to_init_cmd_multi_req.read() && 1758 !r_write_to_init_cmd_brdcast_req.read() ) r_write_fsm = WRITE_UPT_LOCK; 1759 else r_write_fsm = WRITE_WAIT; 1760 } 1761 1762 #if DEBUG_MEMC_WRITE 1763 if( m_debug_write_fsm ) 1764 { 1765 if ( no_update ) 1766 { 1767 std::cout << " <MEMC.WRITE_DIR_HIT> Write into cache / No coherence transaction" 1768 << std::endl; 1769 } 1770 else 1771 { 1772 std::cout << " <MEMC.WRITE_DIR_HIT> Coherence update required:" 1773 << " is_cnt = " << r_write_is_cnt.read() 1774 << " count = " << std::dec << r_write_count.read() 1775 << std::endl; 1776 } 1777 } 1778 #endif 1779 break; 1780 } 1781 ///////////////////// 1782 case WRITE_UPT_LOCK: // Try to register the update request in UPT 1783 { 1784 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 1785 { 1786 bool wok = false; 1787 size_t index = 0; 1788 size_t srcid = r_write_srcid.read(); 1789 size_t trdid = r_write_trdid.read(); 
1790 size_t pktid = r_write_pktid.read(); 1791 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1792 size_t nb_copies = r_write_count.read(); 1793 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1794 size_t way = r_write_way.read(); 1795 1796 wok = m_update_tab.set(true, // it's an update transaction 1797 false, // it's not a broadcast 1798 true, // it needs a response 1799 srcid, 1800 trdid, 1801 pktid, 1802 nline, 1803 nb_copies, 1804 index); 1805 if ( wok ) // write data in cache 1806 { 1807 for(size_t i=0 ; i<m_words ; i++) 1808 { 1809 if ( r_write_be[i].read() ) m_cache_data[way][set][i] = r_write_data[i].read(); 1810 } 1811 } 1812 1813 #if DEBUG_MEMC_WRITE 1814 if( m_debug_write_fsm ) 1815 { 1816 if ( wok ) 1817 { 1818 std::cout << " <MEMC.WRITE_UPT_LOCK> Register the multicast update in UPT / " 1819 << " nb_copies = " << r_write_count.read() << std::endl; 1820 //m_update_tab.print(); 1821 } 1822 } 1823 #endif 1824 r_write_upt_index = index; 1825 // releases the lock protecting the Update Table and the Directory if no entry... 1826 if ( wok ) r_write_fsm = WRITE_HEAP_LOCK; 1827 else r_write_fsm = WRITE_WAIT; 1828 } 1829 break; 1830 } 1831 ///////////////////// 1832 case WRITE_HEAP_LOCK: 1833 { 1834 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ) 1835 { 1836 1837 #if DEBUG_MEMC_WRITE 1838 if( m_debug_write_fsm ) 1839 { 1840 std::cout << " <MEMC.WRITE_HEAP_LOCK> Get acces to the HEAP" << std::endl; 1841 } 1842 #endif 1843 r_write_fsm = WRITE_UPT_REQ; 1844 } 1845 break; 1846 } 1847 ////////////////// 1848 case WRITE_UPT_REQ: 1849 { 1850 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_WRITE ) 1851 { 1852 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_REQ state" << std::endl; 1853 std::cout << "bad HEAP allocation" << std::endl; 1854 exit(0); 1855 } 1856 1420 1857 if( !r_write_to_init_cmd_multi_req.read() && 1421 !r_write_to_init_cmd_brdcast_req.read() ) 1422 r_write_fsm = WRITE_UPT_LOCK; 1423 else 1424 r_write_fsm = WRITE_WAIT; 1425 break; 1858 !r_write_to_init_cmd_brdcast_req.read() ) 1859 { 1860 r_write_to_init_cmd_brdcast_req = false; 1861 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1862 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1863 r_write_to_init_cmd_index = r_write_word_index.read(); 1864 r_write_to_init_cmd_count = r_write_word_count.read(); 1865 1866 for(size_t i=0; i<m_words ; i++) 1867 { 1868 r_write_to_init_cmd_be[i]=r_write_be[i].read(); 1869 } 1870 1871 size_t min = r_write_word_index.read(); 1872 size_t max = r_write_word_index.read() + r_write_word_count.read(); 1873 for (size_t i=min ; i<max ; i++) r_write_to_init_cmd_data[i] = r_write_data[i]; 1874 1875 if( (r_write_copy.read() != r_write_srcid.read()) or 1876 #if L1_MULTI_CACHE 1877 (r_write_copy_cache.read() != r_write_pktid.read()) or 1878 #endif 1879 r_write_copy_inst.read() ) 1880 { 1881 // We put the first copy in the fifo 1882 write_to_init_cmd_fifo_put = true; 1883 write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); 1884 write_to_init_cmd_fifo_srcid = r_write_copy.read(); 1885 #if L1_MULTI_CACHE 1886 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 1887 #endif 1888 if(r_write_count.read() == 1) 1889 { 1890 r_write_fsm = WRITE_IDLE; 1891 r_write_to_init_cmd_multi_req = true; 1892 } 1893 else 1894 { 1895 r_write_fsm = WRITE_UPDATE; 1896 } 1897 } 1898 else 1899 { 1900 r_write_fsm = WRITE_UPDATE; 1901 } 1902 } 1903 break; 1904 } 1905 ////////////////// 1906 case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm 1907 
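// [Editor's note - illustration only, not part of changeset r184]
// The WRITE_UPDATE body below walks the heap list of copies and posts one
// update request per copy to the INIT_CMD FSM; the first copy, stored in the
// directory entry itself, was already handled in WRITE_UPT_REQ above. The
// writer is skipped: when it is found in the list, WRITE_UPT_DEC signals the
// INIT_RSP FSM (r_write_to_init_rsp_req) so that the expected number of
// acknowledges registered in the UPT can be adjusted. A minimal sketch of the
// walk, ignoring the FIFO flow control of the real FSM (names illustrative):
//
//   size_t ptr = list_head;
//   for (;;)
//   {
//       if ( heap[ptr].owner != writer ) post_update( heap[ptr].owner );
//       else                             pending_acks = pending_acks - 1;
//       if ( heap[ptr].next == ptr ) break;   // self-pointing tail = last copy
//       ptr = heap[ptr].next;
//   }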
{ 1908 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_WRITE ) 1909 { 1910 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPDATE state" << std::endl; 1911 std::cout << "bad HEAP allocation" << std::endl; 1912 exit(0); 1913 } 1914 1915 HeapEntry entry = m_heap.read(r_write_ptr.read()); 1916 write_to_init_cmd_fifo_inst = entry.owner.inst; 1917 write_to_init_cmd_fifo_srcid = entry.owner.srcid; 1918 #if L1_MULTI_CACHE 1919 write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 1920 #endif 1921 1922 bool dec_upt_counter = r_write_to_dec.read(); 1923 if( (entry.owner.srcid != r_write_srcid.read()) or 1924 #if L1_MULTI_CACHE 1925 (entry.owner.cache_id != r_write_pktid.read()) or 1926 #endif 1927 entry.owner.inst) 1928 { 1929 write_to_init_cmd_fifo_put = true; 1930 } 1931 else 1932 { 1933 dec_upt_counter = true; 1934 } 1935 r_write_to_dec = dec_upt_counter; 1936 1937 if( m_write_to_init_cmd_inst_fifo.wok() ) 1938 { 1939 r_write_ptr = entry.next; 1940 if( entry.next == r_write_ptr.read() ) // last copy 1941 { 1942 r_write_to_init_cmd_multi_req = true; 1943 if(dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 1944 else r_write_fsm = WRITE_IDLE; 1945 } 1946 else 1947 { 1948 r_write_fsm = WRITE_UPDATE; 1949 } 1950 } 1951 else 1952 { 1953 r_write_fsm = WRITE_UPDATE; 1954 } 1955 break; 1956 } 1957 ////////////////// 1958 case WRITE_UPT_DEC: // Post another coherence update request 1959 { 1960 if ( !r_write_to_init_rsp_req.read() ) 1961 { 1962 r_write_to_init_rsp_req = true; 1963 r_write_to_init_rsp_upt_index = r_write_upt_index.read(); 1964 r_write_fsm = WRITE_IDLE; 1965 } 1966 break; 1967 } 1968 /////////////// 1969 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 1970 { 1971 if ( !r_write_to_tgt_rsp_req.read() ) 1972 { 1973 1974 #if DEBUG_MEMC_WRITE 1975 if( m_debug_write_fsm ) 1976 { 1977 std::cout << " <MEMC.WRITE_RSP> Post a request to TGT_RSP FSM: rsrcid = " 1978 << std::hex << r_write_srcid.read() << std:: endl; 1979 } 1980 #endif 1981 r_write_to_tgt_rsp_req = true; 1982 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 1983 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 1984 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 1985 r_write_fsm = WRITE_IDLE; 1986 } 1987 break; 1988 } 1989 //////////////////// 1990 case WRITE_TRT_LOCK: // Miss : check Transaction Table 1991 { 1992 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 1993 { 1994 1995 #if DEBUG_MEMC_WRITE 1996 if( m_debug_write_fsm ) 1997 { 1998 std::cout << " <MEMC.WRITE_TRT_LOCK> Check the TRT" << std::endl; 1999 } 2000 #endif 2001 size_t hit_index = 0; 2002 size_t wok_index = 0; 2003 vci_addr_t addr = (vci_addr_t)r_write_address.read(); 2004 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], hit_index); 2005 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 2006 bool wok = !m_transaction_tab.full(wok_index); 2007 2008 if ( hit_read ) // register the modified data in TRT 2009 { 2010 r_write_trt_index = hit_index; 2011 r_write_fsm = WRITE_TRT_DATA; 2012 m_cpt_write_miss++; 2013 } 2014 else if ( wok && !hit_write ) // set a new entry in TRT 2015 { 2016 r_write_trt_index = wok_index; 2017 r_write_fsm = WRITE_TRT_SET; 2018 m_cpt_write_miss++; 2019 } 2020 else // wait an empty entry in TRT 2021 { 2022 r_write_fsm = WRITE_WAIT; 2023 m_cpt_trt_full++; 2024 } 2025 } 2026 break; 2027 } 2028 //////////////// 2029 case WRITE_WAIT: // release the locks protecting the shared ressources 2030 { 2031 2032 #if DEBUG_MEMC_WRITE 2033 if( m_debug_write_fsm ) 2034 { 2035 std::cout << " <MEMC.WRITE_WAIT> Releases 
the locks before retry" << std::endl; 2036 } 2037 #endif 2038 r_write_fsm = WRITE_DIR_LOCK; 2039 break; 2040 } 2041 /////////////////// 2042 case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) 2043 { 2044 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2045 { 2046 std::vector<be_t> be_vector; 2047 std::vector<data_t> data_vector; 2048 be_vector.clear(); 2049 data_vector.clear(); 2050 for ( size_t i=0; i<m_words; i++ ) 2051 { 2052 be_vector.push_back(r_write_be[i]); 2053 data_vector.push_back(r_write_data[i]); 2054 } 2055 m_transaction_tab.set(r_write_trt_index.read(), 2056 true, // read request to XRAM 2057 m_nline[(vci_addr_t)(r_write_address.read())], 2058 r_write_srcid.read(), 2059 r_write_trdid.read(), 2060 r_write_pktid.read(), 2061 false, // not a processor read 2062 0, // not a single word 2063 0, // word index 2064 be_vector, 2065 data_vector); 2066 r_write_fsm = WRITE_XRAM_REQ; 2067 2068 #if DEBUG_MEMC_WRITE 2069 if( m_debug_write_fsm ) 2070 { 2071 std::cout << " <MEMC.WRITE_TRT_SET> Set a new entry in TRT" << std::endl; 2072 } 2073 #endif 2074 } 2075 break; 2076 } 2077 //////////////////// 2078 case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) 2079 { 2080 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2081 { 2082 std::vector<be_t> be_vector; 2083 std::vector<data_t> data_vector; 2084 be_vector.clear(); 2085 data_vector.clear(); 2086 for ( size_t i=0; i<m_words; i++ ) 2087 { 2088 be_vector.push_back(r_write_be[i]); 2089 data_vector.push_back(r_write_data[i]); 2090 } 2091 m_transaction_tab.write_data_mask(r_write_trt_index.read(), 2092 be_vector, 2093 data_vector); 2094 r_write_fsm = WRITE_RSP; 2095 2096 #if DEBUG_MEMC_WRITE 2097 if( m_debug_write_fsm ) 2098 { 2099 std::cout << " <MEMC.WRITE_TRT_DATA> Modify an existing entry in TRT" << std::endl; 2100 m_transaction_tab.print( r_write_trt_index.read() ); 2101 } 2102 #endif 2103 } 2104 break; 2105 } 2106 //////////////////// 2107 case WRITE_XRAM_REQ: // send a request to IXR_CMD FSM 2108 { 2109 if ( !r_write_to_ixr_cmd_req ) 2110 { 2111 r_write_to_ixr_cmd_req = true; 2112 r_write_to_ixr_cmd_write = false; 2113 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2114 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2115 r_write_fsm = WRITE_RSP; 2116 2117 #if DEBUG_MEMC_WRITE 2118 if( m_debug_write_fsm ) 2119 { 2120 std::cout << " <MEMC.WRITE_XRAM_REQ> Post a request to the IXR_CMD FSM" << std::endl; 2121 } 2122 #endif 2123 } 2124 break; 2125 } 2126 ////////////////////////// 2127 case WRITE_TRT_WRITE_LOCK: // Check TRT not full 2128 { 2129 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2130 { 2131 size_t wok_index = 0; 2132 bool wok = !m_transaction_tab.full( wok_index ); 2133 if ( wok ) // set a new entry in TRT 2134 { 2135 r_write_trt_index = wok_index; 2136 r_write_fsm = WRITE_INVAL_LOCK; 2137 } 2138 else // wait an empty entry in TRT 2139 { 2140 r_write_fsm = WRITE_WAIT; 2141 } 2142 2143 #if DEBUG_MEMC_WRITE 2144 if( m_debug_write_fsm ) 2145 { 2146 std::cout << " <MEMC.WRITE_TRT_WRITE_LOCK> Check TRT : wok = " 2147 << wok << " index = " << wok_index << std::endl; 2148 } 2149 #endif 2150 } 2151 break; 2152 } 2153 ////////////////////// 2154 case WRITE_INVAL_LOCK: 2155 { 2156 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 2157 { 2158 bool wok = false; 2159 size_t index = 0; 2160 size_t srcid = r_write_srcid.read(); 2161 size_t trdid = r_write_trdid.read(); 2162 size_t pktid = r_write_pktid.read(); 2163 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2164 
size_t nb_copies = r_write_count.read(); 2165 2166 wok =m_update_tab.set(false, // it's an inval transaction 2167 true, // it's a broadcast 2168 true, // it needs a response 2169 srcid, 2170 trdid, 2171 pktid, 2172 nline, 2173 nb_copies, 2174 index); 2175 2176 #if DEBUG_MEMC_WRITE 2177 if( m_debug_write_fsm ) 2178 { 2179 if ( wok ) 2180 { 2181 std::cout << " <MEMC.WRITE_INVAL_LOCK> Register the broadcast inval in UPT / " 2182 << " nb_copies = " << r_write_count.read() << std::endl; 2183 //m_update_tab.print(); 2184 } 2185 } 2186 #endif 2187 r_write_upt_index = index; 2188 2189 if ( wok ) r_write_fsm = WRITE_DIR_INVAL; 2190 else r_write_fsm = WRITE_WAIT; 2191 } 2192 break; 1426 2193 } 1427 2194 ///////////////////// 1428 case WRITE_UPT_LOCK: // Try to register the request in Update Table 1429 { 1430 1431 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { 1432 bool wok = false; 1433 size_t index = 0; 1434 size_t srcid = r_write_srcid.read(); 1435 size_t trdid = r_write_trdid.read(); 1436 size_t pktid = r_write_pktid.read(); 1437 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1438 size_t nb_copies = r_write_count.read(); 1439 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1440 size_t way = r_write_way.read(); 1441 1442 wok =m_update_tab.set(true, // it's an update transaction 1443 false, // it's not a broadcast 1444 true, // it needs a response 1445 srcid, 1446 trdid, 1447 pktid, 1448 nline, 1449 nb_copies, 1450 index); 1451 if(wok){ 1452 // write data in cache 1453 for(size_t i=0 ; i<m_words ; i++) { 1454 if ( r_write_be[i].read() ) { 1455 m_cache_data[way][set][i] = r_write_data[i].read(); 1456 } 1457 } // end for 1458 } 1459 #ifdef IDEBUG 1460 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1461 if(wok){ 1462 std::cout << sc_time_stamp() << " " << name() << " WRITE_UPT_LOCK update table : " << std::endl; 1463 m_update_tab.print(); 1464 } 1465 } 1466 #endif 1467 r_write_upt_index = index; 1468 // releases the lock protecting the Update Table and the Directory if no entry... 
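// [Editor's note - illustration only, not part of changeset r184]
// In both versions of WRITE_UPT_LOCK above, m_update_tab.set() tries to
// reserve one Update Table entry for the multi-update transaction. The
// arguments, following the inline comments of the calls above:
//
//   wok = m_update_tab.set( true,      // update transaction (not an inval)
//                           false,     // not a broadcast
//                           true,      // a response to the processor is needed
//                           srcid, trdid, pktid,  // VCI ids for that response
//                           nline,     // cache line concerned
//                           nb_copies, // expected number of acknowledges
//                           index );   // returned UPT index
//
// wok is false when the UPT is full; the WRITE FSM then releases the locks
// through WRITE_WAIT and retries from WRITE_DIR_LOCK.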
1469 if ( wok ) r_write_fsm = WRITE_HEAP_LOCK; 1470 else r_write_fsm = WRITE_WAIT; 1471 } 1472 break; 1473 } 1474 ////////////////// 1475 case WRITE_HEAP_LOCK: 1476 { 1477 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ){ 1478 r_write_fsm = WRITE_UPT_REQ; 1479 } 1480 break; 1481 } 1482 ////////////////// 1483 case WRITE_UPT_REQ: 1484 { 1485 ASSERT( (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 1486 ,"MemCache ERROR : bad HEAP allocation"); 1487 if( !r_write_to_init_cmd_multi_req.read() && 1488 !r_write_to_init_cmd_brdcast_req.read() ){ 1489 r_write_to_init_cmd_brdcast_req = false; 1490 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1491 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1492 r_write_to_init_cmd_index = r_write_word_index.read(); 1493 r_write_to_init_cmd_count = r_write_word_count.read(); 1494 1495 for(size_t i=0; i<m_words ; i++){ 1496 r_write_to_init_cmd_be[i]=r_write_be[i].read(); 1497 } 1498 1499 size_t min = r_write_word_index.read(); 1500 size_t max = r_write_word_index.read() + r_write_word_count.read(); 1501 for (size_t i=min ; i<max ; i++) { 1502 r_write_to_init_cmd_data[i] = r_write_data[i]; 1503 } 1504 1505 if((r_write_copy.read() != r_write_srcid.read()) or 1506 #if L1_MULTI_CACHE 1507 (r_write_copy_cache.read() != r_write_pktid.read()) or 1508 #endif 1509 r_write_copy_inst.read() ) { 1510 // We put the first copy in the fifo 1511 write_to_init_cmd_fifo_put = true; 1512 write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); 1513 write_to_init_cmd_fifo_srcid = r_write_copy.read(); 1514 #if L1_MULTI_CACHE 1515 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 1516 #endif 1517 if(r_write_count.read() == 1){ 1518 r_write_fsm = WRITE_IDLE; 1519 r_write_to_init_cmd_multi_req = true; 1520 } else { 1521 r_write_fsm = WRITE_UPDATE; 1522 } 1523 } else { 1524 r_write_fsm = WRITE_UPDATE; 1525 } 1526 } 1527 break; 1528 } 1529 ////////////////// 1530 case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm 1531 { 1532 ASSERT( (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 1533 ,"MemCache ERROR : bad HEAP allocation"); 1534 HeapEntry entry = m_heap_directory.read(r_write_ptr.read()); 1535 write_to_init_cmd_fifo_inst = entry.owner.inst; 1536 write_to_init_cmd_fifo_srcid = entry.owner.srcid; 1537 #if L1_MULTI_CACHE 1538 write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 1539 #endif 1540 1541 bool dec_upt_counter = r_write_to_dec.read(); 1542 if( (entry.owner.srcid != r_write_srcid.read()) or 1543 #if L1_MULTI_CACHE 1544 (entry.owner.cache_id != r_write_pktid.read()) or 1545 #endif 1546 entry.owner.inst){ 1547 write_to_init_cmd_fifo_put = true; 1548 } else { 1549 dec_upt_counter = true; 1550 } 1551 r_write_to_dec = dec_upt_counter; 1552 1553 if( m_write_to_init_cmd_inst_fifo.wok() ){ 1554 r_write_ptr = entry.next; 1555 if( entry.next == r_write_ptr.read() ) { // last copy 1556 r_write_to_init_cmd_multi_req = true; 1557 if(dec_upt_counter){ 1558 r_write_fsm = WRITE_UPT_DEC; 1559 } else { 1560 r_write_fsm = WRITE_IDLE; 1561 } 1562 } else { 1563 r_write_fsm = WRITE_UPDATE; 1564 } 1565 } else { 1566 r_write_fsm = WRITE_UPDATE; 1567 } 1568 break; 1569 } 1570 ////////////////// 1571 case WRITE_UPT_DEC: 1572 { 1573 if(!r_write_to_init_rsp_req.read()){ 1574 r_write_to_init_rsp_req = true; 1575 r_write_to_init_rsp_upt_index = r_write_upt_index.read(); 1576 r_write_fsm = WRITE_IDLE; 1577 } 1578 break; 1579 } 1580 /////////////// 1581 case WRITE_RSP: // send a request to TGT_RSP FSM to acknowledge the write 1582 { 1583 if ( 
!r_write_to_tgt_rsp_req.read() ) { 1584 1585 PRINTF(" * <MEM_CACHE.WRITE> Request from %d.%d (%d)\n",(uint32_t)r_write_srcid.read(), (uint32_t)r_write_trdid.read(), (uint32_t)r_write_pktid.read()); 1586 1587 r_write_to_tgt_rsp_req = true; 1588 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 1589 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 1590 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 1591 r_write_fsm = WRITE_IDLE; 1592 } 1593 break; 1594 } 1595 //////////////////// 1596 case WRITE_TRT_LOCK: // Miss : check Transaction Table 1597 { 1598 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { 1599 #ifdef TDEBUG 1600 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1601 std::cout << sc_time_stamp() << " " << name() << " READ_TRT_LOCK " << std::endl; 1602 } 1603 #endif 1604 size_t hit_index = 0; 1605 size_t wok_index = 0; 1606 bool hit_read = m_transaction_tab.hit_read(m_nline[(vci_addr_t)(r_write_address.read())],hit_index); 1607 bool hit_write = m_transaction_tab.hit_write(m_nline[(vci_addr_t)(r_write_address.read())]); 1608 bool wok = !m_transaction_tab.full(wok_index); 1609 if ( hit_read ) { // register the modified data in TRT 1610 r_write_trt_index = hit_index; 1611 r_write_fsm = WRITE_TRT_DATA; 1612 m_cpt_write_miss++; 1613 } else if ( wok && !hit_write ) { // set a new entry in TRT 1614 r_write_trt_index = wok_index; 1615 r_write_fsm = WRITE_TRT_SET; 1616 m_cpt_write_miss++; 1617 } else { // wait an empty entry in TRT 1618 r_write_fsm = WRITE_WAIT; 1619 m_cpt_trt_full++; 1620 } 1621 } 1622 break; 1623 } 1624 //////////////////// 1625 case WRITE_WAIT: // release the lock protecting TRT 1626 { 1627 r_write_fsm = WRITE_DIR_LOCK; 1628 break; 1629 } 1630 /////////////////// 1631 case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) 1632 { 1633 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 1634 { 1635 std::vector<be_t> be_vector; 1636 std::vector<data_t> data_vector; 1637 be_vector.clear(); 1638 data_vector.clear(); 1639 for ( size_t i=0; i<m_words; i++ ) 1640 { 1641 be_vector.push_back(r_write_be[i]); 1642 data_vector.push_back(r_write_data[i]); 1643 } 2195 case WRITE_DIR_INVAL: // Register a put transaction to XRAM in TRT 2196 // and invalidate the line in directory 2197 { 2198 if ( (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE ) || 2199 (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE ) || 2200 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) ) 2201 { 2202 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_DIR_INVAL state" << std::endl; 2203 std::cout << "bad TRT, DIR, or UPT allocation" << std::endl; 2204 exit(0); 2205 } 2206 2207 // register a write request to XRAM in TRT 1644 2208 m_transaction_tab.set(r_write_trt_index.read(), 1645 true, // read request to XRAM 1646 m_nline[(vci_addr_t)(r_write_address.read())], 1647 r_write_srcid.read(), 1648 r_write_trdid.read(), 1649 r_write_pktid.read(), 1650 false, // not a processor read 1651 0, // not a single word 1652 0, // word index 1653 be_vector, 1654 data_vector); 1655 #ifdef TDEBUG 1656 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1657 std::cout << sc_time_stamp() << " " << name() << " WRITE_TRT_SET transaction table : " << std::endl; 1658 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1659 m_transaction_tab.print(i); 1660 } 1661 #endif 1662 1663 r_write_fsm = WRITE_XRAM_REQ; 1664 } 1665 break; 1666 } 1667 /////////////////// 1668 case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) 1669 { 1670 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { 1671 std::vector<be_t> be_vector; 1672 std::vector<data_t> data_vector; 
1673 be_vector.clear(); 1674 data_vector.clear(); 1675 for ( size_t i=0; i<m_words; i++ ) { 1676 be_vector.push_back(r_write_be[i]); 1677 data_vector.push_back(r_write_data[i]); 1678 } 1679 m_transaction_tab.write_data_mask(r_write_trt_index.read(), 1680 be_vector, 1681 data_vector); 1682 r_write_fsm = WRITE_RSP; 1683 #ifdef TDEBUG 1684 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1685 std::cout << sc_time_stamp() << " " << name() << " WRITE_TRT_DATA transaction table : " << std::endl; 1686 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1687 m_transaction_tab.print(i); 1688 } 1689 #endif 1690 1691 } 1692 break; 1693 } 1694 //////////////////// 1695 case WRITE_XRAM_REQ: // send a request to IXR_CMD FSM 1696 { 1697 1698 if ( !r_write_to_ixr_cmd_req ) { 1699 r_write_to_ixr_cmd_req = true; 1700 r_write_to_ixr_cmd_write = false; 1701 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1702 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 1703 r_write_fsm = WRITE_RSP; 1704 } 1705 break; 1706 } 1707 //////////////////// 1708 case WRITE_TRT_WRITE_LOCK: 1709 { 1710 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { 1711 size_t wok_index = 0; 1712 bool wok = !m_transaction_tab.full(wok_index); 1713 if ( wok ) { // set a new entry in TRT 1714 r_write_trt_index = wok_index; 1715 r_write_fsm = WRITE_INVAL_LOCK; 1716 } else { // wait an empty entry in TRT 1717 r_write_fsm = WRITE_WAIT; 1718 } 1719 } 1720 1721 break; 1722 } 1723 //////////////////// 1724 case WRITE_INVAL_LOCK: 1725 { 1726 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { 1727 bool wok = false; 1728 size_t index = 0; 1729 size_t srcid = r_write_srcid.read(); 1730 size_t trdid = r_write_trdid.read(); 1731 size_t pktid = r_write_pktid.read(); 1732 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1733 size_t nb_copies = r_write_count.read(); 1734 1735 wok =m_update_tab.set(false, // it's an inval transaction 1736 true, // it's a broadcast 1737 true, // it needs a response 1738 srcid, 1739 trdid, 1740 pktid, 1741 nline, 1742 nb_copies, 1743 index); 1744 #ifdef IDEBUG 1745 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1746 if(wok){ 1747 std::cout << sc_time_stamp() << " " << name() << " WRITE_INVAL_LOCK update table : " << std::endl; 1748 m_update_tab.print(); 1749 } 1750 } 1751 #endif 1752 r_write_upt_index = index; 1753 // releases the lock protecting Update Table if no entry... 
1754 if ( wok ) r_write_fsm = WRITE_DIR_INVAL; 1755 else r_write_fsm = WRITE_WAIT; 1756 } 1757 1758 break; 1759 } 1760 //////////////////// 1761 case WRITE_DIR_INVAL: 1762 { 1763 ASSERT(((r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) && 1764 (r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) && 1765 (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE )) 1766 ,"MemCache ERROR : bad TRT,DIR or UPT allocation error"); 1767 m_transaction_tab.set(r_write_trt_index.read(), 1768 false, // write request to XRAM 1769 m_nline[(vci_addr_t)(r_write_address.read())], 1770 0, 1771 0, 1772 0, 1773 false, // not a processor read 1774 0, // not a single word 1775 0, // word index 1776 std::vector<be_t>(m_words,0), 1777 std::vector<data_t>(m_words,0)); 1778 #ifdef TDEBUG 1779 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1780 std::cout << sc_time_stamp() << " " << name() << " WRITE_DIR_INVAL transaction table : " << std::endl; 1781 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1782 m_transaction_tab.print(i); 1783 } 1784 #endif 1785 2209 false, // write request to XRAM 2210 m_nline[(vci_addr_t)(r_write_address.read())], 2211 0, 2212 0, 2213 0, 2214 false, // not a processor read 2215 0, // not a single word 2216 0, // word index 2217 std::vector<be_t>(m_words,0), 2218 std::vector<data_t>(m_words,0)); 1786 2219 // invalidate directory entry 1787 2220 DirectoryEntry entry; … … 1800 2233 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1801 2234 size_t way = r_write_way.read(); 2235 1802 2236 m_cache_directory.write(set, way, entry); 1803 2237 2238 #if DEBUG_MEMC_WRITE 2239 if( m_debug_write_fsm ) 2240 { 2241 std::cout << " <MEMC.WRITE_DIR_INVAL> Invalidate the directory entry: @ = " 2242 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 2243 } 2244 #endif 1804 2245 r_write_fsm = WRITE_INVAL; 1805 2246 break; 1806 2247 } 1807 //////////////////// 1808 case WRITE_INVAL: 1809 { 1810 if ( !r_write_to_init_cmd_multi_req.read() && 1811 !r_write_to_init_cmd_brdcast_req.read() ) { 1812 r_write_to_init_cmd_multi_req = false; 1813 r_write_to_init_cmd_brdcast_req = true; 1814 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1815 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1816 r_write_to_init_cmd_index = 0; 1817 r_write_to_init_cmd_count = 0; 1818 1819 for(size_t i=0; i<m_words ; i++){ 1820 r_write_to_init_cmd_be[i]=0; 1821 r_write_to_init_cmd_data[i] = 0; 1822 } 1823 r_write_fsm = WRITE_XRAM_SEND; 1824 // all inval responses 1825 } 1826 1827 break; 1828 } 1829 //////////////////// 1830 case WRITE_XRAM_SEND: 1831 { 1832 if ( !r_write_to_ixr_cmd_req ) { 1833 r_write_to_ixr_cmd_req = true; 1834 r_write_to_ixr_cmd_write = true; 1835 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1836 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 1837 for(size_t i=0; i<m_words; i++){ 1838 r_write_to_ixr_cmd_data[i] = r_write_data[i]; 1839 } 1840 r_write_fsm = WRITE_IDLE; 1841 } 1842 break; 2248 ///////////////// 2249 case WRITE_INVAL: // Post a coherence broadcast request to INIT_CMD FSM 2250 { 2251 if ( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) 2252 { 2253 r_write_to_init_cmd_multi_req = false; 2254 r_write_to_init_cmd_brdcast_req = true; 2255 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 2256 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2257 r_write_to_init_cmd_index = 0; 2258 r_write_to_init_cmd_count = 0; 2259 2260 for(size_t i=0; i<m_words ; i++) 2261 { 2262 
r_write_to_init_cmd_be[i]=0; 2263 r_write_to_init_cmd_data[i] = 0; 2264 } 2265 r_write_fsm = WRITE_XRAM_SEND; 2266 2267 #if DEBUG_MEMC_WRITE 2268 if( m_debug_write_fsm ) 2269 { 2270 std::cout << " <MEMC.WRITE_INVAL> Post a broadcast request to INIT_CMD FSM" << std::endl; 2271 } 2272 #endif 2273 } 2274 break; 2275 } 2276 ///////////////////// 2277 case WRITE_XRAM_SEND: // Post a put request to IXR_CMD FSM 2278 { 2279 if ( !r_write_to_ixr_cmd_req ) 2280 { 2281 r_write_to_ixr_cmd_req = true; 2282 r_write_to_ixr_cmd_write = true; 2283 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2284 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2285 2286 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 2287 2288 r_write_fsm = WRITE_IDLE; 2289 2290 #if DEBUG_MEMC_WRITE 2291 if( m_debug_write_fsm ) 2292 { 2293 std::cout << " <MEMC.WRITE_XRAM_SEND> Post a put request to IXR_CMD FSM" << std::endl; 2294 } 2295 #endif 2296 } 2297 break; 1843 2298 } 1844 2299 } // end switch r_write_fsm … … 1848 2303 /////////////////////////////////////////////////////////////////////// 1849 2304 // The IXR_CMD fsm controls the command packets to the XRAM : 1850 // - It sends a single cell VCI read to the XRAM in case of MISS request1851 // posted by the READ, WRITE or LLSC FSMs : the TRDID field contains2305 // - It sends a single cell VCI get request to the XRAM in case of MISS 2306 // posted by the READ, WRITE or SC FSMs : the TRDID field contains 1852 2307 // the Transaction Tab index. 1853 2308 // The VCI response is a multi-cell packet : the N cells contain 1854 2309 // the N data words. 1855 // - It sends a multi-cell VCI write when the XRAM_RSP FSM request1856 // to save a dirty line to the XRAM.2310 // - It sends a multi-cell VCI write when the XRAM_RSP FSM, WRITE FSM 2311 // or SC FSM request to save a dirty line to the XRAM. 1857 2312 // The VCI response is a single cell packet. 1858 // This FSM handles requests from the READ, WRITE, LLSC & XRAM_RSP FSMs2313 // This FSM handles requests from the READ, WRITE, SC & XRAM_RSP FSMs 1859 2314 // with a round-robin priority. 
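// ------------------------------------------------------------------------
// Illustrative sketch (not part of this changeset): the four IXR_CMD_*_IDLE
// states below implement the round-robin priority between the READ, WRITE,
// SC and XRAM_RSP requests. With hypothetical requester indices
// (0 = READ, 1 = WRITE, 2 = SC, 3 = XRAM_RSP) the same policy can be written
// as a circular scan starting just after the last requester served:
//
//     bool   req[4]  = { read_req, write_req, sc_req, xram_rsp_req };
//     size_t granted = 4;                                 // 4 means "no grant"
//     for ( size_t i = 0 ; i < 4 ; i++ )
//     {
//         size_t candidate = (last_served + i + 1) % 4;   // rotate the priority
//         if ( req[candidate] ) { granted = candidate; break; }
//     }
//
// Each *_IDLE state hard-codes one value of "last_served", which avoids an
// extra register at the cost of four nearly identical states.
// ------------------------------------------------------------------------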
1860 2315 //////////////////////////////////////////////////////////////////////// 1861 2316 1862 switch ( r_ixr_cmd_fsm.read() ) { 1863 //////////////////////// 1864 case IXR_CMD_READ_IDLE: 2317 switch ( r_ixr_cmd_fsm.read() ) 2318 { 2319 //////////////////////// 2320 case IXR_CMD_READ_IDLE: 1865 2321 if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 1866 else if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2322 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1867 2323 else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1868 2324 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 1869 2325 break; 1870 2326 //////////////////////// 1871 case IXR_CMD_WRITE_IDLE:1872 if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2327 case IXR_CMD_WRITE_IDLE: 2328 if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1873 2329 else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1874 2330 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; … … 1876 2332 break; 1877 2333 //////////////////////// 1878 case IXR_CMD_LLSC_IDLE:2334 case IXR_CMD_SC_IDLE: 1879 2335 if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1880 2336 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 1881 2337 else if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 1882 else if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2338 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1883 2339 break; 1884 2340 //////////////////////// 1885 case IXR_CMD_XRAM_IDLE:2341 case IXR_CMD_XRAM_IDLE: 1886 2342 if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 1887 2343 else if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 1888 else if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2344 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1889 2345 else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1890 2346 break; 1891 2347 ///////////////////////// 1892 case IXR_CMD_READ_NLINE: 1893 if ( p_vci_ixr.cmdack ) { 1894 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 1895 r_read_to_ixr_cmd_req = false; 2348 case IXR_CMD_READ_NLINE: 2349 if ( p_vci_ixr.cmdack ) 2350 { 2351 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 2352 r_read_to_ixr_cmd_req = false; 2353 2354 #if DEBUG_MEMC_IXR_CMD 2355 if( m_debug_ixr_cmd_fsm ) 2356 { 2357 std::cout << " <MEMC.IXR_CMD_READ_NLINE> Send a get request to xram" ; 2358 } 2359 #endif 1896 2360 } 1897 2361 break; 1898 2362 ////////////////////////// 1899 case IXR_CMD_WRITE_NLINE: 1900 if ( p_vci_ixr.cmdack ) { 1901 if( r_write_to_ixr_cmd_write.read()){ 1902 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) { 1903 r_ixr_cmd_cpt = 0; 1904 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 1905 r_write_to_ixr_cmd_req = false; 1906 } else { 1907 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 1908 } 1909 } else { 1910 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 1911 r_write_to_ixr_cmd_req = false; 1912 } 2363 case IXR_CMD_WRITE_NLINE: 2364 if ( p_vci_ixr.cmdack ) 2365 { 2366 if( r_write_to_ixr_cmd_write.read()) 2367 { 2368 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2369 { 2370 r_ixr_cmd_cpt = 0; 2371 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2372 r_write_to_ixr_cmd_req = false; 2373 } 2374 else 2375 { 2376 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 2377 } 2378 2379 #if DEBUG_MEMC_IXR_CMD 2380 if( m_debug_ixr_cmd_fsm ) 2381 { 2382 std::cout << " <MEMC.IXR_CMD_WRITE_NLINE> Send a put request to 
xram" ; 2383 } 2384 #endif 2385 } 2386 else 2387 { 2388 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2389 r_write_to_ixr_cmd_req = false; 2390 2391 #if DEBUG_MEMC_IXR_CMD 2392 if( m_debug_ixr_cmd_fsm ) 2393 { 2394 std::cout << " <MEMC.IXR_CMD_WRITE_NLINE> Send a get request to xram" ; 2395 } 2396 #endif 2397 } 1913 2398 } 1914 2399 break; 1915 ///////////////////////// 1916 case IXR_CMD_LLSC_NLINE: 1917 if ( p_vci_ixr.cmdack ) { 1918 if( r_llsc_to_ixr_cmd_write.read()){ 1919 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) { 1920 r_ixr_cmd_cpt = 0; 1921 r_ixr_cmd_fsm = IXR_CMD_LLSC_IDLE; 1922 r_llsc_to_ixr_cmd_req = false; 1923 } else { 1924 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 1925 } 1926 } else { 1927 r_ixr_cmd_fsm = IXR_CMD_LLSC_IDLE; 1928 r_llsc_to_ixr_cmd_req = false; 1929 } 2400 ////////////////////// 2401 case IXR_CMD_SC_NLINE: 2402 if ( p_vci_ixr.cmdack ) 2403 { 2404 if( r_sc_to_ixr_cmd_write.read()) 2405 { 2406 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2407 { 2408 r_ixr_cmd_cpt = 0; 2409 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2410 r_sc_to_ixr_cmd_req = false; 2411 } 2412 else 2413 { 2414 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 2415 } 2416 2417 #if DEBUG_MEMC_IXR_CMD 2418 if( m_debug_ixr_cmd_fsm ) 2419 { 2420 std::cout << " <MEMC.IXR_CMD_SC_NLINE> Send a put request to xram" ; 2421 } 2422 #endif 2423 } 2424 else 2425 { 2426 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2427 r_sc_to_ixr_cmd_req = false; 2428 2429 #if DEBUG_MEMC_IXR_CMD 2430 if( m_debug_ixr_cmd_fsm ) 2431 { 2432 std::cout << " <MEMC.IXR_CMD_SC_NLINE> Send a get request to xram" ; 2433 } 2434 #endif 2435 } 1930 2436 } 1931 2437 break; 1932 2438 //////////////////////// 1933 case IXR_CMD_XRAM_DATA: 1934 if ( p_vci_ixr.cmdack ) { 1935 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) { 1936 r_ixr_cmd_cpt = 0; 1937 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 1938 r_xram_rsp_to_ixr_cmd_req = false; 1939 } else { 1940 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 1941 } 2439 case IXR_CMD_XRAM_DATA: 2440 if ( p_vci_ixr.cmdack ) 2441 { 2442 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2443 { 2444 r_ixr_cmd_cpt = 0; 2445 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 2446 r_xram_rsp_to_ixr_cmd_req = false; 2447 } 2448 else 2449 { 2450 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 2451 } 2452 2453 #if DEBUG_MEMC_IXR_CMD 2454 if( m_debug_ixr_cmd_fsm ) 2455 { 2456 std::cout << " <MEMC.IXR_CMD_XRAM_DATA> Send a put request to xram" ; 2457 } 2458 #endif 1942 2459 } 1943 2460 break; … … 1949 2466 //////////////////////////////////////////////////////////////////////////// 1950 2467 // The IXR_RSP FSM receives the response packets from the XRAM, 1951 // for both write transaction, and readtransaction.2468 // for both put transaction, and get transaction. 1952 2469 // 1953 // - A response to a writerequest is a single-cell VCI packet.2470 // - A response to a put request is a single-cell VCI packet. 1954 2471 // The Transaction Tab index is contained in the RTRDID field. 1955 2472 // The FSM takes the lock protecting the TRT, and the corresponding 1956 2473 // entry is erased. 1957 2474 // 1958 // - A response to a readrequest is a multi-cell VCI packet.2475 // - A response to a get request is a multi-cell VCI packet. 1959 2476 // The Transaction Tab index is contained in the RTRDID field. 1960 2477 // The N cells contain the N words of the cache line in the RDATA field. 
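// ------------------------------------------------------------------------
// Illustrative sketch (not part of this changeset): on the first response
// cell, a put acknowledge can be told apart from a get data packet, because
// a put answer is a single cell (REOP set immediately) while a get answer
// only sets REOP on the last of the m_words data cells. Assuming the local
// names below, the first-cell decoding done in IXR_RSP_IDLE amounts to:
//
//     if ( p_vci_ixr.rspval.read() )
//     {
//         size_t trt_index = p_vci_ixr.rtrdid.read();        // TRT entry concerned
//         bool   one_cell  = p_vci_ixr.reop.read();          // true for a put acknowledge
//         bool   error     = p_vci_ixr.rerror.read() & 0x1;  // bit 0 = error status
//         // one_cell && !error : acknowledge the put, then erase the TRT entry
//         // otherwise          : store the incoming data cells into TRT entry trt_index
//     }
// ------------------------------------------------------------------------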
… … 1964 2481 /////////////////////////////////////////////////////////////////////////////// 1965 2482 1966 switch ( r_ixr_rsp_fsm.read() ) { 1967 1968 /////////////////// 1969 case IXR_RSP_IDLE: // test if it's a read or a write transaction 1970 { 1971 if ( p_vci_ixr.rspval.read() ) { 1972 r_ixr_rsp_cpt = 0; 1973 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 1974 if ( p_vci_ixr.reop.read() && !(p_vci_ixr.rerror.read()&0x1)) 1975 r_ixr_rsp_fsm = IXR_RSP_ACK; 1976 else 1977 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 1978 } 1979 break; 2483 switch ( r_ixr_rsp_fsm.read() ) 2484 { 2485 /////////////////// 2486 case IXR_RSP_IDLE: // test if it's a get or a put transaction 2487 { 2488 if ( p_vci_ixr.rspval.read() ) 2489 { 2490 r_ixr_rsp_cpt = 0; 2491 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 2492 if ( p_vci_ixr.reop.read() && !(p_vci_ixr.rerror.read()&0x1)) // put transaction 2493 { 2494 r_ixr_rsp_fsm = IXR_RSP_ACK; 2495 2496 #if DEBUG_MEMC_IXR_RSP 2497 if( m_debug_ixr_rsp_fsm ) 2498 { 2499 std::cout << " <MEMC.IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 2500 } 2501 #endif 2502 } 2503 else // get transaction 2504 { 2505 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 2506 2507 #if DEBUG_MEMC_IXR_RSP 2508 if( m_debug_ixr_rsp_fsm ) 2509 { 2510 std::cout << " <MEMC.IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 2511 } 2512 #endif 2513 } 2514 } 2515 break; 1980 2516 } 1981 2517 //////////////////////// 1982 case IXR_RSP_ACK: // Acknowledge the vci response 1983 { 1984 if(p_vci_ixr.rspval.read()) 1985 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 2518 case IXR_RSP_ACK: // Aknowledge the VCI response 2519 { 2520 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 2521 2522 #if DEBUG_MEMC_IXR_RSP 2523 if( m_debug_ixr_rsp_fsm ) 2524 { 2525 std::cout << " <MEMC.IXR_RSP_ACK>" << std::endl; 2526 } 2527 #endif 1986 2528 break; 1987 2529 } 1988 2530 //////////////////////// 1989 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 1990 { 1991 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) { 1992 m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); 1993 r_ixr_rsp_fsm = IXR_RSP_IDLE; 1994 #ifdef TDEBUG 1995 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1996 std::cout << sc_time_stamp() << " " << name() << " IXR_RSP_TRT_ERASE transaction table : " << std::endl; 1997 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1998 m_transaction_tab.print(i); 1999 } 2000 #endif 2001 2002 } 2003 break; 2531 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 2532 { 2533 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) 2534 { 2535 m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); 2536 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2537 2538 #if DEBUG_MEMC_IXR_RSP 2539 if( m_debug_ixr_rsp_fsm ) 2540 { 2541 std::cout << " <MEMC.IXR_RSP_TRT_ERASE> Erase TRT entry " 2542 << r_ixr_rsp_trt_index.read() << std::endl; 2543 } 2544 #endif 2545 } 2546 break; 2004 2547 } 2005 2548 /////////////////////// 2006 case IXR_RSP_TRT_READ: // write data in the TRT 2007 { 2008 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) { 2009 bool eop = p_vci_ixr.reop.read(); 2010 data_t data = p_vci_ixr.rdata.read(); 2011 size_t index = r_ixr_rsp_trt_index.read(); 2012 ASSERT(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || 2013 p_vci_ixr.rerror.read()) 2014 ,"Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); 2015 m_transaction_tab.write_rsp(index, r_ixr_rsp_cpt.read(), data, p_vci_ixr.rerror.read()&0x1); 2016 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; 2017 if ( eop ) { 2018 #ifdef TDEBUG 
2019 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2020 std::cout << sc_time_stamp() << " " << name() << " IXR_RSP_TRT_READ transaction table : " << std::endl; 2021 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2022 m_transaction_tab.print(i); 2023 } 2024 #endif 2025 2026 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 2027 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2028 } 2029 } 2030 break; 2549 case IXR_RSP_TRT_READ: // write data in the TRT 2550 { 2551 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) 2552 { 2553 size_t index = r_ixr_rsp_trt_index.read(); 2554 bool eop = p_vci_ixr.reop.read(); 2555 data_t data = p_vci_ixr.rdata.read(); 2556 bool error = (p_vci_ixr.rerror.read()&0x1 == 0); 2557 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || p_vci_ixr.rerror.read()) 2558 and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); 2559 m_transaction_tab.write_rsp(index, 2560 r_ixr_rsp_cpt.read(), 2561 data, 2562 error); 2563 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; 2564 if ( eop ) 2565 { 2566 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 2567 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2568 } 2569 2570 #if DEBUG_MEMC_IXR_RSP 2571 if( m_debug_ixr_rsp_fsm ) 2572 { 2573 std::cout << " <MEMC.IXR_RSP_TRT_READ> Writing a word in TRT : " 2574 << " index = " << std::dec << index 2575 << " / word = " << r_ixr_rsp_cpt.read() 2576 << " / data = " << std::hex << data << std::endl; 2577 } 2578 #endif 2579 } 2580 break; 2031 2581 } 2032 2582 } // end swich r_ixr_rsp_fsm 2033 2034 2583 2035 2584 //////////////////////////////////////////////////////////////////////////// … … 2037 2586 //////////////////////////////////////////////////////////////////////////// 2038 2587 // The XRAM_RSP FSM handles the incoming cache lines from the XRAM. 2039 // The cache line has been written in the TRT buffer by the IXR_FSM. 2588 // The cache line has been written in the TRT by the IXR_CMD_FSM. 2589 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 2590 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] 2591 // as the number of entries in the TRT, that are handled with 2592 // a round-robin priority... 2040 2593 // 2041 2594 // When a response is available, the corresponding TRT entry 2042 // is copied in a local buffer to be written in the cache. 2043 // Then, the FSM releases the lock protecting the TRT, and takes the lock 2044 // protecting the cache directory. 2595 // must be copied in a local buffer to be written in the cache. 2596 // The FSM takes the lock protecting the TRT, and the lock protecting the DIR. 2045 2597 // It selects a cache slot and writes the line in the cache. 
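// ------------------------------------------------------------------------
// Illustrative sketch (not part of this changeset): once the victim line has
// been selected (XRAM_RSP_TRT_COPY below), two independent conditions drive
// the rest of the scenario. Assuming a local "victim" directory entry:
//
//     bool inval_required = victim.valid && (victim.count != 0); // L1 copies must be invalidated
//     bool writeback      = victim.dirty;                        // victim must be written back (put)
//
// inval_required leads to the XRAM_RSP_INVAL / XRAM_RSP_HEAP_ERASE states,
// writeback leads to XRAM_RSP_TRT_DIRTY / XRAM_RSP_WRITE_DIRTY, and both can
// be true for the same victim.
// ------------------------------------------------------------------------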
2046 2598 // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP … … 2053 2605 /////////////////////////////////////////////////////////////////////////////// 2054 2606 2055 switch ( r_xram_rsp_fsm.read() ) { 2056 2057 /////////////////// 2058 case XRAM_RSP_IDLE: // test if there is a response with a round robin priority 2059 { 2060 size_t ptr = r_xram_rsp_trt_index.read(); 2061 size_t lines = m_transaction_tab_lines; 2062 for(size_t i=0; i<lines; i++){ 2063 size_t index=(i+ptr+1)%lines; 2064 if(r_ixr_rsp_to_xram_rsp_rok[index]){ 2065 r_xram_rsp_trt_index=index; 2066 r_ixr_rsp_to_xram_rsp_rok[index]=false; 2067 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2068 break; 2069 #ifdef TDEBUG 2070 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2071 std::cout << "XRAM_RSP FSM in XRAM_RSP_IDLE state" << std::endl; 2072 } 2073 #endif 2074 } 2075 } 2076 break; 2607 switch ( r_xram_rsp_fsm.read() ) 2608 { 2609 /////////////////// 2610 case XRAM_RSP_IDLE: // scan the XRAM responses to get the TRT index (round robin) 2611 { 2612 size_t ptr = r_xram_rsp_trt_index.read(); 2613 size_t lines = m_transaction_tab_lines; 2614 for( size_t i=0 ; i<lines ; i++) 2615 { 2616 size_t index=(i+ptr+1)%lines; 2617 if ( r_ixr_rsp_to_xram_rsp_rok[index] ) 2618 { 2619 r_xram_rsp_trt_index = index; 2620 r_ixr_rsp_to_xram_rsp_rok[index] = false; 2621 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2622 2623 #if DEBUG_MEMC_XRAM_RSP 2624 if( m_debug_xram_rsp_fsm ) 2625 { 2626 std::cout << " <MEMC.XRAM_RSP_IDLE> Available cache line in TRT:" 2627 << " index = " << std::dec << index << std::endl; 2628 } 2629 #endif 2630 break; 2631 } 2632 } 2633 break; 2077 2634 } 2078 2635 /////////////////////// 2079 case XRAM_RSP_DIR_LOCK: // Take the lock on the directory 2080 { 2081 if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) { 2082 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 2083 #ifdef TDEBUG 2084 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2085 std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_LOCK state" << std::endl; 2636 case XRAM_RSP_DIR_LOCK: // Takes the lock on the directory 2637 { 2638 if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) 2639 { 2640 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 2641 2642 #if DEBUG_MEMC_XRAM_RSP 2643 if( m_debug_xram_rsp_fsm ) 2644 { 2645 std::cout << " <MEMC.XRAM_RSP_DIR_LOCK> Get access to directory" << std::endl; 2646 } 2647 #endif 2648 } 2649 break; 2650 } 2651 /////////////////////// 2652 case XRAM_RSP_TRT_COPY: // Takes the lock on TRT 2653 // Copy the TRT entry in a local buffer 2654 // and select a victim cache line 2655 { 2656 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 2657 { 2658 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 2659 size_t index = r_xram_rsp_trt_index.read(); 2660 TransactionTabEntry trt_entry(m_transaction_tab.read(index)); 2661 r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer 2662 2663 // selects & extracts a victim line from cache 2664 size_t way = 0; 2665 size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)]; 2666 DirectoryEntry victim(m_cache_directory.select(set, way)); 2667 2668 bool inval = (victim.count && victim.valid) ; 2669 2670 // copy the victim line in a local buffer 2671 for (size_t i=0 ; i<m_words ; i++) 2672 r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; 2673 r_xram_rsp_victim_copy = victim.owner.srcid; 2674 #if L1_MULTI_CACHE 2675 r_xram_rsp_victim_copy_cache= victim.owner.cache_id; 2676 #endif 2677 r_xram_rsp_victim_copy_inst = victim.owner.inst; 2678 r_xram_rsp_victim_count = victim.count; 2679 r_xram_rsp_victim_ptr = victim.ptr; 2680 
r_xram_rsp_victim_way = way; 2681 r_xram_rsp_victim_set = set; 2682 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 2683 r_xram_rsp_victim_is_cnt = victim.is_cnt; 2684 r_xram_rsp_victim_inval = inval ; 2685 r_xram_rsp_victim_dirty = victim.dirty; 2686 2687 if(!trt_entry.rerror) r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 2688 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 2689 2690 #if DEBUG_MEMC_XRAM_RSP 2691 if( m_debug_xram_rsp_fsm ) 2692 { 2693 std::cout << " <MEMC.XRAM_RSP_TRT_COPY> Select a slot: " 2694 << " way = " << std::dec << way 2695 << " / set = " << set 2696 << " / inval_required = " << inval << std::endl; 2697 } 2698 #endif 2699 } 2700 break; 2701 } 2702 ///////////////////////// 2703 case XRAM_RSP_INVAL_LOCK: // check a possible pending inval 2704 { 2705 if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) 2706 { 2707 size_t index; 2708 if (m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index)) 2709 { 2710 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2711 2712 #if DEBUG_MEMC_XRAM_RSP 2713 if( m_debug_xram_rsp_fsm ) 2714 { 2715 std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get acces to UPT," 2716 << " but an invalidation is already registered at this address" << std::endl; 2717 m_update_tab.print(); 2718 } 2719 #endif 2720 2721 } 2722 else if (m_update_tab.is_full() && r_xram_rsp_victim_inval.read()) 2723 { 2724 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2725 2726 #if DEBUG_MEMC_XRAM_RSP 2727 if( m_debug_xram_rsp_fsm ) 2728 { 2729 std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get acces to UPT," 2730 << " but the table is full" << std::endl; 2731 m_update_tab.print(); 2732 } 2733 #endif 2734 } 2735 else 2736 { 2737 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 2738 2739 #if DEBUG_MEMC_XRAM_RSP 2740 if( m_debug_xram_rsp_fsm ) 2741 { 2742 std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get acces to UPT" << std::endl; 2743 } 2744 #endif 2745 } 2746 } 2747 break; 2748 } 2749 ///////////////////////// 2750 case XRAM_RSP_INVAL_WAIT: // returns to DIR_LOCK to retry 2751 { 2752 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2753 break; 2754 } 2755 /////////////////////// 2756 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 2757 // and possibly set an inval request in UPT 2758 { 2759 // signals generation 2760 bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; 2761 bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read; 2762 // update data 2763 size_t set = r_xram_rsp_victim_set.read(); 2764 size_t way = r_xram_rsp_victim_way.read(); 2765 for(size_t i=0; i<m_words ; i++) m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; 2766 // compute dirty 2767 bool dirty = false; 2768 for(size_t i=0; i<m_words;i++) dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); 2769 // update directory 2770 DirectoryEntry entry; 2771 entry.valid = true; 2772 entry.is_cnt = false; 2773 entry.lock = false; 2774 entry.dirty = dirty; 2775 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 2776 entry.ptr = 0; 2777 if(cached_read) 2778 { 2779 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 2780 #if L1_MULTI_CACHE 2781 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 2782 #endif 2783 entry.owner.inst = inst_read; 2784 entry.count = 1; 2785 } 2786 else 2787 { 2788 entry.owner.srcid = 0; 2789 #if L1_MULTI_CACHE 2790 entry.owner.cache_id = 0; 2791 #endif 2792 entry.owner.inst = 0; 2793 entry.count = 0; 2794 } 2795 m_cache_directory.write(set, way, entry); 2796 2797 if (r_xram_rsp_victim_inval.read()) 2798 { 2799 bool brdcast = r_xram_rsp_victim_is_cnt.read(); 2800 size_t index = 0; 2801 
size_t count_copies = r_xram_rsp_victim_count.read(); 2802 2803 bool wok = m_update_tab.set( false, // it's an inval transaction 2804 brdcast, // set brdcast bit 2805 false, // it does not need a response 2806 0, // srcid 2807 0, // trdid 2808 0, // pktid 2809 r_xram_rsp_victim_nline.read(), 2810 count_copies, 2811 index); 2812 r_xram_rsp_upt_index = index; 2813 2814 if (!wok) 2815 { 2816 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST state" << std::endl; 2817 std::cout << "an update_tab entry was free but write is unsuccessful" << std::endl; 2818 exit(0); 2819 } 2820 } 2821 2822 #if DEBUG_MEMC_XRAM_RSP 2823 if( m_debug_xram_rsp_fsm ) 2824 { 2825 std::cout << " <MEMC.XRAM_RSP_DIR_UPDT> Directory update: " 2826 << " way = " << std::dec << way 2827 << " / set = " << set 2828 << " / count = " << entry.count 2829 << " / is_cnt = " << entry.is_cnt << std::endl; 2830 if (r_xram_rsp_victim_inval.read()) 2831 std::cout << " Invalidation request for victim line " 2832 << std::hex << r_xram_rsp_victim_nline.read() 2833 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 2834 } 2835 #endif 2836 2837 // If the victim is not dirty, we don't need another XRAM put transaction, 2838 // and we canwe erase the TRT entry 2839 if (!r_xram_rsp_victim_dirty.read()) m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 2840 2841 // Next state 2842 if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 2843 else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2844 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2845 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2846 break; 2847 } 2848 //////////////////////// 2849 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty 2850 { 2851 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) 2852 { 2853 m_transaction_tab.set( r_xram_rsp_trt_index.read(), 2854 false, // write to XRAM 2855 r_xram_rsp_victim_nline.read(), // line index 2856 0, 2857 0, 2858 0, 2859 false, 2860 0, 2861 0, 2862 std::vector<be_t>(m_words,0), 2863 std::vector<data_t>(m_words,0) ); 2864 2865 #if DEBUG_MEMC_XRAM_RSP 2866 if( m_debug_xram_rsp_fsm ) 2867 { 2868 std::cout << " <MEMC.XRAM_RSP_TRT_DIRTY> Set TRT entry for the put transaction:" 2869 << " dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 2870 } 2871 #endif 2872 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2873 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2874 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2875 } 2876 break; 2877 } 2878 ////////////////////// 2879 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 2880 { 2881 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 2882 { 2883 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 2884 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 2885 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 2886 for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 2887 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 2888 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 2889 r_xram_rsp_to_tgt_rsp_rerror = false; 2890 r_xram_rsp_to_tgt_rsp_req = true; 2891 2892 if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2893 else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2894 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2895 2896 2897 #if DEBUG_MEMC_XRAM_RSP 2898 if( m_debug_xram_rsp_fsm ) 2899 { 
2900 std::cout << " <MEMC.XRAM_RSP_DIR_RSP> Request the TGT_RSP FSM to return data:" 2901 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 2902 << " / address = " << r_xram_rsp_trt_buf.nline*m_words*4 2903 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 2904 } 2905 #endif 2906 } 2907 break; 2908 } 2909 //////////////////// 2910 case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM 2911 { 2912 if( !r_xram_rsp_to_init_cmd_multi_req.read() && 2913 !r_xram_rsp_to_init_cmd_brdcast_req.read() ) 2914 { 2915 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 2916 bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); 2917 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2918 2919 r_xram_rsp_to_init_cmd_multi_req = last_multi_req; 2920 r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 2921 r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); 2922 r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; 2923 xram_rsp_to_init_cmd_fifo_srcid = r_xram_rsp_victim_copy.read(); 2924 xram_rsp_to_init_cmd_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 2925 #if L1_MULTI_CACHE 2926 xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read(); 2927 #endif 2928 xram_rsp_to_init_cmd_fifo_put = multi_req; 2929 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 2930 2931 if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2932 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2933 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2934 2935 #if DEBUG_MEMC_XRAM_RSP 2936 if( m_debug_xram_rsp_fsm ) 2937 { 2938 std::cout << " <MEMC.XRAM_RSP_INVAL> Send an inval request to INIT_CMD FSM:" 2939 << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 2086 2940 } 2087 2941 #endif … … 2089 2943 break; 2090 2944 } 2091 /////////////////////// 2092 case XRAM_RSP_TRT_COPY: // Copy the TRT entry in the local buffer and eviction of a cache line 2093 { 2094 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) { 2095 size_t index = r_xram_rsp_trt_index.read(); 2096 TransactionTabEntry trt_entry(m_transaction_tab.read(index)); 2097 2098 r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer 2099 2100 // selects & extracts a victim line from cache 2101 size_t way = 0; 2102 size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)]; 2103 DirectoryEntry victim(m_cache_directory.select(set, way)); 2104 2105 for (size_t i=0 ; i<m_words ; i++) r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; 2106 2107 bool inval = (victim.count && victim.valid) ; 2108 2109 r_xram_rsp_victim_copy = victim.owner.srcid; 2945 ////////////////////////// 2946 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 2947 { 2948 if ( !r_xram_rsp_to_ixr_cmd_req.read() ) 2949 { 2950 r_xram_rsp_to_ixr_cmd_req = true; 2951 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 2952 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 2953 for(size_t i=0; i<m_words ; i++) r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 2954 m_cpt_write_dirty++; 2955 2956 bool multi_req = !r_xram_rsp_victim_is_cnt.read() && r_xram_rsp_victim_inval.read(); 2957 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2958 if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2959 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2960 2961 #if DEBUG_MEMC_XRAM_RSP 2962 if( m_debug_xram_rsp_fsm ) 2963 { 2964 std::cout << " <MEMC.XRAM_RSP_WRITE_DIRTY> Send 
the put request to IXR_CMD FSM:" 2965 << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 2966 } 2967 #endif 2968 } 2969 break; 2970 } 2971 ///////////////////////// 2972 case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations 2973 { 2974 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) 2975 { 2976 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 2977 2978 xram_rsp_to_init_cmd_fifo_srcid = entry.owner.srcid; 2110 2979 #if L1_MULTI_CACHE 2111 r_xram_rsp_victim_copy_cache= victim.owner.cache_id; 2112 #endif 2113 r_xram_rsp_victim_copy_inst = victim.owner.inst; 2114 r_xram_rsp_victim_count = victim.count; 2115 r_xram_rsp_victim_ptr = victim.ptr; 2116 r_xram_rsp_victim_way = way; 2117 r_xram_rsp_victim_set = set; 2118 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 2119 r_xram_rsp_victim_is_cnt = victim.is_cnt; 2120 r_xram_rsp_victim_inval = inval ; 2121 r_xram_rsp_victim_dirty = victim.dirty; 2122 2123 if(!trt_entry.rerror) 2124 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 2125 else 2126 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 2127 #ifdef TDEBUG 2128 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2129 std::cout << "XRAM_RSP FSM in XRAM_RSP_TRT_COPY state" << std::endl; 2130 std::cout << "Victim way : " << std::hex << way << " set " << set << std::dec << std::endl; 2131 victim.print(); 2132 } 2133 #endif 2134 } 2135 break; 2136 } 2137 /////////////////////// 2138 case XRAM_RSP_INVAL_LOCK: 2139 { 2140 if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) { 2141 #ifdef IDEBUG 2142 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2143 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state" << std::endl; 2144 } 2145 #endif 2146 size_t index; 2147 if(m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index)){ 2148 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2149 #ifdef IDEBUG 2150 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2151 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_INVAL_WAIT state" << std::endl; 2152 std::cout << "A invalidation is already registered at this address" << std::endl; 2153 m_update_tab.print(); 2154 } 2155 #endif 2156 2157 } 2158 else if(m_update_tab.is_full() && r_xram_rsp_victim_inval.read()){ 2159 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2160 #ifdef IDEBUG 2161 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2162 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_INVAL_WAIT state" << std::endl; 2163 std::cout << "The inval tab is full" << std::endl; 2164 m_update_tab.print(); 2165 } 2166 #endif 2167 } 2168 else { 2169 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 2170 #ifdef IDEBUG 2171 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2172 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_DIR_UPDT state" << std::endl; 2173 m_update_tab.print(); 2174 } 2175 #endif 2176 } 2177 } 2178 break; 2179 } 2180 /////////////////////// 2181 case XRAM_RSP_INVAL_WAIT: 2182 { 2183 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2184 break; 2185 #ifdef IDEBUG 2186 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2187 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_WAIT state" << std::endl; 2188 } 2189 #endif 2190 } 2191 /////////////////////// 2192 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 2193 { 2194 // signals generation 2195 bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; // It is an instruction read 2196 bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read ; 2197 // update data 2198 size_t set = r_xram_rsp_victim_set.read(); 2199 size_t way = r_xram_rsp_victim_way.read(); 2200 for(size_t 
i=0; i<m_words ; i++){ 2201 m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; 2202 } 2203 // compute dirty 2204 bool dirty = false; 2205 for(size_t i=0; i<m_words;i++){ 2206 dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); 2207 } 2208 2209 // update directory 2210 DirectoryEntry entry; 2211 entry.valid = true; 2212 entry.is_cnt = false; 2213 entry.lock = false; 2214 entry.dirty = dirty; 2215 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 2216 entry.ptr = 0; 2217 if(cached_read) { 2218 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 2980 xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 2981 #endif 2982 xram_rsp_to_init_cmd_fifo_inst = entry.owner.inst; 2983 xram_rsp_to_init_cmd_fifo_put = true; 2984 if( m_xram_rsp_to_init_cmd_inst_fifo.wok() ) 2985 { 2986 r_xram_rsp_next_ptr = entry.next; 2987 if( entry.next == r_xram_rsp_next_ptr.read() ) // last copy 2988 { 2989 r_xram_rsp_to_init_cmd_multi_req = true; 2990 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 2991 } 2992 else 2993 { 2994 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2995 } 2996 } 2997 else 2998 { 2999 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 3000 } 3001 3002 #if DEBUG_MEMC_XRAM_RSP 3003 if( m_debug_xram_rsp_fsm ) 3004 { 3005 std::cout << " <MEMC.XRAM_RSP_HEAP_ERASE> Erase the list of copies:" 3006 << " srcid = " << entry.owner.srcid 3007 << " / inst = " << entry.owner.inst << std::endl; 3008 } 3009 #endif 3010 } 3011 break; 3012 } 3013 ///////////////////////// 3014 case XRAM_RSP_HEAP_LAST: // last member of the list 3015 { 3016 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP ) 3017 { 3018 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST state" << std::endl; 3019 std::cout << "bad HEAP allocation" << std::endl; 3020 exit(0); 3021 } 3022 size_t free_pointer = m_heap.next_free_ptr(); 3023 3024 HeapEntry last_entry; 3025 last_entry.owner.srcid = 0; 2219 3026 #if L1_MULTI_CACHE 2220 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 2221 #endif 2222 entry.owner.inst = inst_read; 2223 entry.count = 1; 2224 } else { 2225 entry.owner.srcid = 0; 2226 #if L1_MULTI_CACHE 2227 entry.owner.cache_id = 0; 2228 #endif 2229 entry.owner.inst = 0; 2230 entry.count = 0; 2231 } 2232 m_cache_directory.write(set, way, entry); 2233 #ifdef DDEBUG 2234 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2235 std::cout << "printing the entry : " << std::endl; 2236 entry.print(); 2237 std::cout << "done" << std::endl; 2238 } 2239 #endif 2240 2241 #ifdef TDEBUG 2242 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2243 std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_DIR_UPDT transaction table : " << std::endl; 2244 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2245 m_transaction_tab.print(i); 2246 } 2247 #endif 2248 2249 if(r_xram_rsp_victim_inval.read()){ 2250 bool brdcast = r_xram_rsp_victim_is_cnt.read(); 2251 size_t index = 0; 2252 size_t count_copies = r_xram_rsp_victim_count.read(); 2253 2254 //@@ 2255 bool wok = m_update_tab.set(false, // it's an inval transaction 2256 brdcast, // set brdcast bit 2257 false, // it does not need a response 2258 0,//srcid 2259 0,//trdid 2260 0,//pktid 2261 r_xram_rsp_victim_nline.read(), 2262 count_copies, 2263 index); 2264 2265 #ifdef IDEBUG 2266 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2267 std::cout << "xram_rsp : record invalidation, time = " << std::dec << m_cpt_cycles << std::endl; 2268 m_update_tab.print(); 2269 } 2270 #endif 2271 r_xram_rsp_upt_index = index; 2272 if(!wok) { 2273 ASSERT(false,"mem_cache error : xram_rsp_dir_upt, an update_tab entry was free but write unsuccessful"); 2274 } 
2275 } 2276 // If the victim is not dirty, we erase the entry in the TRT 2277 if (!r_xram_rsp_victim_dirty.read()){ 2278 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 2279 2280 } 2281 // Next state 2282 if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 2283 else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2284 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2285 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2286 break; 3027 last_entry.owner.cache_id = 0; 3028 #endif 3029 last_entry.owner.inst = false; 3030 if(m_heap.is_full()) 3031 { 3032 last_entry.next = r_xram_rsp_next_ptr.read(); 3033 m_heap.unset_full(); 3034 } 3035 else 3036 { 3037 last_entry.next = free_pointer; 3038 } 3039 3040 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 3041 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 3042 3043 r_xram_rsp_fsm = XRAM_RSP_IDLE; 3044 3045 #if DEBUG_MEMC_XRAM_RSP 3046 if( m_debug_xram_rsp_fsm ) 3047 { 3048 std::cout << " <MEMC.XRAM_RSP_HEAP_LAST> Heap housekeeping" << std::endl; 3049 } 3050 #endif 3051 break; 3052 } 3053 // /////////////////////// 3054 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 3055 { 3056 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 3057 3058 // Next state 3059 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 3060 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3061 3062 #if DEBUG_MEMC_XRAM_RSP 3063 if( m_debug_xram_rsp_fsm ) 3064 { 3065 std::cout << " <MEMC.XRAM_RSP_ERROR_ERASE> Error reported by XRAM / erase the TRT entry" << std::endl; 3066 } 3067 #endif 3068 break; 2287 3069 } 2288 3070 //////////////////////// 2289 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write line to XRAM) if the victim is dirty 2290 { 2291 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) { 2292 m_transaction_tab.set(r_xram_rsp_trt_index.read(), 2293 false, // write to XRAM 2294 r_xram_rsp_victim_nline.read(), // line index 2295 0, 2296 0, 2297 0, 2298 false, 2299 0, 2300 0, 2301 std::vector<be_t>(m_words,0), 2302 std::vector<data_t>(m_words,0) ); 2303 #ifdef TDEBUG 2304 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2305 std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_TRT_DIRTY transaction table : " << std::endl; 2306 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2307 m_transaction_tab.print(i); 2308 } 2309 #endif 2310 2311 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2312 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2313 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2314 } 2315 break; 2316 } 2317 ////////////////////// 2318 case XRAM_RSP_DIR_RSP: // send a request to TGT_RSP FSM in case of read 2319 { 2320 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) { 2321 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 2322 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 2323 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 2324 for (size_t i=0; i < m_words; i++) { 2325 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 2326 } 2327 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 2328 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 2329 r_xram_rsp_to_tgt_rsp_rerror = false; 2330 r_xram_rsp_to_tgt_rsp_req = true; 2331 2332 if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2333 else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2334 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2335 2336 #ifdef DDEBUG 2337 if(m_cpt_cycles > 
DEBUG_START_CYCLE){ 2338 std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_RSP state" << std::endl; 2339 } 2340 #endif 2341 } 2342 break; 2343 } 2344 //////////////////// 2345 case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM 2346 { 2347 if( !r_xram_rsp_to_init_cmd_multi_req.read() && 2348 !r_xram_rsp_to_init_cmd_brdcast_req.read() ) { 2349 2350 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 2351 bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); 2352 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2353 2354 r_xram_rsp_to_init_cmd_multi_req = last_multi_req; 2355 r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 2356 r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); 2357 r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; 2358 xram_rsp_to_init_cmd_fifo_srcid = r_xram_rsp_victim_copy.read(); 2359 xram_rsp_to_init_cmd_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 2360 #if L1_MULTI_CACHE 2361 xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read(); 2362 #endif 2363 xram_rsp_to_init_cmd_fifo_put = multi_req; 2364 2365 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 2366 2367 if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2368 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2369 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2370 #ifdef IDEBUG 2371 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2372 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL state" << std::endl; 2373 } 2374 #endif 2375 } 2376 break; 2377 } 2378 ////////////////////////// 2379 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 2380 { 2381 if ( !r_xram_rsp_to_ixr_cmd_req.read() ) { 2382 r_xram_rsp_to_ixr_cmd_req = true; 2383 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 2384 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 2385 for(size_t i=0; i<m_words ; i++) { 2386 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 2387 } 2388 m_cpt_write_dirty++; 2389 bool multi_req = !r_xram_rsp_victim_is_cnt.read() && r_xram_rsp_victim_inval.read(); 2390 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2391 if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2392 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2393 #ifdef TDEBUG 2394 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2395 std::cout << "XRAM_RSP FSM in XRAM_RSP_WRITE_DIRTY state" << std::endl; 2396 } 2397 #endif 2398 } 2399 break; 2400 } 2401 ////////////////////////// 2402 case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations 2403 { 2404 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) { 2405 HeapEntry entry = m_heap_directory.read(r_xram_rsp_next_ptr.read()); 2406 xram_rsp_to_init_cmd_fifo_srcid = entry.owner.srcid; 2407 #if L1_MULTI_CACHE 2408 xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 2409 #endif 2410 xram_rsp_to_init_cmd_fifo_inst = entry.owner.inst; 2411 xram_rsp_to_init_cmd_fifo_put = true; 2412 if( m_xram_rsp_to_init_cmd_inst_fifo.wok() ){ 2413 r_xram_rsp_next_ptr = entry.next; 2414 if( entry.next == r_xram_rsp_next_ptr.read() ){ // last copy 2415 r_xram_rsp_to_init_cmd_multi_req = true; 2416 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 2417 } else { 2418 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2419 } 2420 } else { 2421 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2422 } 2423 } 2424 break; 2425 } 2426 ////////////////////////// 2427 case XRAM_RSP_HEAP_LAST: // last member of the list 2428 { 2429 ASSERT((r_alloc_heap_fsm.read() == 
ALLOC_HEAP_XRAM_RSP) 2430 ,"MemCache ERROR : bad HEAP allocation"); 2431 size_t free_pointer = m_heap_directory.next_free_ptr(); 2432 2433 HeapEntry last_entry; 2434 last_entry.owner.srcid = 0; 2435 #if L1_MULTI_CACHE 2436 last_entry.owner.cache_id = 0; 2437 #endif 2438 last_entry.owner.inst = false; 2439 if(m_heap_directory.is_full()){ 2440 last_entry.next = r_xram_rsp_next_ptr.read(); 2441 m_heap_directory.unset_full(); 2442 } else { 2443 last_entry.next = free_pointer; 2444 } 2445 2446 m_heap_directory.write_free_ptr(r_xram_rsp_victim_ptr.read()); 2447 m_heap_directory.write(r_xram_rsp_next_ptr.read(),last_entry); 2448 2449 r_xram_rsp_fsm = XRAM_RSP_IDLE; 2450 2451 break; 2452 } 2453 /////////////////////// 2454 case XRAM_RSP_ERROR_ERASE: // erase xram transaction 2455 { 2456 2457 #ifdef TDEBUG 2458 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2459 std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_ERROR_ERASE transaction table : " << std::endl; 2460 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2461 m_transaction_tab.print(i); 2462 } 2463 #endif 2464 2465 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 2466 2467 // Next state 2468 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 2469 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2470 break; 2471 } 2472 ////////////////////// 2473 case XRAM_RSP_ERROR_RSP: // send a request to TGT_RSP FSM in case of read 2474 { 2475 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) { 2476 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 2477 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 2478 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 2479 for (size_t i=0; i < m_words; i++) { 2480 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 2481 } 2482 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 2483 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 2484 r_xram_rsp_to_tgt_rsp_rerror = true; 2485 r_xram_rsp_to_tgt_rsp_req = true; 2486 2487 r_xram_rsp_fsm = XRAM_RSP_IDLE; 2488 2489 #ifdef DDEBUG 2490 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2491 std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_RSP state" << std::endl; 2492 } 2493 #endif 2494 } 2495 break; 3071 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 3072 { 3073 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 3074 { 3075 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 3076 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 3077 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 3078 for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 3079 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 3080 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 3081 r_xram_rsp_to_tgt_rsp_rerror = true; 3082 r_xram_rsp_to_tgt_rsp_req = true; 3083 3084 r_xram_rsp_fsm = XRAM_RSP_IDLE; 3085 3086 #if DEBUG_MEMC_XRAM_RSP 3087 if( m_debug_xram_rsp_fsm ) 3088 { 3089 std::cout << " <MEMC.XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 3090 << " srcid = " << r_xram_rsp_trt_buf.srcid << std::endl; 3091 } 3092 #endif 3093 } 3094 break; 2496 3095 } 2497 3096 } // end swich r_xram_rsp_fsm … … 2501 3100 //////////////////////////////////////////////////////////////////////////////////// 2502 3101 // The CLEANUP FSM handles the cleanup request from L1 caches. 2503 // It accesses the cache directory to update the list of copies. 2504 // 3102 // It accesses the cache directory and the heap to update the list of copies. 
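// ------------------------------------------------------------------------
// Illustrative sketch (not part of this changeset): the copy to be removed
// is identified by comparing the registered owner with the cleanup sender;
// bit 0 of the cleanup TRDID tells an instruction copy from a data copy.
// With the fields loaded in CLEANUP_DIR_LOCK (below), the comparison is:
//
//     bool cleanup_inst = r_cleanup_trdid.read() & 0x1;          // 1 = L1 instruction cache copy
//     bool match        = (r_cleanup_copy.read()      == r_cleanup_srcid.read())
//                     and (r_cleanup_copy_inst.read() == cleanup_inst);
//
// The same test is applied first to the owner stored in the directory entry
// and then, if needed, to each entry of the heap list until the copy is
// found and unlinked.
// ------------------------------------------------------------------------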
2505 3103 ////////////////////////////////////////////////////////////////////////////////////
2506 switch ( r_cleanup_fsm.read() ) {
2507
2508 ///////////////////
2509 case CLEANUP_IDLE:
2510 {
2511 if ( p_vci_tgt_cleanup.cmdval.read() ) {
2512 ASSERT((p_vci_tgt_cleanup.srcid.read() < m_initiators)
2513 ,"VCI_MEM_CACHE error in a cleanup request : received SRCID is larger than the number of initiators");
2514
2515 bool reached = false;
2516 for ( size_t index = 0 ; index < ncseg && !reached ; index++ ){
2517 if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ){
2518 reached = true;
2519 }
2520 }
2521 // only write request to a mapped address that are not broadcast are handled
2522 if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) &&
2523 ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) &&
2524 reached)
2525 {
2526 PRINTF(" * <MEM_CACHE.CLEANUP> Request from %d.%d at address %llx\n",(uint32_t)p_vci_tgt_cleanup.srcid.read(),(uint32_t)p_vci_tgt_cleanup.pktid.read(),(uint64_t)p_vci_tgt_cleanup.address.read());
2527
2528 m_cpt_cleanup++;
2529
2530 r_cleanup_nline = (addr_t)(m_nline[(vci_addr_t)(p_vci_tgt_cleanup.address.read())]) ;
2531 r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read();
2532 r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read();
2533 r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read();
2534
2535 r_cleanup_fsm = CLEANUP_DIR_LOCK;
2536 }
2537 }
2538 break;
3104
3105
3106 switch ( r_cleanup_fsm.read() )
3107 {
3108 //////////////////
3109 case CLEANUP_IDLE:
3110 {
3111 if ( p_vci_tgt_cleanup.cmdval.read() )
3112 {
3113 if (p_vci_tgt_cleanup.srcid.read() >= m_initiators )
3114 {
3115 std::cout << "VCI_MEM_CACHE ERROR " << name()
3116 << " CLEANUP_IDLE state" << std::endl;
3117 std::cout << "illegal srcid for cleanup request" << std::endl;
3118 exit(0);
3119 }
3120
3121 bool reached = false;
3122 for ( size_t index = 0 ; index < ncseg && !reached ; index++ )
3123 {
3124 if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) )
3125 reached = true;
3126 }
3127 // only write request to a mapped address that are not broadcast are handled
3128 if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) &&
3129 ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && reached)
3130 {
3131 addr_t line = (addr_t)(m_nline[(vci_addr_t)(p_vci_tgt_cleanup.address.read())]);
3132
3133 r_cleanup_nline = line;
3134 r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read();
3135 r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read();
3136 r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read();
3137 r_cleanup_fsm = CLEANUP_DIR_LOCK;
3138
3139 #if DEBUG_MEMC_CLEANUP
3140 if( m_debug_cleanup_fsm )
3141 {
3142 std::cout << " <MEMC.CLEANUP_IDLE> Cleanup request:" << std::hex
3143 << " line = " << line * m_words * 4
3144 << " / owner_id = " << p_vci_tgt_cleanup.srcid.read()
3145 << " / owner_ins = " << (p_vci_tgt_cleanup.trdid.read()&0x1)
3146 << std::endl;
3147 }
3148 #endif
3149 m_cpt_cleanup++;
3150 }
3151 }
3152 break;
2539 3153 }
2540 3154 //////////////////////
2541 case CLEANUP_DIR_LOCK:
2542 {
2543 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) {
2544
2545 // Read the directory
2546 size_t way = 0;
2547 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4;
2548 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way);
2549 #ifdef DDEBUG
2550 if(m_cpt_cycles > DEBUG_START_CYCLE){
2551 std::cout << "In CLEANUP_DIR_LOCK printing the entry of address is : " << std::hex << cleanup_address << std::endl;
2552 entry.print();
2553 std::cout << "done" << std::endl;
2554 }
2555 #endif
2556 r_cleanup_is_cnt = entry.is_cnt;
2557 r_cleanup_dirty = entry.dirty;
2558 r_cleanup_tag = entry.tag;
2559 r_cleanup_lock = entry.lock;
2560 r_cleanup_way = way;
2561 r_cleanup_copy = entry.owner.srcid;
3155 case CLEANUP_DIR_LOCK: // test directory status
3156 {
3157 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP )
3158 {
3159 // Read the directory
3160 size_t way = 0;
3161 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4;
3162 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way);
3163 r_cleanup_is_cnt = entry.is_cnt;
3164 r_cleanup_dirty = entry.dirty;
3165 r_cleanup_tag = entry.tag;
3166 r_cleanup_lock = entry.lock;
3167 r_cleanup_way = way;
3168 r_cleanup_copy = entry.owner.srcid;
2562 3169 #if L1_MULTI_CACHE
2563 r_cleanup_copy_cache= entry.owner.cache_id;
2564 #endif
2565 r_cleanup_copy_inst = entry.owner.inst;
2566 r_cleanup_count = entry.count;
2567 r_cleanup_ptr = entry.ptr;
2568
2569 // In case of hit, the copy must be cleaned in the copies bit-vector
2570 if( entry.valid){
2571 if ( (entry.count==1) || (entry.is_cnt) ) { // no access to the heap
2572 r_cleanup_fsm = CLEANUP_DIR_WRITE;
2573 } else {
2574 r_cleanup_fsm = CLEANUP_HEAP_LOCK;
2575 }
2576 } else {
2577 r_cleanup_fsm = CLEANUP_UPT_LOCK;
2578 }
2579 }
2580 break;
3170 r_cleanup_copy_cache= entry.owner.cache_id;
3171 #endif
3172 r_cleanup_copy_inst = entry.owner.inst;
3173 r_cleanup_count = entry.count;
3174 r_cleanup_ptr = entry.ptr;
3175
3176 if( entry.valid) // hit : the copy must be cleared
3177 {
3178 if ( (entry.count==1) || (entry.is_cnt) ) // no access to the heap
3179 {
3180 r_cleanup_fsm = CLEANUP_DIR_WRITE;
3181 }
3182 else // access to the heap
3183 {
3184 r_cleanup_fsm = CLEANUP_HEAP_LOCK;
3185 }
3186 }
3187 else // miss : we must check the update table
3188 {
3189 r_cleanup_fsm = CLEANUP_UPT_LOCK;
3190 }
3191
3192 #if DEBUG_MEMC_CLEANUP
3193 if( m_debug_cleanup_fsm )
3194 {
3195 std::cout << " <MEMC.CLEANUP_DIR_LOCK> Test directory status: " << std::hex
3196 << " line = " << r_cleanup_nline.read() * m_words * 4
3197 << " / hit = " << entry.valid
3198 << " / dir_id = " << entry.owner.srcid
3199 << " / dir_ins = " << entry.owner.inst
3200 << " / search_id = " << r_cleanup_srcid.read()
3201 << " / search_ins = " << (r_cleanup_trdid.read()&0x1)
3202 << " / count = " << entry.count
3203 << " / is_cnt = " << entry.is_cnt << std::endl;
3204 }
3205 #endif
3206 }
3207 break;
2581 3208 }
2582 3209 ///////////////////////
2583 case CLEANUP_DIR_WRITE:
2584 {
2585 ASSERT((r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP)
2586 ,"MemCache ERROR : Bad DIR allocation");
2587 size_t way = r_cleanup_way.read();
2588 #define L2 soclib::common::uint32_log2
2589 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read() << (L2(m_words) +2))];
2590 #undef L2
2591 bool cleanup_inst = r_cleanup_trdid.read() & 0x1;
2592 bool match_srcid = ((r_cleanup_copy.read() == r_cleanup_srcid.read())
3210 case CLEANUP_DIR_WRITE: // update the directory entry without heap access
3211 {
3212 if ( r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP )
3213 {
3214 std::cout << "VCI_MEM_CACHE ERROR " << name()
3215 << " CLEANUP_DIR_WRITE state"
3216 << " bad DIR allocation" << std::endl;
3217 exit(0);
3218 }
3219
3220 size_t way = r_cleanup_way.read();
3221 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)];
3222 bool cleanup_inst = r_cleanup_trdid.read() & 0x1;
3223 bool match_srcid = ((r_cleanup_copy.read() == r_cleanup_srcid.read())
2593 3224 #if L1_MULTI_CACHE
2594 3225 and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read())
2595 3226 #endif
2596 3227 );
2597 bool match_inst = (r_cleanup_copy_inst.read() == cleanup_inst);
2598 bool match = match_srcid && match_inst;
2599
2600 // update the cache directory (for the copies)
2601 DirectoryEntry entry;
2602 entry.valid = true;
2603 entry.is_cnt = r_cleanup_is_cnt.read();
2604 entry.dirty = r_cleanup_dirty.read();
2605 entry.tag = r_cleanup_tag.read();
2606 entry.lock = r_cleanup_lock.read();
2607 entry.ptr = r_cleanup_ptr.read();
2608 if(r_cleanup_is_cnt.read()) { // Directory is a counter
2609 entry.count = r_cleanup_count.read() -1;
2610 entry.owner.srcid = 0;
3228 bool match_inst = (r_cleanup_copy_inst.read() == cleanup_inst);
3229 bool match = match_srcid && match_inst;
3230
3231 // update the cache directory (for the copies)
3232 DirectoryEntry entry;
3233 entry.valid = true;
3234 entry.is_cnt = r_cleanup_is_cnt.read();
3235 entry.dirty = r_cleanup_dirty.read();
3236 entry.tag = r_cleanup_tag.read();
3237 entry.lock = r_cleanup_lock.read();
3238 entry.ptr = r_cleanup_ptr.read();
3239
3240 if ( r_cleanup_is_cnt.read() ) // counter mode
3241 {
3242 entry.count = r_cleanup_count.read() -1;
3243 entry.owner.srcid = 0;
2611 3244 #if L1_MULTI_CACHE
2612 entry.owner.cache_id= 0;
2613 #endif
2614 entry.owner.inst = 0;
2615 // response to the cache
3245 entry.owner.cache_id= 0;
3246 #endif
3247 entry.owner.inst = 0;
3248 // response to the cache
3249 r_cleanup_fsm = CLEANUP_RSP;
3250 }
3251 else // linked_list mode
3252 {
3253 if ( match ) // hit
3254 {
3255 entry.count = 0; // no more copy
3256 entry.owner.srcid = 0;
3257 #if L1_MULTI_CACHE
3258 entry.owner.cache_id=0;
3259 #endif
3260 entry.owner.inst = 0;
3261 r_cleanup_fsm = CLEANUP_RSP;
3262 }
3263 else // miss
3264 {
3265 entry.count = r_cleanup_count.read();
3266 entry.owner.srcid = r_cleanup_copy.read();
3267 #if L1_MULTI_CACHE
3268 entry.owner.cache_id = r_cleanup_copy_cache.read();
3269 #endif
3270 entry.owner.inst = r_cleanup_copy_inst.read();
3271 r_cleanup_fsm = CLEANUP_UPT_LOCK;
3272 }
3273 }
3274 m_cache_directory.write(set, way, entry);
3275
3276 #if DEBUG_MEMC_CLEANUP
3277 if( m_debug_cleanup_fsm )
3278 {
3279 std::cout << " <MEMC.CLEANUP_DIR_WRITE> Update directory:" << std::hex
3280 << " line = " << r_cleanup_nline.read() * m_words * 4
3281 << " / dir_id = " << entry.owner.srcid
3282 << " / dir_ins = " << entry.owner.inst
3283 << " / count = " << entry.count
3284 << " / is_cnt = " << entry.is_cnt << std::endl;
3285 }
3286 #endif
3287
3288 break;
3289 }
3290 ///////////////////////
3291 case CLEANUP_HEAP_LOCK: // two cases are handled in this state:
3292 // - the matching copy is directly in the directory
3293 // - the matching copy is the first copy in the heap
3294 {
3295 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP )
3296 {
3297 size_t way = r_cleanup_way.read();
3298 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)];
3299 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read());
3300 bool last = (heap_entry.next == r_cleanup_ptr.read());
3301 bool cleanup_inst = r_cleanup_trdid.read() & 0x1;
3302
3303 // match_dir computation
3304 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read());
3305 bool match_dir_inst = (r_cleanup_copy_inst.read() == cleanup_inst);
3306 bool match_dir = match_dir_srcid and match_dir_inst;
3307 #if L1_MULTI_CACHE
3308 match_dir = match_dir and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read());
3309 #endif
3310
3311 // match_heap computation
3312 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read());
3313
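Note on the owner-matching test used in the CLEANUP_DIR_WRITE and CLEANUP_HEAP_LOCK states above: the directory (or heap) records each L1 copy as a (srcid, inst) pair, while the cleanup request carries the same information in its VCI srcid and in bit 0 of its trdid. The following minimal standalone sketch isolates that comparison; OwnerCopy and cleanup_matches are illustrative names, not identifiers of the memory cache component.

#include <cstdint>
#include <iostream>

// Illustrative stand-in for the owner fields stored in a directory or heap entry.
struct OwnerCopy {
    uint32_t srcid;   // VCI source id of the L1 cache holding the copy
    bool     inst;    // true if the copy belongs to the instruction cache
};

// Returns true when the cleanup request (srcid + trdid) designates
// the same copy as the one recorded in the entry.
static bool cleanup_matches(const OwnerCopy& owner,
                            uint32_t cleanup_srcid,
                            uint32_t cleanup_trdid)
{
    bool cleanup_inst = (cleanup_trdid & 0x1);   // bit 0 of trdid encodes data/instruction
    return (owner.srcid == cleanup_srcid) &&
           (owner.inst  == cleanup_inst);
}

int main()
{
    OwnerCopy owner = { 4, false };                       // data-cache copy held by initiator 4
    std::cout << cleanup_matches(owner, 4, 0x0) << "\n";  // 1 : same srcid, data copy
    std::cout << cleanup_matches(owner, 4, 0x1) << "\n";  // 0 : instruction bit differs
    return 0;
}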